// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"

#include "src/arm/lithium-codegen-arm.h"
#include "src/arm/lithium-gap-resolver-arm.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {
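
// A CallWrapper that records a safepoint, with the given pointer map and
// deopt mode, immediately after the wrapped call returns.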
class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const OVERRIDE {}

  void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
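  // The doubles are spilled to the lowest addresses of the reserved stack
  // area, in ascending allocation-index order; RestoreCallerDoubles walks
  // the same bit vector in the same order.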
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
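      // Fill the reserved slots with a recognizable zap value so that reads
      // of uninitialized slots fail fast in debug builds.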
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp,
              target.offset(),
              r0,
              r3,
              GetLinkRegisterState(),
              kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
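    // Lazy deoptimization patches the call site after the fact; reserve
    // enough space for the patch sequence before emitting the call.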
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
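  // The factor of 7 below is a conservative per-entry upper bound on the
  // number of words emitted (branch, possible frame-building code, and the
  // inlined data word), so the is_int24 check errs on the safe side.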
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ bl(&needs_frame);
      } else {
        __ bl(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(ip);
      __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ add(entry_offset, entry_offset,
           Operand(ExternalReference::ForDeoptEntry(base)));
    __ blx(entry_offset);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
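  // On 32-bit targets a Smi is the value shifted left by one with a zero tag
  // bit, so the tagged bit pattern is simply value << 1, reinterpreted below.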
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}
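

// Arguments are addressed relative to sp when no eager frame was built;
// stack-slot indices for them are negative, with index -1 denoting the
// argument closest to the stack pointer.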
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve the parameter without an eager stack frame, relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve the parameter without an eager stack frame, relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
  int size = masm()->CallSize(code, mode);
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
  }
  return size;
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  DCHECK(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
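    // Stress mode: decrement a per-isolate counter and force a deoptimization
    // each time it reaches zero, so the deopt paths get exercised regularly.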

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &jump_table_.last().label);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    // Register pp always contains a pointer to the constant pool.
    safepoint.DefinePointerRegister(pp, zone());
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
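  // mask == |divisor| - 1; the -(divisor + 1) form avoids overflow for
  // divisor == kMinInt and still yields 0x7FFFFFFF. Example: for divisor
  // +/-8 the mask is 7, and -13 % 8 becomes -(13 & 7) == -5, matching JS.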
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);
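  // TruncatingDiv computed the truncated quotient dividend / |divisor| via a
  // magic-number multiply; multiplying it back and subtracting from the
  // dividend recovers the remainder. SetCC leaves the flags for the
  // minus-zero check below.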

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in this
    // case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ Mls(result_reg, result_reg, right_reg, left_reg);
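    // Mls computes Ra - Rn * Rm, i.e. left - quotient * right, which is the
    // remainder, in a single instruction where available.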

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    DCHECK(!scratch.is(left_reg));
    DCHECK(!scratch.is(right_reg));
    DCHECK(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    DCHECK(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    DCHECK(!quotient.is(dividend));
    DCHECK(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
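  // result now holds the dividend biased by 2^shift - 1 when it is negative
  // (the sign bits shifted in above), so the arithmetic shift below truncates
  // toward zero instead of rounding toward negative infinity.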
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if the result is not exact, i.e. if multiplying the quotient
    // back by the divisor does not reproduce the dividend.
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(divisor, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(dividend, Operand(kMinInt));
    __ cmp(divisor, Operand(-1), eq);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, dividend, divisor);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), dividend);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), divisor);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Compute remainder and deopt if it's not zero.
    Register remainder = scratch0();
    __ Mls(remainder, result, divisor, dividend);
    __ cmp(remainder, Operand::Zero());
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place: addend += multiplier * multiplicand.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place: minuend -= multiplier * multiplicand.
  DCHECK(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ rsb(result, dividend, Operand::Zero(), SetCC);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ mov(result, Operand(result, ASR, shift));
    return;
  }

  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ rsb(result, result, Operand::Zero());
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmp(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ sub(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register left = ToRegister(instr->dividend());
  Register right = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(left, Operand(kMinInt));
    __ cmp(right, Operand(-1), eq);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) return;

  Label done;
  Register remainder = scratch0();
  __ Mls(remainder, result, right, left);
  __ cmp(remainder, Operand::Zero());
  __ b(eq, &done);
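  // Floor adjustment: if the remainder is non-zero and its sign differs from
  // the divisor's, (remainder ^ right) ASR 31 is -1, which decrements the
  // truncated quotient to the floored one.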
  __ eor(remainder, remainder, Operand(right));
  __ add(result, result, Operand(remainder, ASR, 31));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // A zero constant is handled separately (case 0 below). If the constant
      // is negative and left is zero, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
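
        // Branch-free absolute value: mask is 0 for non-negative constants
        // and -1 (all ones) for negative ones, in which case (x + mask) ^ mask
        // negates x in two's complement.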
        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
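      // The 64-bit product fits in 32 bits iff the high word equals the sign
      // extension of the low word, i.e. result ASR 31.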
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
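    // JavaScript defines shifts modulo 32, while ARM register shifts use the
    // low byte of the register, so mask explicitly to the low five bits.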
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    DCHECK(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
1901 uint32_t lo = static_cast<uint32_t>(bits);
1902 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1903 __ mov(ip, Operand(lo));
1904 __ mov(scratch0(), Operand(hi));
1905 __ vmov(result, ip, scratch0());
1909 double v = instr->value();
1910 __ Vmov(result, v, scratch0());
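// Illustrative reading of the work-around above: the mask keeps the exponent
// field plus the quiet bit, so the test matches doubles with an all-ones
// exponent and a clear quiet bit, i.e. signalling NaNs (and infinities, which
// the integer move also handles harmlessly). Routing the raw bits through
// core registers keeps an x87-based simulator host from silently quieting the
// NaN. Example: bits = 0x7FF4000000000000 (an sNaN) satisfies
// (bits & 0x7FF8000000000000) == 0x7FF0000000000000 and takes the vmov path.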
1914 void LCodeGen::DoConstantE(LConstantE* instr) {
1915 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1919 void LCodeGen::DoConstantT(LConstantT* instr) {
1920 Handle<Object> object = instr->value(isolate());
1921 AllowDeferredHandleDereference smi_check;
1922 __ Move(ToRegister(instr->result()), object);
1926 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1927 Register result = ToRegister(instr->result());
1928 Register map = ToRegister(instr->value());
1929 __ EnumLength(result, map);
1933 void LCodeGen::DoDateField(LDateField* instr) {
1934 Register object = ToRegister(instr->date());
1935 Register result = ToRegister(instr->result());
1936 Register scratch = ToRegister(instr->temp());
1937 Smi* index = instr->index();
1938 Label runtime, done;
1939 DCHECK(object.is(result));
1940 DCHECK(object.is(r0));
1941 DCHECK(!scratch.is(scratch0()));
1942 DCHECK(!scratch.is(object));
1945 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
1946 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1947 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
1949 if (index->value() == 0) {
1950 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1952 if (index->value() < JSDate::kFirstUncachedField) {
1953 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1954 __ mov(scratch, Operand(stamp));
1955 __ ldr(scratch, MemOperand(scratch));
1956 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1957 __ cmp(scratch, scratch0());
1959 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1960 kPointerSize * index->value()));
1964 __ PrepareCallCFunction(2, scratch);
1965 __ mov(r1, Operand(index));
1966 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1972 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1974 String::Encoding encoding) {
1975 if (index->IsConstantOperand()) {
1976 int offset = ToInteger32(LConstantOperand::cast(index));
1977 if (encoding == String::TWO_BYTE_ENCODING) {
1978 offset *= kUC16Size;
1980 STATIC_ASSERT(kCharSize == 1);
1981 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1983 Register scratch = scratch0();
1984 DCHECK(!scratch.is(string));
1985 DCHECK(!scratch.is(ToRegister(index)));
1986 if (encoding == String::ONE_BYTE_ENCODING) {
1987 __ add(scratch, string, Operand(ToRegister(index)));
1989 STATIC_ASSERT(kUC16Size == 2);
1990 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1992 return FieldMemOperand(scratch, SeqString::kHeaderSize);
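// Addressing example for BuildSeqStringOperand (illustrative): for a two-byte
// string and a constant index 3 it returns
//   FieldMemOperand(string, SeqString::kHeaderSize + 3 * kUC16Size)
// while for a register index the index is pre-scaled with LSL #1 into
// scratch0() and only the header offset remains immediate.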
1996 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1997 String::Encoding encoding = instr->hydrogen()->encoding();
1998 Register string = ToRegister(instr->string());
1999 Register result = ToRegister(instr->result());
2001 if (FLAG_debug_code) {
2002 Register scratch = scratch0();
2003 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
2004 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
2006 __ and_(scratch, scratch,
2007 Operand(kStringRepresentationMask | kStringEncodingMask));
2008 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2009 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2010 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
2011 ? one_byte_seq_type : two_byte_seq_type));
2012 __ Check(eq, kUnexpectedStringType);
2015 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2016 if (encoding == String::ONE_BYTE_ENCODING) {
2017 __ ldrb(result, operand);
2019 __ ldrh(result, operand);
2024 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2025 String::Encoding encoding = instr->hydrogen()->encoding();
2026 Register string = ToRegister(instr->string());
2027 Register value = ToRegister(instr->value());
2029 if (FLAG_debug_code) {
2030 Register index = ToRegister(instr->index());
2031 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2032 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2034 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2035 ? one_byte_seq_type : two_byte_seq_type;
2036 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2039 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2040 if (encoding == String::ONE_BYTE_ENCODING) {
2041 __ strb(value, operand);
2043 __ strh(value, operand);
2048 void LCodeGen::DoAddI(LAddI* instr) {
2049 LOperand* left = instr->left();
2050 LOperand* right = instr->right();
2051 LOperand* result = instr->result();
2052 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2053 SBit set_cond = can_overflow ? SetCC : LeaveCC;
2055 if (right->IsStackSlot()) {
2056 Register right_reg = EmitLoadRegister(right, ip);
2057 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
2059 DCHECK(right->IsRegister() || right->IsConstantOperand());
2060 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
2064 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
2069 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2070 LOperand* left = instr->left();
2071 LOperand* right = instr->right();
2072 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2073 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2074 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
2075 Register left_reg = ToRegister(left);
2076 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
2078 : Operand(EmitLoadRegister(right, ip));
2079 Register result_reg = ToRegister(instr->result());
2080 __ cmp(left_reg, right_op);
2081 __ Move(result_reg, left_reg, condition);
2082 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2084 DCHECK(instr->hydrogen()->representation().IsDouble());
2085 DwVfpRegister left_reg = ToDoubleRegister(left);
2086 DwVfpRegister right_reg = ToDoubleRegister(right);
2087 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2088 Label result_is_nan, return_left, return_right, check_zero, done;
2089 __ VFPCompareAndSetFlags(left_reg, right_reg);
2090 if (operation == HMathMinMax::kMathMin) {
2091 __ b(mi, &return_left);
2092 __ b(gt, &return_right);
2094 __ b(mi, &return_right);
2095 __ b(gt, &return_left);
2097 __ b(vs, &result_is_nan);
2098 // Left equals right => check for -0.
2099 __ VFPCompareAndSetFlags(left_reg, 0.0);
2100 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2101 __ b(ne, &done); // left == right != 0.
2103 __ b(ne, &return_left); // left == right != 0.
2105 // At this point, both left and right are either 0 or -0.
2106 if (operation == HMathMinMax::kMathMin) {
2107 // We could use a single 'vorr' instruction here if we had NEON support.
2108 __ vneg(left_reg, left_reg);
2109 __ vsub(result_reg, left_reg, right_reg);
2110 __ vneg(result_reg, result_reg);
2112 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2113 // the decision for vadd is easy because vand is a NEON instruction.
2114 __ vadd(result_reg, left_reg, right_reg);
2118 __ bind(&result_is_nan);
2119 __ vadd(result_reg, left_reg, right_reg);
2122 __ bind(&return_right);
2123 __ Move(result_reg, right_reg);
2124 if (!left_reg.is(result_reg)) {
2128 __ bind(&return_left);
2129 __ Move(result_reg, left_reg);
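// Sketch of the signed-zero handling above: once left == right == +/-0, the
// result's sign must be the OR of the input signs for min and the AND for
// max. vneg/vsub/vneg computes -((-L) - R), which yields -0 iff at least one
// input is -0; vadd yields -0 only when both inputs are -0. Example:
// min(+0, -0) = -((-0) - (-0)) = -(+0) = -0, as the spec requires.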
2136 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2137 DwVfpRegister left = ToDoubleRegister(instr->left());
2138 DwVfpRegister right = ToDoubleRegister(instr->right());
2139 DwVfpRegister result = ToDoubleRegister(instr->result());
2140 switch (instr->op()) {
2142 __ vadd(result, left, right);
2145 __ vsub(result, left, right);
2148 __ vmul(result, left, right);
2151 __ vdiv(result, left, right);
2154 __ PrepareCallCFunction(0, 2, scratch0());
2155 __ MovToFloatParameters(left, right);
2157 ExternalReference::mod_two_doubles_operation(isolate()),
2159 // Move the result into the double result register.
2160 __ MovFromFloatResult(result);
2170 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2171 DCHECK(ToRegister(instr->context()).is(cp));
2172 DCHECK(ToRegister(instr->left()).is(r1));
2173 DCHECK(ToRegister(instr->right()).is(r0));
2174 DCHECK(ToRegister(instr->result()).is(r0));
2176 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2177 // Block literal pool emission to ensure nop indicating no inlined smi code
2178 // is in the correct position.
2179 Assembler::BlockConstPoolScope block_const_pool(masm());
2180 CallCode(code, RelocInfo::CODE_TARGET, instr);
2184 template<class InstrType>
2185 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2186 int left_block = instr->TrueDestination(chunk_);
2187 int right_block = instr->FalseDestination(chunk_);
2189 int next_block = GetNextEmittedBlock();
2191 if (right_block == left_block || condition == al) {
2192 EmitGoto(left_block);
2193 } else if (left_block == next_block) {
2194 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2195 } else if (right_block == next_block) {
2196 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2198 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2199 __ b(chunk_->GetAssemblyLabel(right_block));
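// Branch-layout note: EmitBranch exploits the linear block order. If the true
// block falls through next, one inverted branch to the false label suffices;
// if the false block falls through, one branch to the true label is enough;
// only when neither is adjacent are both branches emitted. E.g. with layout
// [compare][true-block]... the whole branch is just
//   b(NegateCondition(condition), false_label).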
2204 template<class InstrType>
2205 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2206 int false_block = instr->FalseDestination(chunk_);
2207 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2211 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2216 void LCodeGen::DoBranch(LBranch* instr) {
2217 Representation r = instr->hydrogen()->value()->representation();
2218 if (r.IsInteger32() || r.IsSmi()) {
2219 DCHECK(!info()->IsStub());
2220 Register reg = ToRegister(instr->value());
2221 __ cmp(reg, Operand::Zero());
2222 EmitBranch(instr, ne);
2223 } else if (r.IsDouble()) {
2224 DCHECK(!info()->IsStub());
2225 DwVfpRegister reg = ToDoubleRegister(instr->value());
2226 // Test the double value. Zero and NaN are false.
2227 __ VFPCompareAndSetFlags(reg, 0.0);
2228 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2229 EmitBranch(instr, ne);
2231 DCHECK(r.IsTagged());
2232 Register reg = ToRegister(instr->value());
2233 HType type = instr->hydrogen()->value()->type();
2234 if (type.IsBoolean()) {
2235 DCHECK(!info()->IsStub());
2236 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2237 EmitBranch(instr, eq);
2238 } else if (type.IsSmi()) {
2239 DCHECK(!info()->IsStub());
2240 __ cmp(reg, Operand::Zero());
2241 EmitBranch(instr, ne);
2242 } else if (type.IsJSArray()) {
2243 DCHECK(!info()->IsStub());
2244 EmitBranch(instr, al);
2245 } else if (type.IsHeapNumber()) {
2246 DCHECK(!info()->IsStub());
2247 DwVfpRegister dbl_scratch = double_scratch0();
2248 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2249 // Test the double value. Zero and NaN are false.
2250 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2251 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2252 EmitBranch(instr, ne);
2253 } else if (type.IsString()) {
2254 DCHECK(!info()->IsStub());
2255 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2256 __ cmp(ip, Operand::Zero());
2257 EmitBranch(instr, ne);
2259 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2260 // Avoid deopts in the case where we've never executed this path before.
2261 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2263 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2264 // undefined -> false.
2265 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2266 __ b(eq, instr->FalseLabel(chunk_));
2268 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2269 // Boolean -> its value.
2270 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2271 __ b(eq, instr->TrueLabel(chunk_));
2272 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2273 __ b(eq, instr->FalseLabel(chunk_));
2275 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2277 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2278 __ b(eq, instr->FalseLabel(chunk_));
2281 if (expected.Contains(ToBooleanStub::SMI)) {
2282 // Smis: 0 -> false, all other -> true.
2283 __ cmp(reg, Operand::Zero());
2284 __ b(eq, instr->FalseLabel(chunk_));
2285 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2286 } else if (expected.NeedsMap()) {
2287 // If we need a map later and have a Smi -> deopt.
2289 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
2292 const Register map = scratch0();
2293 if (expected.NeedsMap()) {
2294 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2296 if (expected.CanBeUndetectable()) {
2297 // Undetectable -> false.
2298 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2299 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2300 __ b(ne, instr->FalseLabel(chunk_));
2304 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2305 // spec object -> true.
2306 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2307 __ b(ge, instr->TrueLabel(chunk_));
2310 if (expected.Contains(ToBooleanStub::STRING)) {
2311 // String value -> false iff empty.
2313 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2314 __ b(ge, &not_string);
2315 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2316 __ cmp(ip, Operand::Zero());
2317 __ b(ne, instr->TrueLabel(chunk_));
2318 __ b(instr->FalseLabel(chunk_));
2319 __ bind(&not_string);
2322 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2323 // Symbol value -> true.
2324 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2325 __ b(eq, instr->TrueLabel(chunk_));
2328 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2329 // heap number -> false iff +0, -0, or NaN.
2330 DwVfpRegister dbl_scratch = double_scratch0();
2331 Label not_heap_number;
2332 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2333 __ b(ne, &not_heap_number);
2334 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2335 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2336 __ cmp(r0, r0, vs); // NaN -> false.
2337 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2338 __ b(instr->TrueLabel(chunk_));
2339 __ bind(&not_heap_number);
2342 if (!expected.IsGeneric()) {
2343 // We've seen something for the first time -> deopt.
2344 // This can only happen if we are not generic already.
2345 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
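// Summary of the generic path above (it mirrors ES ToBoolean): undefined,
// null, false, smi 0, undetectable objects, the empty string, and +0/-0/NaN
// heap numbers are false; true, non-zero smis, spec objects, non-empty
// strings, symbols, and other heap numbers are true. A value whose type was
// never observed deopts so the unoptimized code can widen the expected set.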
2352 void LCodeGen::EmitGoto(int block) {
2353 if (!IsNextEmittedBlock(block)) {
2354 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2359 void LCodeGen::DoGoto(LGoto* instr) {
2360 EmitGoto(instr->block_id());
2364 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2365 Condition cond = kNoCondition;
2368 case Token::EQ_STRICT:
2372 case Token::NE_STRICT:
2376 cond = is_unsigned ? lo : lt;
2379 cond = is_unsigned ? hi : gt;
2382 cond = is_unsigned ? ls : le;
2385 cond = is_unsigned ? hs : ge;
2388 case Token::INSTANCEOF:
2396 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2397 LOperand* left = instr->left();
2398 LOperand* right = instr->right();
2400 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2401 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2402 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2404 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2405 // We can statically evaluate the comparison.
2406 double left_val = ToDouble(LConstantOperand::cast(left));
2407 double right_val = ToDouble(LConstantOperand::cast(right));
2408 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2409 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2410 EmitGoto(next_block);
2412 if (instr->is_double()) {
2413 // Compare left and right operands as doubles and load the
2414 // resulting flags into the normal status register.
2415 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2416 // If a NaN is involved, i.e. the result is unordered (V set),
2417 // jump to false block label.
2418 __ b(vs, instr->FalseLabel(chunk_));
2420 if (right->IsConstantOperand()) {
2421 int32_t value = ToInteger32(LConstantOperand::cast(right));
2422 if (instr->hydrogen_value()->representation().IsSmi()) {
2423 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2425 __ cmp(ToRegister(left), Operand(value));
2427 } else if (left->IsConstantOperand()) {
2428 int32_t value = ToInteger32(LConstantOperand::cast(left));
2429 if (instr->hydrogen_value()->representation().IsSmi()) {
2430 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2432 __ cmp(ToRegister(right), Operand(value));
2434 // We commuted the operands, so commute the condition.
2435 cond = CommuteCondition(cond);
2437 __ cmp(ToRegister(left), ToRegister(right));
2440 EmitBranch(instr, cond);
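// Commuting example for the constant-on-the-left case above: the operands are
// swapped so the constant can serve as the cmp immediate, and the condition
// is commuted rather than negated. For "5 < x" the emitted compare is
// cmp(x, #5) with lt rewritten to gt by CommuteCondition, which preserves
// the original meaning.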
2445 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2446 Register left = ToRegister(instr->left());
2447 Register right = ToRegister(instr->right());
2449 __ cmp(left, Operand(right));
2450 EmitBranch(instr, eq);
2454 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2455 if (instr->hydrogen()->representation().IsTagged()) {
2456 Register input_reg = ToRegister(instr->object());
2457 __ mov(ip, Operand(factory()->the_hole_value()));
2458 __ cmp(input_reg, ip);
2459 EmitBranch(instr, eq);
2463 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2464 __ VFPCompareAndSetFlags(input_reg, input_reg);
2465 EmitFalseBranch(instr, vc);
2467 Register scratch = scratch0();
2468 __ VmovHigh(scratch, input_reg);
2469 __ cmp(scratch, Operand(kHoleNanUpper32));
2470 EmitBranch(instr, eq);
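// How the double hole test above works: the hole is a NaN with a fixed upper
// word (kHoleNanUpper32). Comparing the input with itself leaves the V flag
// set only for NaNs, so EmitFalseBranch(vc) rejects every ordered double
// first; for the remaining NaNs, VmovHigh extracts the upper 32 bits, and
// equality with kHoleNanUpper32 distinguishes the hole from other NaNs.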
2474 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2475 Representation rep = instr->hydrogen()->value()->representation();
2476 DCHECK(!rep.IsInteger32());
2477 Register scratch = ToRegister(instr->temp());
2479 if (rep.IsDouble()) {
2480 DwVfpRegister value = ToDoubleRegister(instr->value());
2481 __ VFPCompareAndSetFlags(value, 0.0);
2482 EmitFalseBranch(instr, ne);
2483 __ VmovHigh(scratch, value);
2484 __ cmp(scratch, Operand(0x80000000));
2486 Register value = ToRegister(instr->value());
2489 Heap::kHeapNumberMapRootIndex,
2490 instr->FalseLabel(chunk()),
2492 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2493 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2494 __ cmp(scratch, Operand(0x80000000));
2495 __ cmp(ip, Operand(0x00000000), eq);
2497 EmitBranch(instr, eq);
2501 Condition LCodeGen::EmitIsObject(Register input,
2503 Label* is_not_object,
2505 Register temp2 = scratch0();
2506 __ JumpIfSmi(input, is_not_object);
2508 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2509 __ cmp(input, temp2);
2510 __ b(eq, is_object);
2513 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2514 // Undetectable objects behave like undefined.
2515 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2516 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2517 __ b(ne, is_not_object);
2519 // Load instance type and check that it is in object type range.
2520 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2521 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2522 __ b(lt, is_not_object);
2523 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2528 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2529 Register reg = ToRegister(instr->value());
2530 Register temp1 = ToRegister(instr->temp());
2532 Condition true_cond =
2533 EmitIsObject(reg, temp1,
2534 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2536 EmitBranch(instr, true_cond);
2540 Condition LCodeGen::EmitIsString(Register input,
2542 Label* is_not_string,
2543 SmiCheck check_needed = INLINE_SMI_CHECK) {
2544 if (check_needed == INLINE_SMI_CHECK) {
2545 __ JumpIfSmi(input, is_not_string);
2547 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2553 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2554 Register reg = ToRegister(instr->value());
2555 Register temp1 = ToRegister(instr->temp());
2557 SmiCheck check_needed =
2558 instr->hydrogen()->value()->type().IsHeapObject()
2559 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2560 Condition true_cond =
2561 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2563 EmitBranch(instr, true_cond);
2567 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2568 Register input_reg = EmitLoadRegister(instr->value(), ip);
2569 __ SmiTst(input_reg);
2570 EmitBranch(instr, eq);
2574 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2575 Register input = ToRegister(instr->value());
2576 Register temp = ToRegister(instr->temp());
2578 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2579 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2581 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2582 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2583 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2584 EmitBranch(instr, ne);
2588 static Condition ComputeCompareCondition(Token::Value op) {
2590 case Token::EQ_STRICT:
2603 return kNoCondition;
2608 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2609 DCHECK(ToRegister(instr->context()).is(cp));
2610 Token::Value op = instr->op();
2612 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2613 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2614 // This instruction also signals no smi code inlined.
2615 __ cmp(r0, Operand::Zero());
2617 Condition condition = ComputeCompareCondition(op);
2619 EmitBranch(instr, condition);
2623 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2624 InstanceType from = instr->from();
2625 InstanceType to = instr->to();
2626 if (from == FIRST_TYPE) return to;
2627 DCHECK(from == to || to == LAST_TYPE);
2632 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2633 InstanceType from = instr->from();
2634 InstanceType to = instr->to();
2635 if (from == to) return eq;
2636 if (to == LAST_TYPE) return hs;
2637 if (from == FIRST_TYPE) return ls;
2643 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2644 Register scratch = scratch0();
2645 Register input = ToRegister(instr->value());
2647 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2648 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2651 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2652 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2656 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2657 Register input = ToRegister(instr->value());
2658 Register result = ToRegister(instr->result());
2660 __ AssertString(input);
2662 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2663 __ IndexFromHash(result, result);
2667 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2668 LHasCachedArrayIndexAndBranch* instr) {
2669 Register input = ToRegister(instr->value());
2670 Register scratch = scratch0();
2673 FieldMemOperand(input, String::kHashFieldOffset));
2674 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2675 EmitBranch(instr, eq);
2679 // Branches to a label or falls through with the answer in flags. Trashes
2680 // the temp registers, but not the input.
2681 void LCodeGen::EmitClassOfTest(Label* is_true,
2683 Handle<String> class_name,
2687 DCHECK(!input.is(temp));
2688 DCHECK(!input.is(temp2));
2689 DCHECK(!temp.is(temp2));
2691 __ JumpIfSmi(input, is_false);
2693 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2694 // Assuming the following assertions, we can use the same compares to test
2695 // for both being a function type and being in the object type range.
2696 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2697 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2698 FIRST_SPEC_OBJECT_TYPE + 1);
2699 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2700 LAST_SPEC_OBJECT_TYPE - 1);
2701 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2702 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2705 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2708 // Faster code path to avoid two compares: subtract lower bound from the
2709 // actual type and do a signed compare with the width of the type range.
2710 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2711 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2712 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2713 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2714 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2718 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2719 // Check if the constructor in the map is a function.
2720 Register instance_type = ip;
2721 __ GetMapConstructor(temp, temp, temp2, instance_type);
2723 // Objects with a non-function constructor have class 'Object'.
2724 __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
2725 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2731 // temp now contains the constructor function. Grab the
2732 // instance class name from there.
2733 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2734 __ ldr(temp, FieldMemOperand(temp,
2735 SharedFunctionInfo::kInstanceClassNameOffset));
2736 // The class name we are testing against is internalized since it's a literal.
2737 // The name in the constructor is internalized because of the way the context
2738 // is booted. This routine isn't expected to work for random API-created
2739 // classes and it doesn't have to because you can't access it with natives
2740 // syntax. Since both sides are internalized it is sufficient to use an
2741 // identity comparison.
2742 __ cmp(temp, Operand(class_name));
2743 // End with the answer in flags.
2747 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2748 Register input = ToRegister(instr->value());
2749 Register temp = scratch0();
2750 Register temp2 = ToRegister(instr->temp());
2751 Handle<String> class_name = instr->hydrogen()->class_name();
2753 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2754 class_name, input, temp, temp2);
2756 EmitBranch(instr, eq);
2760 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2761 Register reg = ToRegister(instr->value());
2762 Register temp = ToRegister(instr->temp());
2764 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2765 __ cmp(temp, Operand(instr->map()));
2766 EmitBranch(instr, eq);
2770 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2771 DCHECK(ToRegister(instr->context()).is(cp));
2772 DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
2773 DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
2775 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2776 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2778 __ cmp(r0, Operand::Zero());
2779 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2780 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2784 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2785 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2787 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2788 LInstanceOfKnownGlobal* instr)
2789 : LDeferredCode(codegen), instr_(instr) { }
2790 void Generate() OVERRIDE {
2791 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2794 LInstruction* instr() OVERRIDE { return instr_; }
2795 Label* map_check() { return &map_check_; }
2796 Label* load_bool() { return &load_bool_; }
2799 LInstanceOfKnownGlobal* instr_;
2804 DeferredInstanceOfKnownGlobal* deferred;
2805 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2807 Label done, false_result;
2808 Register object = ToRegister(instr->value());
2809 Register temp = ToRegister(instr->temp());
2810 Register result = ToRegister(instr->result());
2812 // A Smi is not instance of anything.
2813 __ JumpIfSmi(object, &false_result);
2815 // This is the inlined call site instanceof cache. The two occurrences of the
2816 // hole value will be patched to the last map/result pair generated by the instanceof stub.
2819 Register map = temp;
2820 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2822 // Block constant pool emission to ensure the positions of instructions are
2823 // as expected by the patcher. See InstanceofStub::Generate().
2824 Assembler::BlockConstPoolScope block_const_pool(masm());
2825 __ bind(deferred->map_check()); // Label for calculating code patching.
2826 // We use Factory::the_hole_value() on purpose instead of loading from the
2827 // root array to force relocation to be able to later patch with the cached map.
2829 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2830 __ mov(ip, Operand(cell));
2831 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
2832 __ cmp(map, Operand(ip));
2833 __ b(ne, &cache_miss);
2834 __ bind(deferred->load_bool()); // Label for calculating code patching.
2835 // We use Factory::the_hole_value() on purpose instead of loading from the
2836 // root array to force relocation to be able to later patch
2837 // with true or false.
2838 __ mov(result, Operand(factory()->the_hole_value()));
2842 // The inlined call site cache did not match. Check null and string before
2843 // calling the deferred code.
2844 __ bind(&cache_miss);
2845 // Null is not instance of anything.
2846 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2847 __ cmp(object, Operand(ip));
2848 __ b(eq, &false_result);
2850 // String values are not instances of anything.
2851 Condition is_string = masm_->IsObjectStringType(object, temp);
2852 __ b(is_string, &false_result);
2854 // Go to the deferred code.
2855 __ b(deferred->entry());
2857 __ bind(&false_result);
2858 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2860 // Here result has either true or false. Deferred code also produces true or false.
2862 __ bind(deferred->exit());
2867 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2870 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2871 flags = static_cast<InstanceofStub::Flags>(
2872 flags | InstanceofStub::kArgsInRegisters);
2873 flags = static_cast<InstanceofStub::Flags>(
2874 flags | InstanceofStub::kCallSiteInlineCheck);
2875 flags = static_cast<InstanceofStub::Flags>(
2876 flags | InstanceofStub::kReturnTrueFalseObject);
2877 InstanceofStub stub(isolate(), flags);
2879 PushSafepointRegistersScope scope(this);
2880 LoadContextFromDeferred(instr->context());
2882 __ Move(InstanceofStub::right(), instr->function());
2884 int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
2885 int additional_delta = (call_size / Assembler::kInstrSize) + 4;
2886 // Make sure that code size is predictable, since we use specific constant
2887 // offsets in the code to find embedded values.
2888 PredictableCodeSizeScope predictable(
2889 masm_, (additional_delta + 1) * Assembler::kInstrSize);
2890 // Make sure we don't emit any additional entries in the constant pool before
2891 // the call to ensure that the CallCodeSize() calculated the correct number of
2892 // instructions for the constant pool load.
2894 ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
2895 int map_check_delta =
2896 masm_->InstructionsGeneratedSince(map_check) + additional_delta;
2897 int bool_load_delta =
2898 masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
2899 Label before_push_delta;
2900 __ bind(&before_push_delta);
2901 __ BlockConstPoolFor(additional_delta);
2902 // r5 is used to communicate the offset to the location of the map check.
2903 __ mov(r5, Operand(map_check_delta * kPointerSize));
2904 // r6 is used to communicate the offset to the location of the bool load.
2905 __ mov(r6, Operand(bool_load_delta * kPointerSize));
2906 // The mov above can generate one or two instructions. The delta was
2907 // computed for two instructions, so we need to pad here in case of one instruction.
2909 while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
2913 CallCodeGeneric(stub.GetCode(),
2914 RelocInfo::CODE_TARGET,
2916 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2917 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2918 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2919 // Put the result value (r0) into the result register slot and
2920 // restore all registers.
2921 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
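// Patching-offset sketch for the deferred code above: r5 and r6 carry the
// byte distances from the call back to the inlined map check and bool load,
// so InstanceofStub can patch the two hole-value constants with the real
// map/result pair. Each delta is measured in instructions since the recorded
// label plus additional_delta for the call sequence, then scaled by
// kPointerSize; the padding loop keeps the instruction count fixed so the
// precomputed deltas stay valid.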
2925 void LCodeGen::DoCmpT(LCmpT* instr) {
2926 DCHECK(ToRegister(instr->context()).is(cp));
2927 Token::Value op = instr->op();
2929 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2930 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2931 // This instruction also signals no smi code inlined.
2932 __ cmp(r0, Operand::Zero());
2934 Condition condition = ComputeCompareCondition(op);
2935 __ LoadRoot(ToRegister(instr->result()),
2936 Heap::kTrueValueRootIndex,
2938 __ LoadRoot(ToRegister(instr->result()),
2939 Heap::kFalseValueRootIndex,
2940 NegateCondition(condition));
2944 void LCodeGen::DoReturn(LReturn* instr) {
2945 if (FLAG_trace && info()->IsOptimizing()) {
2946 // Push the return value on the stack as the parameter.
2947 // Runtime::TraceExit returns its parameter in r0. We're leaving the code
2948 // managed by the register allocator and tearing down the frame, so it's
2949 // safe to write to the context register.
2951 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2952 __ CallRuntime(Runtime::kTraceExit, 1);
2954 if (info()->saves_caller_doubles()) {
2955 RestoreCallerDoubles();
2957 int no_frame_start = -1;
2958 if (NeedsEagerFrame()) {
2959 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2961 { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
2962 if (instr->has_constant_parameter_count()) {
2963 int parameter_count = ToInteger32(instr->constant_parameter_count());
2964 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2965 if (sp_delta != 0) {
2966 __ add(sp, sp, Operand(sp_delta));
2969 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2970 Register reg = ToRegister(instr->parameter_count());
2971 // The argument count parameter is a smi
2973 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2978 if (no_frame_start != -1) {
2979 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2986 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2987 DCHECK(FLAG_vector_ics);
2988 Register vector_register = ToRegister(instr->temp_vector());
2989 Register slot_register = VectorLoadICDescriptor::SlotRegister();
2990 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
2991 DCHECK(slot_register.is(r0));
2993 AllowDeferredHandleDereference vector_structure_check;
2994 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2995 __ Move(vector_register, vector);
2996 // No need to allocate this register.
2997 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2998 int index = vector->GetIndex(slot);
2999 __ mov(slot_register, Operand(Smi::FromInt(index)));
3003 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3004 DCHECK(ToRegister(instr->context()).is(cp));
3005 DCHECK(ToRegister(instr->global_object())
3006 .is(LoadDescriptor::ReceiverRegister()));
3007 DCHECK(ToRegister(instr->result()).is(r0));
3009 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3010 if (FLAG_vector_ics) {
3011 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3013 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3014 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
3015 PREMONOMORPHIC).code();
3016 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3020 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3021 Register context = ToRegister(instr->context());
3022 Register result = ToRegister(instr->result());
3023 __ ldr(result, ContextOperand(context, instr->slot_index()));
3024 if (instr->hydrogen()->RequiresHoleCheck()) {
3025 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3027 if (instr->hydrogen()->DeoptimizesOnHole()) {
3028 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3030 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
3036 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3037 Register context = ToRegister(instr->context());
3038 Register value = ToRegister(instr->value());
3039 Register scratch = scratch0();
3040 MemOperand target = ContextOperand(context, instr->slot_index());
3042 Label skip_assignment;
3044 if (instr->hydrogen()->RequiresHoleCheck()) {
3045 __ ldr(scratch, target);
3046 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3047 __ cmp(scratch, ip);
3048 if (instr->hydrogen()->DeoptimizesOnHole()) {
3049 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3051 __ b(ne, &skip_assignment);
3055 __ str(value, target);
3056 if (instr->hydrogen()->NeedsWriteBarrier()) {
3057 SmiCheck check_needed =
3058 instr->hydrogen()->value()->type().IsHeapObject()
3059 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3060 __ RecordWriteContextSlot(context,
3064 GetLinkRegisterState(),
3066 EMIT_REMEMBERED_SET,
3070 __ bind(&skip_assignment);
3074 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3075 HObjectAccess access = instr->hydrogen()->access();
3076 int offset = access.offset();
3077 Register object = ToRegister(instr->object());
3079 if (access.IsExternalMemory()) {
3080 Register result = ToRegister(instr->result());
3081 MemOperand operand = MemOperand(object, offset);
3082 __ Load(result, operand, access.representation());
3086 if (instr->hydrogen()->representation().IsDouble()) {
3087 DwVfpRegister result = ToDoubleRegister(instr->result());
3088 __ vldr(result, FieldMemOperand(object, offset));
3092 Register result = ToRegister(instr->result());
3093 if (!access.IsInobject()) {
3094 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3097 MemOperand operand = FieldMemOperand(object, offset);
3098 __ Load(result, operand, access.representation());
3102 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3103 DCHECK(ToRegister(instr->context()).is(cp));
3104 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3105 DCHECK(ToRegister(instr->result()).is(r0));
3107 // Name is always in r2.
3108 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3109 if (FLAG_vector_ics) {
3110 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3112 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
3113 isolate(), NOT_CONTEXTUAL,
3114 instr->hydrogen()->initialization_state()).code();
3115 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3119 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3120 Register scratch = scratch0();
3121 Register function = ToRegister(instr->function());
3122 Register result = ToRegister(instr->result());
3124 // Get the prototype or initial map from the function.
3126 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3128 // Check that the function has a prototype or an initial map.
3129 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3131 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3133 // If the function does not have an initial map, we're done.
3135 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3138 // Get the prototype from the initial map.
3139 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3146 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3147 Register result = ToRegister(instr->result());
3148 __ LoadRoot(result, instr->index());
3152 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3153 Register arguments = ToRegister(instr->arguments());
3154 Register result = ToRegister(instr->result());
3155 // There are two words between the frame pointer and the last argument.
3156 // Subtracting from length accounts for one of them; add one more.
3157 if (instr->length()->IsConstantOperand()) {
3158 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3159 if (instr->index()->IsConstantOperand()) {
3160 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3161 int index = (const_length - const_index) + 1;
3162 __ ldr(result, MemOperand(arguments, index * kPointerSize));
3164 Register index = ToRegister(instr->index());
3165 __ rsb(result, index, Operand(const_length + 1));
3166 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3168 } else if (instr->index()->IsConstantOperand()) {
3169 Register length = ToRegister(instr->length());
3170 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3171 int loc = const_index - 1;
3173 __ sub(result, length, Operand(loc));
3174 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3176 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3179 Register length = ToRegister(instr->length());
3180 Register index = ToRegister(instr->index());
3181 __ sub(result, length, index);
3182 __ add(result, result, Operand(1));
3183 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
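// Index-arithmetic example for the accessor above: arguments are addressed
// upward from 'arguments', with two words between the frame pointer and the
// last argument (see the comment at the top of the function). Loading
// argument const_index of const_length therefore uses slot
// (const_length - const_index) + 1; with length 3 and index 0 the emitted
// load is ldr(result, MemOperand(arguments, 4 * kPointerSize)).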
3188 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3189 Register external_pointer = ToRegister(instr->elements());
3190 Register key = no_reg;
3191 ElementsKind elements_kind = instr->elements_kind();
3192 bool key_is_constant = instr->key()->IsConstantOperand();
3193 int constant_key = 0;
3194 if (key_is_constant) {
3195 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3196 if (constant_key & 0xF0000000) {
3197 Abort(kArrayIndexConstantValueTooBig);
3200 key = ToRegister(instr->key());
3202 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3203 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3204 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3205 int base_offset = instr->base_offset();
3207 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3208 elements_kind == FLOAT32_ELEMENTS ||
3209 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3210 elements_kind == FLOAT64_ELEMENTS) {
3212 DwVfpRegister result = ToDoubleRegister(instr->result());
3213 Operand operand = key_is_constant
3214 ? Operand(constant_key << element_size_shift)
3215 : Operand(key, LSL, shift_size);
3216 __ add(scratch0(), external_pointer, operand);
3217 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3218 elements_kind == FLOAT32_ELEMENTS) {
3219 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3220 __ vcvt_f64_f32(result, double_scratch0().low());
3221 } else { // loading doubles, not floats.
3222 __ vldr(result, scratch0(), base_offset);
3225 Register result = ToRegister(instr->result());
3226 MemOperand mem_operand = PrepareKeyedOperand(
3227 key, external_pointer, key_is_constant, constant_key,
3228 element_size_shift, shift_size, base_offset);
3229 switch (elements_kind) {
3230 case EXTERNAL_INT8_ELEMENTS:
3232 __ ldrsb(result, mem_operand);
3234 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3235 case EXTERNAL_UINT8_ELEMENTS:
3236 case UINT8_ELEMENTS:
3237 case UINT8_CLAMPED_ELEMENTS:
3238 __ ldrb(result, mem_operand);
3240 case EXTERNAL_INT16_ELEMENTS:
3241 case INT16_ELEMENTS:
3242 __ ldrsh(result, mem_operand);
3244 case EXTERNAL_UINT16_ELEMENTS:
3245 case UINT16_ELEMENTS:
3246 __ ldrh(result, mem_operand);
3248 case EXTERNAL_INT32_ELEMENTS:
3249 case INT32_ELEMENTS:
3250 __ ldr(result, mem_operand);
3252 case EXTERNAL_UINT32_ELEMENTS:
3253 case UINT32_ELEMENTS:
3254 __ ldr(result, mem_operand);
3255 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3256 __ cmp(result, Operand(0x80000000));
3257 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
3260 case FLOAT32_ELEMENTS:
3261 case FLOAT64_ELEMENTS:
3262 case EXTERNAL_FLOAT32_ELEMENTS:
3263 case EXTERNAL_FLOAT64_ELEMENTS:
3264 case FAST_HOLEY_DOUBLE_ELEMENTS:
3265 case FAST_HOLEY_ELEMENTS:
3266 case FAST_HOLEY_SMI_ELEMENTS:
3267 case FAST_DOUBLE_ELEMENTS:
3269 case FAST_SMI_ELEMENTS:
3270 case DICTIONARY_ELEMENTS:
3271 case SLOPPY_ARGUMENTS_ELEMENTS:
3279 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3280 Register elements = ToRegister(instr->elements());
3281 bool key_is_constant = instr->key()->IsConstantOperand();
3282 Register key = no_reg;
3283 DwVfpRegister result = ToDoubleRegister(instr->result());
3284 Register scratch = scratch0();
3286 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3288 int base_offset = instr->base_offset();
3289 if (key_is_constant) {
3290 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3291 if (constant_key & 0xF0000000) {
3292 Abort(kArrayIndexConstantValueTooBig);
3294 base_offset += constant_key * kDoubleSize;
3296 __ add(scratch, elements, Operand(base_offset));
3298 if (!key_is_constant) {
3299 key = ToRegister(instr->key());
3300 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3301 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3302 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3305 __ vldr(result, scratch, 0);
3307 if (instr->hydrogen()->RequiresHoleCheck()) {
3308 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3309 __ cmp(scratch, Operand(kHoleNanUpper32));
3310 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
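// Hole-check detail for the double load above: scratch still addresses the
// element, so the ldr at offset sizeof(kHoleNanLower32) re-reads the upper
// word of the double just loaded. Only the hole NaN carries kHoleNanUpper32
// there (doubles stored into fast double arrays are NaN-canonicalized), so a
// match deopts with Deoptimizer::kHole.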
3315 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3316 Register elements = ToRegister(instr->elements());
3317 Register result = ToRegister(instr->result());
3318 Register scratch = scratch0();
3319 Register store_base = scratch;
3320 int offset = instr->base_offset();
3322 if (instr->key()->IsConstantOperand()) {
3323 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3324 offset += ToInteger32(const_operand) * kPointerSize;
3325 store_base = elements;
3327 Register key = ToRegister(instr->key());
3328 // Even though the HLoadKeyed instruction forces the input
3329 // representation for the key to be an integer, the input gets replaced
3330 // during bound check elimination with the index argument to the bounds
3331 // check, which can be tagged, so that case must be handled here, too.
3332 if (instr->hydrogen()->key()->representation().IsSmi()) {
3333 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3335 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3338 __ ldr(result, MemOperand(store_base, offset));
3340 // Check for the hole value.
3341 if (instr->hydrogen()->RequiresHoleCheck()) {
3342 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3344 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
3346 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3347 __ cmp(result, scratch);
3348 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3354 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3355 if (instr->is_typed_elements()) {
3356 DoLoadKeyedExternalArray(instr);
3357 } else if (instr->hydrogen()->representation().IsDouble()) {
3358 DoLoadKeyedFixedDoubleArray(instr);
3360 DoLoadKeyedFixedArray(instr);
3365 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3367 bool key_is_constant,
3372 if (key_is_constant) {
3373 return MemOperand(base, (constant_key << element_size) + base_offset);
3376 if (base_offset == 0) {
3377 if (shift_size >= 0) {
3378 return MemOperand(base, key, LSL, shift_size);
3380 DCHECK_EQ(-1, shift_size);
3381 return MemOperand(base, key, LSR, 1);
3385 if (shift_size >= 0) {
3386 __ add(scratch0(), base, Operand(key, LSL, shift_size));
3387 return MemOperand(scratch0(), base_offset);
3389 DCHECK_EQ(-1, shift_size);
3390 __ add(scratch0(), base, Operand(key, ASR, 1));
3391 return MemOperand(scratch0(), base_offset);
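// Key-scaling sketch for PrepareKeyedOperand: shift_size already folds the
// smi tag into the element scale, so shift_size == -1 means "untag a smi key
// for byte-sized elements". E.g. for UINT8_ELEMENTS with a smi key,
// element_size_shift is 0, subtracting kSmiTagSize leaves -1, and the key is
// scaled with LSR/ASR #1 instead of an LSL.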
3396 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3397 DCHECK(ToRegister(instr->context()).is(cp));
3398 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3399 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3401 if (FLAG_vector_ics) {
3402 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3406 CodeFactory::KeyedLoadICInOptimizedCode(
3407 isolate(), instr->hydrogen()->initialization_state()).code();
3408 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3412 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3413 Register scratch = scratch0();
3414 Register result = ToRegister(instr->result());
3416 if (instr->hydrogen()->from_inlined()) {
3417 __ sub(result, sp, Operand(2 * kPointerSize));
3419 // Check if the calling frame is an arguments adaptor frame.
3420 Label done, adapted;
3421 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3422 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3423 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3425 // Result is the frame pointer for the frame if not adapted and for the real
3426 // frame below the adaptor frame if adapted.
3427 __ mov(result, fp, LeaveCC, ne);
3428 __ mov(result, scratch, LeaveCC, eq);
3433 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3434 Register elem = ToRegister(instr->elements());
3435 Register result = ToRegister(instr->result());
3439 // If there is no arguments adaptor frame, the number of arguments is fixed.
3441 __ mov(result, Operand(scope()->num_parameters()));
3444 // Arguments adaptor frame present. Get argument length from there.
3445 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3447 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3448 __ SmiUntag(result);
3450 // Argument length is in result register.
3455 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3456 Register receiver = ToRegister(instr->receiver());
3457 Register function = ToRegister(instr->function());
3458 Register result = ToRegister(instr->result());
3459 Register scratch = scratch0();
3461 // If the receiver is null or undefined, we have to pass the global
3462 // object as a receiver to normal functions. Values have to be
3463 // passed unchanged to builtins and strict-mode functions.
3464 Label global_object, result_in_receiver;
3466 if (!instr->hydrogen()->known_function()) {
3467 // Do not transform the receiver to object for strict mode functions.
3470 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3472 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3473 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3474 __ tst(scratch, Operand(mask));
3475 __ b(ne, &result_in_receiver);
3477 // Do not transform the receiver to object for builtins.
3478 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3479 __ b(ne, &result_in_receiver);
3482 // Normal function. Replace undefined or null with global receiver.
3483 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3484 __ cmp(receiver, scratch);
3485 __ b(eq, &global_object);
3486 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3487 __ cmp(receiver, scratch);
3488 __ b(eq, &global_object);
3490 // Deoptimize if the receiver is not a JS object.
3491 __ SmiTst(receiver);
3492 DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
3493 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3494 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3496 __ b(&result_in_receiver);
3497 __ bind(&global_object);
3498 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3500 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3501 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3503 if (result.is(receiver)) {
3504 __ bind(&result_in_receiver);
3508 __ bind(&result_in_receiver);
3509 __ mov(result, receiver);
3510 __ bind(&result_ok);
3515 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3516 Register receiver = ToRegister(instr->receiver());
3517 Register function = ToRegister(instr->function());
3518 Register length = ToRegister(instr->length());
3519 Register elements = ToRegister(instr->elements());
3520 Register scratch = scratch0();
3521 DCHECK(receiver.is(r0)); // Used for parameter count.
3522 DCHECK(function.is(r1)); // Required by InvokeFunction.
3523 DCHECK(ToRegister(instr->result()).is(r0));
3525 // Copy the arguments to this function possibly from the
3526 // adaptor frame below it.
3527 const uint32_t kArgumentsLimit = 1 * KB;
3528 __ cmp(length, Operand(kArgumentsLimit));
3529 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
3531 // Push the receiver and use the register to keep the original
3532 // number of arguments.
3534 __ mov(receiver, length);
3535 // The arguments are at a one pointer size offset from elements.
3536 __ add(elements, elements, Operand(1 * kPointerSize));
3538 // Loop through the arguments pushing them onto the execution
3541 // length is a small non-negative integer, due to the test above.
3542 __ cmp(length, Operand::Zero());
3545 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3547 __ sub(length, length, Operand(1), SetCC);
3551 DCHECK(instr->HasPointerMap());
3552 LPointerMap* pointers = instr->pointer_map();
3553 SafepointGenerator safepoint_generator(
3554 this, pointers, Safepoint::kLazyDeopt);
3555 // The number of arguments is stored in receiver which is r0, as expected
3556 // by InvokeFunction.
3557 ParameterCount actual(receiver);
3558 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3562 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3563 LOperand* argument = instr->value();
3564 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3565 Abort(kDoPushArgumentNotImplementedForDoubleType);
3567 Register argument_reg = EmitLoadRegister(argument, ip);
3568 __ push(argument_reg);
3573 void LCodeGen::DoDrop(LDrop* instr) {
3574 __ Drop(instr->count());
3578 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3579 Register result = ToRegister(instr->result());
3580 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3584 void LCodeGen::DoContext(LContext* instr) {
3585 // If there is a non-return use, the context must be moved to a register.
3586 Register result = ToRegister(instr->result());
3587 if (info()->IsOptimizing()) {
3588 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3590 // If there is no frame, the context must be in cp.
3591 DCHECK(result.is(cp));
3596 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3597 DCHECK(ToRegister(instr->context()).is(cp));
3598 __ push(cp); // The context is the first argument.
3599 __ Move(scratch0(), instr->hydrogen()->pairs());
3600 __ push(scratch0());
3601 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3602 __ push(scratch0());
3603 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3607 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3608 int formal_parameter_count, int arity,
3609 LInstruction* instr) {
3610 bool dont_adapt_arguments =
3611 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3612 bool can_invoke_directly =
3613 dont_adapt_arguments || formal_parameter_count == arity;
3615 Register function_reg = r1;
3617 LPointerMap* pointers = instr->pointer_map();
3619 if (can_invoke_directly) {
3621 __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3623 // Set r0 to the arguments count if adaptation is not needed. Assumes that r0
3624 // is available to write to at this point.
3625 if (dont_adapt_arguments) {
3626 __ mov(r0, Operand(arity));
3630 __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3633 // Set up deoptimization.
3634 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3636 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3637 ParameterCount count(arity);
3638 ParameterCount expected(formal_parameter_count);
3639 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
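
// The overflow case above is concrete for kMinInt: abs(-2^31) == +2^31 is
// not representable in 32 bits, so "rsb result, input, #0" wraps back to
// 0x80000000 and sets the V flag, which the DeoptimizeIf(vs, ...) catches.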


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}
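
// Math.round rounds ties toward +infinity, which is why the general case
// above computes floor(input + 0.5): e.g. round(2.5) == 3, while
// round(-2.5) == floor(-2.0) == -2.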


void LCodeGen::DoMathFround(LMathFround* instr) {
  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  DwVfpRegister output_reg = ToDoubleRegister(instr->result());
  LowDwVfpRegister scratch = double_scratch0();
  __ vcvt_f32_f64(scratch.low(), input_reg);
  __ vcvt_f64_f32(output_reg, scratch.low());
}
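
// Math.fround(x) is the double nearest to the float32 nearest to x, so one
// round trip through a single-precision register is sufficient; e.g.
// fround(0.1) == 0.10000000149011612, the float32 value closest to 0.1.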


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d0));
  DCHECK(ToDoubleRegister(instr->result()).is(d2));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r6.is(tagged_exponent));
    __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ clz(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoTailCallThroughMegamorphicCache(
    LTailCallThroughMegamorphicCache* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register name = ToRegister(instr->name());
  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(name.is(LoadDescriptor::NameRegister()));
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  Register scratch = r4;
  Register extra = r5;
  Register extra2 = r6;
  Register extra3 = r9;

#ifdef DEBUG
  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
  DCHECK(!FLAG_vector_ics ||
         !AreAliased(slot, vector, scratch, extra, extra2, extra3));
#endif

  // Important for the tail-call.
  bool must_teardown_frame = NeedsEagerFrame();

  if (!instr->hydrogen()->is_just_miss()) {
    DCHECK(!instr->hydrogen()->is_keyed_load());

    // The probe will tail call to a handler if found.
    isolate()->stub_cache()->GenerateProbe(
        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
        receiver, name, scratch, extra, extra2, extra3);
  }

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
  if (instr->hydrogen()->is_keyed_load()) {
    KeyedLoadIC::GenerateMiss(masm());
  } else {
    LoadIC::GenerateMiss(masm());
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      // Make sure we don't emit any additional entries in the constant pool
      // before the call to ensure that the CallCodeSize() calculated the
      // correct number of instructions for the constant pool load.
      {
        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      }
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      PlatformInterfaceDescriptor* call_descriptor =
          instr->descriptor().platform_specific_descriptor();
      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
              call_descriptor->storage_mode());
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      // Make sure we don't emit any additional entries in the constant pool
      // before the call to ensure that the CallCodeSize() calculated the
      // correct number of instructions for the constant pool load.
      {
        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      }
      __ Call(target);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }

  // Change context.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Load the code entry address.
  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ Call(ip);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(r3));
    DCHECK(vector_register.is(r2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ mov(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r1));
  DCHECK(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to switch to the holey kind: look at the first
      // argument. A single argument is a length, and a nonzero length
      // creates holes.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
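
// The packed/holey split above mirrors the JS semantics of a one-argument
// Array call: new Array(0) can stay packed, while new Array(n) with n != 0
// only sets a length and therefore produces holes.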


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(code_object,
         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ add(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}
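
// Both RecordWriteField calls above maintain the generational GC invariant:
// a store that may create an old-to-new pointer has to be recorded in the
// remembered set, otherwise a scavenge could move the value without fixing
// up this field.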


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      StoreIC::initialize_stub(isolate(), instr->language_mode(),
                               instr->hydrogen()->initialization_state());
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    Operand index = ToOperand(instr->index());
    Register length = ToRegister(instr->length());
    __ cmp(length, index);
    cc = CommuteCondition(cc);
  } else {
    Register index = ToRegister(instr->index());
    Operand length = ToOperand(instr->length());
    __ cmp(index, length);
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
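
// Note on CommuteCondition above: with a constant index the operands are
// compared in reverse order (cmp length, index), so the deopt condition is
// commuted rather than negated; e.g. "index hs length" becomes
// "length ls index", which covers exactly the same pairs.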


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, base_offset);
    } else {  // Storing doubles, not floats.
      __ vstr(value, address, base_offset);
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ str(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
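
// The shift_size adjustment above folds smi untagging into the address
// arithmetic: a smi key already equals index << kSmiTagSize, so scaling it
// by (element_size_shift - kSmiTagSize) still yields index scaled by the
// element size. For an int32 element (shift 2) and index 3, the smi key 6
// shifted left by 1 gives the same byte offset 12 as 3 << 2.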


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements, Operand(base_offset));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN.
    if (masm()->emit_debug_code()) {
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch, 0);
  } else {
    __ vstr(value, scratch, 0);
  }
}
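
// Canonicalizing NaNs before the store matters because a FixedDoubleArray
// reserves one particular NaN bit pattern as its hole marker; collapsing
// every NaN to the canonical quiet NaN keeps arbitrary NaN payloads (e.g.
// the result of 0/0) from being misread as a hole by a later load.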


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
  }
  __ str(value, MemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, and fast element.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
      isolate(), instr->language_mode(),
      instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r0));
    PushSafepointRegistersScope scope(this);
    __ Move(r1, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r1));
  DCHECK(ToRegister(instr->right()).is(r0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(input, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  LowDwVfpRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ eor(src, src, Operand(0x80000000));
    }
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ sub(r0, r0, Operand(kHeapObjectTag));
    __ StoreToSafepointRegisterSlot(r0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
  __ add(dst, dst, Operand(kHeapObjectTag));
}
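
// The eor with 0x80000000 above recovers the original value: SmiTag is a
// left shift by one that overflowed, so shifting the result back
// arithmetically reproduces every bit except bit 31, which comes back
// inverted (bits 30 and 31 of the original disagreed). For example,
// 0x40000000 tags to 0x80000000, untags to 0xC0000000, and the eor yields
// 0x40000000 again.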


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ tst(input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTag(output, input, SetCC);
    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
  } else {
    __ SmiTag(output, input);
  }
}
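
// The 0xc0000000 test above is the uint32 smi range check: a 32-bit smi
// payload holds 31 signed bits, so an unsigned value only fits if it is
// below 2^30, i.e. bits 30 and 31 are both clear. 0x3FFFFFFF still tags
// fine, while 0x40000000 would change sign under the tagging shift.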


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DwVfpRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  DCHECK(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // Load the heap number.
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}
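
// The minus-zero test above relies on the IEEE-754 encoding: -0.0 is the
// only double whose low word is zero and whose high word equals
// HeapNumber::kSignMask (0x80000000), so two integer compares suffice.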


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just
  // executed SmiUntag(heap_object, SetCC).
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ adc(scratch2, input_reg, Operand(input_reg));
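  // The adc above computes scratch2 = input_reg + input_reg + carry. The
  // optimistic SmiUntag was an arithmetic shift right by one that moved the
  // low (tag) bit into the carry flag, so this exactly rebuilds the original
  // tagged heap object pointer in scratch2.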

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ mov(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ VmovHigh(result_reg, value_reg);
  } else {
    __ VmovLow(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
  __ VmovHigh(result_reg, hi_reg);
  __ VmovLow(result_reg, lo_reg);
}
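
// VmovHigh/VmovLow write the two halves of the IEEE-754 double: the high
// word holds the sign, the exponent and the top 20 mantissa bits, the low
// word the remaining 32 mantissa bits; e.g. hi == 0x3FF00000 with lo == 0
// reconstructs exactly 1.0.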


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
    __ str(scratch2, MemOperand(result, scratch));
    __ b(ge, &loop);
  }
}
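
// Note on the filler loop above: result holds a tagged pointer (object start
// + kHeapObjectTag) and scratch counts down from size - kHeapObjectTag in
// kPointerSize steps, so the stores at MemOperand(result, scratch) walk from
// the last word of the object down to offset -kHeapObjectTag, i.e. the
// untagged object start, keeping the whole block iterable by the GC.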


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}
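
// The "2" passed to CallRuntimeFromDeferred above matches the two smi
// arguments pushed for Runtime::kAllocateInTargetSpace: the allocation size
// and the flags word (double alignment plus target space).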


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
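
// In short: the literal slot is materialized at most once via
// Runtime::kMaterializeRegExpLiteral, and each evaluation then receives a
// fresh shallow copy of the boilerplate (JSRegExp header plus in-object
// fields), allocated inline when possible and through
// Runtime::kAllocateInNewSpace otherwise.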


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ b(eq, true_label);
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}
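
// The undetectable bit accounts for the asymmetry above: objects with
// Map::kIsUndetectable set (e.g. document.all in browser embeddings) must
// report typeof "undefined", so the undefined case succeeds on ne after
// testing the bit while the string and object cases succeed on eq.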


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
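
// The predicated ldr above (condition eq) is an ARM conditional-execution
// idiom: when the context slot matched the arguments-adaptor sentinel, temp1
// is replaced by the adaptor frame's caller fp; otherwise the load is
// skipped, avoiding an explicit branch.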


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
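
// Padding arithmetic, for illustration: with Assembler::kInstrSize == 4 on
// ARM, if the previous lazy-deopt point was 8 bytes back and space_needed is
// 16, the loop emits (16 - 8) / 4 = 2 nops before the new pc is recorded.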


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
    PredictableCodeSizeScope predictable(masm(),
        CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
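
// Two flavors of check above: at function entry the StackCheck builtin is
// called inline (inside a PredictableCodeSizeScope so the call sequence has
// a fixed size), while at backwards branches the slow path is moved into
// deferred code that calls Runtime::kStackGuard, keeping the loop body short.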


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr, Deoptimizer::kNull);

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}
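
// Fast path: when CheckEnumCache succeeds, the receiver's map itself is the
// result handed on to DoForInCacheArray. On the slow path the runtime call
// must also return a map (checked against the meta map); a plain fixed array
// of names triggers a deopt so the unoptimized code can handle that case.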


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ mov(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ tst(index, Operand(Smi::FromInt(1)));
  __ b(ne, deferred->entry());
  __ mov(index, Operand(index, ASR, 1));

  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}
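
// Index encoding, as the code above reads it: the incoming index is a smi
// whose payload is (field_index << 1) | is_mutable_double. For in-object
// field 3 stored as a plain value the payload is 6 and the raw register word
// is 12; tst against Smi::FromInt(1) (raw value 2) finds the flag clear, and
// ASR #1 turns the word into 6 -- the smi for field index 3 -- ready for
// PointerOffsetFromSmiKey. Negative indices select the out-of-object
// properties array instead.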


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

} }  // namespace v8::internal