// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}

void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->strict_mode() == SLOPPY &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        __ RecordWriteContextSlot(
            cp,
            target.offset(),
            r0,
            r3,
            GetLinkRegisterState(),
            kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}

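// Illustrative numbers (assumed for exposition, not taken from the
// surrounding code): if the optimized code needs 10 stack slots and the
// unoptimized frame being subsumed already provides 4, only the remaining
// 6 * kPointerSize bytes are reserved here.
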
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}

bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ PushFixedFrame();
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}

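// Illustrative note (added for exposition): on 32-bit ARM a Smi stores the
// integer shifted left by one with a zero tag bit, so for value == 5 the
// reinterpret_cast above yields the bit pattern 0xA (5 << 1), while the
// Integer32 path returns the raw 5.
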
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  ASSERT(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ movw(r1, FLAG_deopt_every_n_times, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}

void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}

int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    // Register pp always contains a pointer to the constant pool.
    safepoint.DefinePointerRegister(pp, zone());
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}

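// Worked example of the mask trick above (illustrative values, added for
// exposition): for divisor == -8, mask == -(-8 + 1) == 7. A dividend of -13
// is negated to 13, masked to 13 & 7 == 5, and negated back to -5, which
// matches -13 % -8 == -5 under JavaScript semantics (the remainder takes the
// sign of the dividend).
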
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ smull(result, ip, result, ip);
  __ sub(result, dividend, result, SetCC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ b(ne, &remainder_not_zero);
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr->environment());
    __ bind(&remainder_not_zero);
  }
}

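// Illustrative check of the sequence above (added for exposition): it
// computes dividend - |divisor| * TruncatingDiv(dividend, |divisor|). For
// dividend == -13 and divisor == 5 (or -5), TruncatingDiv yields -2 and the
// remainder is -13 - 5 * -2 == -3; the divisor's sign never affects the
// result, matching JavaScript's dividend-signed remainder.
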
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in
    // this case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return
    // that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return
    // a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}

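// Worked example of the bias-and-shift sequence above (illustrative values,
// added for exposition): for divisor == 4 (shift == 2) and a dividend of -7,
// the sign bits shifted with LSR produce a bias of 2^2 - 1 == 3, so the code
// computes (-7 + 3) >> 2 == -1, i.e. the quotient truncated toward zero
// rather than the -2 a plain arithmetic shift would give.
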
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr->environment());
  }
}

void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(left, Operand(kMinInt));
    __ cmp(right, Operand(-1), eq);
    DeoptimizeIf(eq, instr->environment());
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);
  } else {
    DoubleRegister vleft = ToDoubleRegister(instr->temp());
    DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());
  }

  if (hdiv->IsMathFloorOfDiv()) {
    Label done;
    Register remainder = scratch0();
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    __ b(eq, &done);
    __ eor(remainder, remainder, Operand(right));
    __ add(result, result, Operand(remainder, ASR, 31));
    __ bind(&done);
  } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Compute remainder and deopt if it's not zero.
    Register remainder = scratch0();
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    DeoptimizeIf(ne, instr->environment());
  }
}

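// Illustrative trace of the flooring fix-up above (added for exposition):
// for 7 / -2, sdiv truncates to -3 and mls leaves remainder
// 7 - (-3 * -2) == 1. Since the remainder and the divisor have opposite
// signs, (remainder ^ right) has its sign bit set, ASR 31 yields -1, and the
// quotient is adjusted to -4 == floor(-3.5).
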
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();

  // If the divisor is positive, things are easy: There can be no deopts and
  // we can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ mov(result, Operand(dividend, ASR, shift));
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ rsb(result, dividend, Operand::Zero(), SetCC);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr->environment());
  }

  // If the negation could not overflow, simply shifting is OK. Note that it
  // is the negated dividend in result that must be shifted here; shifting
  // the original dividend would produce a quotient of the wrong sign.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ mov(result, Operand(result, ASR, shift));
    return;
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(vs, instr->environment());
    return;
  }

  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ rsb(result, result, Operand::Zero());
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  ASSERT(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmp(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ jmp(&done);
  __ bind(&needs_adjustment);
  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());
  __ sub(result, result, Operand(1));
  __ bind(&done);
}

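// Illustrative trace of the adjustment above (added for exposition): for
// dividend == -7 and divisor == 2, the dividend and divisor have opposite
// signs, so temp == -7 + 1 == -6, TruncatingDiv gives -3, and the final
// subtraction produces -4 == floor(-3.5), where plain truncating division
// would have returned -3.
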
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr->environment());
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
      __ bind(&done);
    }
  }
}

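// Worked examples of the shifted-operand multiplies above (illustrative
// values, added for exposition): a constant of 8 becomes left << 3;
// 9 == 8 + 1 becomes left + (left << 3); 7 == 8 - 1 becomes
// (left << 3) - left via rsb. For a negative constant such as -7, the
// mask/xor pair computes constant_abs == 7 and a final rsb negates the
// product.
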
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

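// Note on the Smi left-shift path above (added explanation): a Smi is
// already stored as value << 1, so the code shifts by shift_count - 1 and
// performs the final doubling via SmiTag with SetCC, an add that sets the
// overflow flag; the following DeoptimizeIf(vs, ...) then catches results
// that no longer fit in a Smi.
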
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v, scratch0());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(r0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

1892 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1893 LOperand* index,
1894 String::Encoding encoding) {
1895 if (index->IsConstantOperand()) {
1896 int offset = ToInteger32(LConstantOperand::cast(index));
1897 if (encoding == String::TWO_BYTE_ENCODING) {
1898 offset *= kUC16Size;
1899 }
1900 STATIC_ASSERT(kCharSize == 1);
1901 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1902 }
1903 Register scratch = scratch0();
1904 ASSERT(!scratch.is(string));
1905 ASSERT(!scratch.is(ToRegister(index)));
1906 if (encoding == String::ONE_BYTE_ENCODING) {
1907 __ add(scratch, string, Operand(ToRegister(index)));
1908 } else {
1909 STATIC_ASSERT(kUC16Size == 2);
1910 __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
1911 }
1912 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1913 }
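// Worked example (illustrative): for a two-byte string with constant index
// 3, the operand built above addresses SeqString::kHeaderSize +
// 3 * kUC16Size = kHeaderSize + 6 bytes into the string; a register index
// gets the same scaling via LSL #1.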
1916 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1917 String::Encoding encoding = instr->hydrogen()->encoding();
1918 Register string = ToRegister(instr->string());
1919 Register result = ToRegister(instr->result());
1921 if (FLAG_debug_code) {
1922 Register scratch = scratch0();
1923 __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1924 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1926 __ and_(scratch, scratch,
1927 Operand(kStringRepresentationMask | kStringEncodingMask));
1928 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1929 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1930 __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1931 ? one_byte_seq_type : two_byte_seq_type));
1932 __ Check(eq, kUnexpectedStringType);
1933 }
1935 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1936 if (encoding == String::ONE_BYTE_ENCODING) {
1937 __ ldrb(result, operand);
1938 } else {
1939 __ ldrh(result, operand);
1940 }
1941 }
1944 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1945 String::Encoding encoding = instr->hydrogen()->encoding();
1946 Register string = ToRegister(instr->string());
1947 Register value = ToRegister(instr->value());
1949 if (FLAG_debug_code) {
1950 Register index = ToRegister(instr->index());
1951 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1952 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1953 static const uint32_t encoding_mask =
1954 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1955 ? one_byte_seq_type : two_byte_seq_type;
1956 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1957 }
1959 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1960 if (encoding == String::ONE_BYTE_ENCODING) {
1961 __ strb(value, operand);
1962 } else {
1963 __ strh(value, operand);
1964 }
1965 }
1968 void LCodeGen::DoAddI(LAddI* instr) {
1969 LOperand* left = instr->left();
1970 LOperand* right = instr->right();
1971 LOperand* result = instr->result();
1972 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1973 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1975 if (right->IsStackSlot()) {
1976 Register right_reg = EmitLoadRegister(right, ip);
1977 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1978 } else {
1979 ASSERT(right->IsRegister() || right->IsConstantOperand());
1980 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1981 }
1983 if (can_overflow) {
1984 DeoptimizeIf(vs, instr->environment());
1985 }
1986 }
1989 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1990 LOperand* left = instr->left();
1991 LOperand* right = instr->right();
1992 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1993 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1994 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1995 Register left_reg = ToRegister(left);
1996 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1997 ? ToOperand(right)
1998 : Operand(EmitLoadRegister(right, ip));
1999 Register result_reg = ToRegister(instr->result());
2000 __ cmp(left_reg, right_op);
2001 __ Move(result_reg, left_reg, condition);
2002 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
2003 } else {
2004 ASSERT(instr->hydrogen()->representation().IsDouble());
2005 DwVfpRegister left_reg = ToDoubleRegister(left);
2006 DwVfpRegister right_reg = ToDoubleRegister(right);
2007 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
2008 Label result_is_nan, return_left, return_right, check_zero, done;
2009 __ VFPCompareAndSetFlags(left_reg, right_reg);
2010 if (operation == HMathMinMax::kMathMin) {
2011 __ b(mi, &return_left);
2012 __ b(gt, &return_right);
2013 } else {
2014 __ b(mi, &return_right);
2015 __ b(gt, &return_left);
2016 }
2017 __ b(vs, &result_is_nan);
2018 // Left equals right => check for -0.
2019 __ VFPCompareAndSetFlags(left_reg, 0.0);
2020 if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
2021 __ b(ne, &done); // left == right != 0.
2022 } else {
2023 __ b(ne, &return_left); // left == right != 0.
2024 }
2025 // At this point, both left and right are either 0 or -0.
2026 if (operation == HMathMinMax::kMathMin) {
2027 // We could use a single 'vorr' instruction here if we had NEON support.
2028 __ vneg(left_reg, left_reg);
2029 __ vsub(result_reg, left_reg, right_reg);
2030 __ vneg(result_reg, result_reg);
2031 } else {
2032 // Since we operate on +0 and/or -0, vadd and vand have the same effect;
2033 // the decision for vadd is easy because vand is a NEON instruction.
2034 __ vadd(result_reg, left_reg, right_reg);
2035 }
2036 __ b(&done);
2038 __ bind(&result_is_nan);
2039 __ vadd(result_reg, left_reg, right_reg);
2040 __ b(&done);
2042 __ bind(&return_right);
2043 __ Move(result_reg, right_reg);
2044 if (!left_reg.is(result_reg)) {
2045 __ b(&done);
2046 }
2048 __ bind(&return_left);
2049 __ Move(result_reg, left_reg);
2051 __ bind(&done);
2052 }
2053 }
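// Note (illustrative): for min, the vneg/vsub/vneg sequence computes
// -((-a) - b), which over +/-0 yields -0 whenever either input is -0 and
// +0 only when both are +0. Symmetrically, vadd gives +0 for max unless
// both inputs are -0, since (+0) + (-0) == +0 in IEEE arithmetic.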
2056 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2057 DwVfpRegister left = ToDoubleRegister(instr->left());
2058 DwVfpRegister right = ToDoubleRegister(instr->right());
2059 DwVfpRegister result = ToDoubleRegister(instr->result());
2060 switch (instr->op()) {
2061 case Token::ADD:
2062 __ vadd(result, left, right);
2063 break;
2064 case Token::SUB:
2065 __ vsub(result, left, right);
2066 break;
2067 case Token::MUL:
2068 __ vmul(result, left, right);
2069 break;
2070 case Token::DIV:
2071 __ vdiv(result, left, right);
2072 break;
2073 case Token::MOD: {
2074 __ PrepareCallCFunction(0, 2, scratch0());
2075 __ MovToFloatParameters(left, right);
2076 __ CallCFunction(
2077 ExternalReference::mod_two_doubles_operation(isolate()),
2078 0, 2);
2079 // Move the result into the double result register.
2080 __ MovFromFloatResult(result);
2081 break;
2082 }
2083 default:
2084 UNREACHABLE();
2085 break;
2086 }
2087 }
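// Token::MOD is the one case with no VFP instruction: the operands are
// moved to the float parameter registers and mod_two_doubles_operation is
// evaluated in C, while ADD/SUB/MUL/DIV map directly onto vadd/vsub/vmul/vdiv.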
2090 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2091 ASSERT(ToRegister(instr->context()).is(cp));
2092 ASSERT(ToRegister(instr->left()).is(r1));
2093 ASSERT(ToRegister(instr->right()).is(r0));
2094 ASSERT(ToRegister(instr->result()).is(r0));
2096 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2097 // Block literal pool emission to ensure nop indicating no inlined smi code
2098 // is in the correct position.
2099 Assembler::BlockConstPoolScope block_const_pool(masm());
2100 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2101 }
2104 template<class InstrType>
2105 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2106 int left_block = instr->TrueDestination(chunk_);
2107 int right_block = instr->FalseDestination(chunk_);
2109 int next_block = GetNextEmittedBlock();
2111 if (right_block == left_block || condition == al) {
2112 EmitGoto(left_block);
2113 } else if (left_block == next_block) {
2114 __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
2115 } else if (right_block == next_block) {
2116 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2117 } else {
2118 __ b(condition, chunk_->GetAssemblyLabel(left_block));
2119 __ b(chunk_->GetAssemblyLabel(right_block));
2120 }
2121 }
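// Note (illustrative): EmitBranch emits only the branches that are needed.
// Identical destinations (or an al condition) collapse into a goto, a true
// block that is the fall-through needs just one negated branch to the
// false block, and only the general case emits two branches.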
2124 template<class InstrType>
2125 void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
2126 int false_block = instr->FalseDestination(chunk_);
2127 __ b(condition, chunk_->GetAssemblyLabel(false_block));
2128 }
2131 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2132 __ stop("LDebugBreak");
2133 }
2136 void LCodeGen::DoBranch(LBranch* instr) {
2137 Representation r = instr->hydrogen()->value()->representation();
2138 if (r.IsInteger32() || r.IsSmi()) {
2139 ASSERT(!info()->IsStub());
2140 Register reg = ToRegister(instr->value());
2141 __ cmp(reg, Operand::Zero());
2142 EmitBranch(instr, ne);
2143 } else if (r.IsDouble()) {
2144 ASSERT(!info()->IsStub());
2145 DwVfpRegister reg = ToDoubleRegister(instr->value());
2146 // Test the double value. Zero and NaN are false.
2147 __ VFPCompareAndSetFlags(reg, 0.0);
2148 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false)
2149 EmitBranch(instr, ne);
2150 } else {
2151 ASSERT(r.IsTagged());
2152 Register reg = ToRegister(instr->value());
2153 HType type = instr->hydrogen()->value()->type();
2154 if (type.IsBoolean()) {
2155 ASSERT(!info()->IsStub());
2156 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2157 EmitBranch(instr, eq);
2158 } else if (type.IsSmi()) {
2159 ASSERT(!info()->IsStub());
2160 __ cmp(reg, Operand::Zero());
2161 EmitBranch(instr, ne);
2162 } else if (type.IsJSArray()) {
2163 ASSERT(!info()->IsStub());
2164 EmitBranch(instr, al);
2165 } else if (type.IsHeapNumber()) {
2166 ASSERT(!info()->IsStub());
2167 DwVfpRegister dbl_scratch = double_scratch0();
2168 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2169 // Test the double value. Zero and NaN are false.
2170 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2171 __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN)
2172 EmitBranch(instr, ne);
2173 } else if (type.IsString()) {
2174 ASSERT(!info()->IsStub());
2175 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2176 __ cmp(ip, Operand::Zero());
2177 EmitBranch(instr, ne);
2178 } else {
2179 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2180 // Avoid deopts in the case where we've never executed this path before.
2181 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2183 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2184 // undefined -> false.
2185 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2186 __ b(eq, instr->FalseLabel(chunk_));
2187 }
2188 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2189 // Boolean -> its value.
2190 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2191 __ b(eq, instr->TrueLabel(chunk_));
2192 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2193 __ b(eq, instr->FalseLabel(chunk_));
2194 }
2195 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2196 // 'null' -> false.
2197 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2198 __ b(eq, instr->FalseLabel(chunk_));
2199 }
2201 if (expected.Contains(ToBooleanStub::SMI)) {
2202 // Smis: 0 -> false, all other -> true.
2203 __ cmp(reg, Operand::Zero());
2204 __ b(eq, instr->FalseLabel(chunk_));
2205 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2206 } else if (expected.NeedsMap()) {
2207 // If we need a map later and have a Smi -> deopt.
2208 __ SmiTst(reg);
2209 DeoptimizeIf(eq, instr->environment());
2210 }
2212 const Register map = scratch0();
2213 if (expected.NeedsMap()) {
2214 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2216 if (expected.CanBeUndetectable()) {
2217 // Undetectable -> false.
2218 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2219 __ tst(ip, Operand(1 << Map::kIsUndetectable));
2220 __ b(ne, instr->FalseLabel(chunk_));
2221 }
2222 }
2224 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2225 // spec object -> true.
2226 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2227 __ b(ge, instr->TrueLabel(chunk_));
2228 }
2230 if (expected.Contains(ToBooleanStub::STRING)) {
2231 // String value -> false iff empty.
2232 Label not_string;
2233 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2234 __ b(ge, &not_string);
2235 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2236 __ cmp(ip, Operand::Zero());
2237 __ b(ne, instr->TrueLabel(chunk_));
2238 __ b(instr->FalseLabel(chunk_));
2239 __ bind(&not_string);
2240 }
2242 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2243 // Symbol value -> true.
2244 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2245 __ b(eq, instr->TrueLabel(chunk_));
2246 }
2248 if (expected.Contains(ToBooleanStub::FLOAT32x4)) {
2249 // Float32x4 value -> true.
2250 __ CompareInstanceType(map, ip, FLOAT32x4_TYPE);
2251 __ b(eq, instr->TrueLabel(chunk_));
2252 }
2254 if (expected.Contains(ToBooleanStub::INT32x4)) {
2255 // Int32x4 value -> true.
2256 __ CompareInstanceType(map, ip, INT32x4_TYPE);
2257 __ b(eq, instr->TrueLabel(chunk_));
2258 }
2260 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2261 // heap number -> false iff +0, -0, or NaN.
2262 DwVfpRegister dbl_scratch = double_scratch0();
2263 Label not_heap_number;
2264 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2265 __ b(ne, &not_heap_number);
2266 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2267 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2268 __ cmp(r0, r0, vs); // NaN -> false.
2269 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false.
2270 __ b(instr->TrueLabel(chunk_));
2271 __ bind(&not_heap_number);
2272 }
2274 if (!expected.IsGeneric()) {
2275 // We've seen something for the first time -> deopt.
2276 // This can only happen if we are not generic already.
2277 DeoptimizeIf(al, instr->environment());
2278 }
2279 }
2280 }
2281 }
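// Note (illustrative): the generic tagged path above tests only the types
// recorded in expected_input_types(); a value outside the observed set
// takes the DeoptimizeIf(al, ...) path so the type feedback can be
// widened, and once the set is Generic() no deopt is emitted at all.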
2284 void LCodeGen::EmitGoto(int block) {
2285 if (!IsNextEmittedBlock(block)) {
2286 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2287 }
2288 }
2291 void LCodeGen::DoGoto(LGoto* instr) {
2292 EmitGoto(instr->block_id());
2293 }
2296 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2297 Condition cond = kNoCondition;
2298 switch (op) {
2299 case Token::EQ:
2300 case Token::EQ_STRICT:
2301 cond = eq;
2302 break;
2303 case Token::NE:
2304 case Token::NE_STRICT:
2305 cond = ne;
2306 break;
2307 case Token::LT:
2308 cond = is_unsigned ? lo : lt;
2309 break;
2310 case Token::GT:
2311 cond = is_unsigned ? hi : gt;
2312 break;
2313 case Token::LTE:
2314 cond = is_unsigned ? ls : le;
2315 break;
2316 case Token::GTE:
2317 cond = is_unsigned ? hs : ge;
2318 break;
2319 case Token::IN:
2320 case Token::INSTANCEOF:
2321 default:
2322 UNREACHABLE();
2323 }
2324 return cond;
2325 }
2328 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2329 LOperand* left = instr->left();
2330 LOperand* right = instr->right();
2331 Condition cond = TokenToCondition(instr->op(), false);
2333 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2334 // We can statically evaluate the comparison.
2335 double left_val = ToDouble(LConstantOperand::cast(left));
2336 double right_val = ToDouble(LConstantOperand::cast(right));
2337 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2338 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2339 EmitGoto(next_block);
2340 } else {
2341 if (instr->is_double()) {
2342 // Compare left and right operands as doubles and load the
2343 // resulting flags into the normal status register.
2344 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2345 // If a NaN is involved, i.e. the result is unordered (V set),
2346 // jump to false block label.
2347 __ b(vs, instr->FalseLabel(chunk_));
2348 } else {
2349 if (right->IsConstantOperand()) {
2350 int32_t value = ToInteger32(LConstantOperand::cast(right));
2351 if (instr->hydrogen_value()->representation().IsSmi()) {
2352 __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
2353 } else {
2354 __ cmp(ToRegister(left), Operand(value));
2355 }
2356 } else if (left->IsConstantOperand()) {
2357 int32_t value = ToInteger32(LConstantOperand::cast(left));
2358 if (instr->hydrogen_value()->representation().IsSmi()) {
2359 __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
2360 } else {
2361 __ cmp(ToRegister(right), Operand(value));
2362 }
2363 // We transposed the operands. Reverse the condition.
2364 cond = ReverseCondition(cond);
2365 } else {
2366 __ cmp(ToRegister(left), ToRegister(right));
2367 }
2368 }
2369 EmitBranch(instr, cond);
2370 }
2371 }
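// Note (illustrative): when only the left operand is a constant the cmp is
// emitted with the operands transposed, so the token condition is fixed up
// with ReverseCondition; lt becomes gt (the order is swapped), not ge (the
// comparison is not negated).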
2374 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2375 Register left = ToRegister(instr->left());
2376 Register right = ToRegister(instr->right());
2378 __ cmp(left, Operand(right));
2379 EmitBranch(instr, eq);
2380 }
2383 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2384 if (instr->hydrogen()->representation().IsTagged()) {
2385 Register input_reg = ToRegister(instr->object());
2386 __ mov(ip, Operand(factory()->the_hole_value()));
2387 __ cmp(input_reg, ip);
2388 EmitBranch(instr, eq);
2389 return;
2390 }
2392 DwVfpRegister input_reg = ToDoubleRegister(instr->object());
2393 __ VFPCompareAndSetFlags(input_reg, input_reg);
2394 EmitFalseBranch(instr, vc);
2396 Register scratch = scratch0();
2397 __ VmovHigh(scratch, input_reg);
2398 __ cmp(scratch, Operand(kHoleNanUpper32));
2399 EmitBranch(instr, eq);
2400 }
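// Note (illustrative): in the double case the hole is a NaN with a known
// bit pattern. The self-comparison sends every ordered (non-NaN) value to
// the false block via vc, and the remaining NaNs are distinguished by
// comparing the upper word against kHoleNanUpper32.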
2403 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2404 Representation rep = instr->hydrogen()->value()->representation();
2405 ASSERT(!rep.IsInteger32());
2406 Register scratch = ToRegister(instr->temp());
2408 if (rep.IsDouble()) {
2409 DwVfpRegister value = ToDoubleRegister(instr->value());
2410 __ VFPCompareAndSetFlags(value, 0.0);
2411 EmitFalseBranch(instr, ne);
2412 __ VmovHigh(scratch, value);
2413 __ cmp(scratch, Operand(0x80000000));
2414 } else {
2415 Register value = ToRegister(instr->value());
2416 __ CheckMap(value,
2417 scratch,
2418 Heap::kHeapNumberMapRootIndex,
2419 instr->FalseLabel(chunk()),
2420 DO_SMI_CHECK);
2421 __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2422 __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2423 __ cmp(scratch, Operand(0x80000000));
2424 __ cmp(ip, Operand(0x00000000), eq);
2425 }
2426 EmitBranch(instr, eq);
2427 }
2430 Condition LCodeGen::EmitIsObject(Register input,
2431 Register temp1,
2432 Label* is_not_object,
2433 Label* is_object) {
2434 Register temp2 = scratch0();
2435 __ JumpIfSmi(input, is_not_object);
2437 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2438 __ cmp(input, temp2);
2439 __ b(eq, is_object);
2441 // Load map.
2442 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2443 // Undetectable objects behave like undefined.
2444 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2445 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
2446 __ b(ne, is_not_object);
2448 // Load instance type and check that it is in object type range.
2449 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2450 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2451 __ b(lt, is_not_object);
2452 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2453 return le;
2454 }
2457 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2458 Register reg = ToRegister(instr->value());
2459 Register temp1 = ToRegister(instr->temp());
2461 Condition true_cond =
2462 EmitIsObject(reg, temp1,
2463 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2465 EmitBranch(instr, true_cond);
2466 }
2469 Condition LCodeGen::EmitIsString(Register input,
2470 Register temp1,
2471 Label* is_not_string,
2472 SmiCheck check_needed = INLINE_SMI_CHECK) {
2473 if (check_needed == INLINE_SMI_CHECK) {
2474 __ JumpIfSmi(input, is_not_string);
2475 }
2476 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2478 return lt;
2479 }
2482 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2483 Register reg = ToRegister(instr->value());
2484 Register temp1 = ToRegister(instr->temp());
2486 SmiCheck check_needed =
2487 instr->hydrogen()->value()->IsHeapObject()
2488 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2489 Condition true_cond =
2490 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2492 EmitBranch(instr, true_cond);
2493 }
2496 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2497 Register input_reg = EmitLoadRegister(instr->value(), ip);
2498 __ SmiTst(input_reg);
2499 EmitBranch(instr, eq);
2500 }
2503 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2504 Register input = ToRegister(instr->value());
2505 Register temp = ToRegister(instr->temp());
2507 if (!instr->hydrogen()->value()->IsHeapObject()) {
2508 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2509 }
2510 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2511 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2512 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2513 EmitBranch(instr, ne);
2514 }
2517 static Condition ComputeCompareCondition(Token::Value op) {
2518 switch (op) {
2519 case Token::EQ_STRICT:
2520 case Token::EQ:
2521 return eq;
2522 case Token::LT:
2523 return lt;
2524 case Token::GT:
2525 return gt;
2526 case Token::LTE:
2527 return le;
2528 case Token::GTE:
2529 return ge;
2530 default:
2531 UNREACHABLE();
2532 return kNoCondition;
2533 }
2534 }
2537 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2538 ASSERT(ToRegister(instr->context()).is(cp));
2539 Token::Value op = instr->op();
2541 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2542 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2543 // This instruction also signals no smi code inlined.
2544 __ cmp(r0, Operand::Zero());
2546 Condition condition = ComputeCompareCondition(op);
2548 EmitBranch(instr, condition);
2549 }
2552 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2553 InstanceType from = instr->from();
2554 InstanceType to = instr->to();
2555 if (from == FIRST_TYPE) return to;
2556 ASSERT(from == to || to == LAST_TYPE);
2557 return from;
2558 }
2561 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2562 InstanceType from = instr->from();
2563 InstanceType to = instr->to();
2564 if (from == to) return eq;
2565 if (to == LAST_TYPE) return hs;
2566 if (from == FIRST_TYPE) return ls;
2567 UNREACHABLE();
2568 return eq;
2569 }
2572 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2573 Register scratch = scratch0();
2574 Register input = ToRegister(instr->value());
2576 if (!instr->hydrogen()->value()->IsHeapObject()) {
2577 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2578 }
2580 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2581 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2582 }
2585 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2586 Register input = ToRegister(instr->value());
2587 Register result = ToRegister(instr->result());
2589 __ AssertString(input);
2591 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2592 __ IndexFromHash(result, result);
2593 }
2596 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2597 LHasCachedArrayIndexAndBranch* instr) {
2598 Register input = ToRegister(instr->value());
2599 Register scratch = scratch0();
2601 __ ldr(scratch,
2602 FieldMemOperand(input, String::kHashFieldOffset));
2603 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2604 EmitBranch(instr, eq);
2605 }
2608 // Branches to a label or falls through with the answer in flags. Trashes
2609 // the temp registers, but not the input.
2610 void LCodeGen::EmitClassOfTest(Label* is_true,
2611 Label* is_false,
2612 Handle<String> class_name,
2613 Register input,
2614 Register temp,
2615 Register temp2) {
2616 ASSERT(!input.is(temp));
2617 ASSERT(!input.is(temp2));
2618 ASSERT(!temp.is(temp2));
2620 __ JumpIfSmi(input, is_false);
2622 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2623 // Assuming the following assertions, we can use the same compares to test
2624 // for both being a function type and being in the object type range.
2625 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2626 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2627 FIRST_SPEC_OBJECT_TYPE + 1);
2628 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2629 LAST_SPEC_OBJECT_TYPE - 1);
2630 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2631 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2632 __ b(lt, is_false);
2633 __ b(eq, is_true);
2634 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2635 __ b(eq, is_true);
2636 } else {
2637 // Faster code path to avoid two compares: subtract lower bound from the
2638 // actual type and do a signed compare with the width of the type range.
2639 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2640 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2641 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2642 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2643 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2644 __ b(gt, is_false);
2645 }
2647 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2648 // Check if the constructor in the map is a function.
2649 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2651 // Objects with a non-function constructor have class 'Object'.
2652 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2653 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2654 __ b(ne, is_true);
2655 } else {
2656 __ b(ne, is_false);
2657 }
2659 // temp now contains the constructor function. Grab the
2660 // instance class name from there.
2661 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2662 __ ldr(temp, FieldMemOperand(temp,
2663 SharedFunctionInfo::kInstanceClassNameOffset));
2664 // The class name we are testing against is internalized since it's a literal.
2665 // The name in the constructor is internalized because of the way the context
2666 // is booted. This routine isn't expected to work for random API-created
2667 // classes and it doesn't have to because you can't access it with natives
2668 // syntax. Since both sides are internalized it is sufficient to use an
2669 // identity comparison.
2670 __ cmp(temp, Operand(class_name));
2671 // End with the answer in flags.
2672 }
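// Note (illustrative): the STATIC_ASSERTs on the fast "Function" path pin
// the callable spec-object types to the two ends of the spec-object range,
// so a single CompareObjectType against FIRST_SPEC_OBJECT_TYPE classifies
// lt as non-object, eq or LAST_SPEC_OBJECT_TYPE as callable, and
// everything strictly in between as a non-callable object.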
2675 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2676 Register input = ToRegister(instr->value());
2677 Register temp = scratch0();
2678 Register temp2 = ToRegister(instr->temp());
2679 Handle<String> class_name = instr->hydrogen()->class_name();
2681 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2682 class_name, input, temp, temp2);
2684 EmitBranch(instr, eq);
2685 }
2688 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2689 Register reg = ToRegister(instr->value());
2690 Register temp = ToRegister(instr->temp());
2692 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2693 __ cmp(temp, Operand(instr->map()));
2694 EmitBranch(instr, eq);
2695 }
2698 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2699 ASSERT(ToRegister(instr->context()).is(cp));
2700 ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
2701 ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
2703 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2704 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2706 __ cmp(r0, Operand::Zero());
2707 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2708 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2709 }
2712 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2713 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2714 public:
2715 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2716 LInstanceOfKnownGlobal* instr)
2717 : LDeferredCode(codegen), instr_(instr) { }
2718 virtual void Generate() V8_OVERRIDE {
2719 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2720 }
2721 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2722 Label* map_check() { return &map_check_; }
2723 private:
2724 LInstanceOfKnownGlobal* instr_;
2725 Label map_check_;
2726 };
2728 DeferredInstanceOfKnownGlobal* deferred;
2729 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2731 Label done, false_result;
2732 Register object = ToRegister(instr->value());
2733 Register temp = ToRegister(instr->temp());
2734 Register result = ToRegister(instr->result());
2736 // A Smi is not an instance of anything.
2737 __ JumpIfSmi(object, &false_result);
2739 // This is the inlined call site instanceof cache. The two occurrences of the
2740 // hole value will be patched to the last map/result pair generated by the
2741 // instanceof stub.
2742 Label cache_miss;
2743 Register map = temp;
2744 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2745 {
2746 // Block constant pool emission to ensure the positions of instructions are
2747 // as expected by the patcher. See InstanceofStub::Generate().
2748 Assembler::BlockConstPoolScope block_const_pool(masm());
2749 __ bind(deferred->map_check()); // Label for calculating code patching.
2750 // We use Factory::the_hole_value() on purpose instead of loading from the
2751 // root array to force relocation to be able to later patch with
2752 // the cached map.
2753 PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2754 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2755 __ mov(ip, Operand(Handle<Object>(cell)));
2756 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
2757 __ cmp(map, Operand(ip));
2758 __ b(ne, &cache_miss);
2759 // We use Factory::the_hole_value() on purpose instead of loading from the
2760 // root array to force relocation to be able to later patch
2761 // with true or false.
2762 __ mov(result, Operand(factory()->the_hole_value()));
2763 __ b(&done);
2764 }
2766 // The inlined call site cache did not match. Check null and string before
2767 // calling the deferred code.
2768 __ bind(&cache_miss);
2769 // Null is not an instance of anything.
2770 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2771 __ cmp(object, Operand(ip));
2772 __ b(eq, &false_result);
2774 // String values are not instances of anything.
2775 Condition is_string = masm_->IsObjectStringType(object, temp);
2776 __ b(is_string, &false_result);
2778 // Go to the deferred code.
2779 __ b(deferred->entry());
2781 __ bind(&false_result);
2782 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2784 // Here result has either true or false. Deferred code also produces true or
2785 // false object.
2786 __ bind(deferred->exit());
2787 __ bind(&done);
2788 }
2791 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2792 Label* map_check) {
2793 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2794 flags = static_cast<InstanceofStub::Flags>(
2795 flags | InstanceofStub::kArgsInRegisters);
2796 flags = static_cast<InstanceofStub::Flags>(
2797 flags | InstanceofStub::kCallSiteInlineCheck);
2798 flags = static_cast<InstanceofStub::Flags>(
2799 flags | InstanceofStub::kReturnTrueFalseObject);
2800 InstanceofStub stub(flags);
2802 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2803 LoadContextFromDeferred(instr->context());
2805 __ Move(InstanceofStub::right(), instr->function());
2806 static const int kAdditionalDelta = 4;
2807 // Make sure that code size is predictable, since we use specific constant
2808 // offsets in the code to find embedded values.
2809 PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2810 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2811 Label before_push_delta;
2812 __ bind(&before_push_delta);
2813 __ BlockConstPoolFor(kAdditionalDelta);
2814 // r5 is used to communicate the offset to the location of the map check.
2815 __ mov(r5, Operand(delta * kPointerSize));
2816 // The mov above can generate one or two instructions. The delta was computed
2817 // for two instructions, so we need to pad here in case of one instruction.
2818 if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2819 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2820 __ nop();
2821 }
2822 CallCodeGeneric(stub.GetCode(isolate()),
2823 RelocInfo::CODE_TARGET,
2824 instr,
2825 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2826 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2827 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2828 // Put the result value (r0) into the result register slot and
2829 // restore all registers.
2830 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2831 }
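// Note (illustrative): kAdditionalDelta covers the instructions emitted
// between the delta computation and the stub call, so the offset placed in
// r5 lets InstanceofStub locate the patchable map-check site bound at
// map_check; the nop padding keeps the mov at a fixed two-instruction size.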
2834 void LCodeGen::DoCmpT(LCmpT* instr) {
2835 ASSERT(ToRegister(instr->context()).is(cp));
2836 Token::Value op = instr->op();
2838 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2839 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2840 // This instruction also signals no smi code inlined.
2841 __ cmp(r0, Operand::Zero());
2843 Condition condition = ComputeCompareCondition(op);
2844 __ LoadRoot(ToRegister(instr->result()),
2845 Heap::kTrueValueRootIndex,
2846 condition);
2847 __ LoadRoot(ToRegister(instr->result()),
2848 Heap::kFalseValueRootIndex,
2849 NegateCondition(condition));
2850 }
2853 void LCodeGen::DoReturn(LReturn* instr) {
2854 if (FLAG_trace && info()->IsOptimizing()) {
2855 // Push the return value on the stack as the parameter.
2856 // Runtime::TraceExit returns its parameter in r0. Because we're leaving
2857 // the code managed by the register allocator and tearing down the frame,
2858 // it's safe to write to the context register.
2859 __ push(r0);
2860 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2861 __ CallRuntime(Runtime::kTraceExit, 1);
2862 }
2863 if (info()->saves_caller_doubles()) {
2864 RestoreCallerDoubles();
2865 }
2866 int no_frame_start = -1;
2867 if (NeedsEagerFrame()) {
2868 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2869 }
2870 if (instr->has_constant_parameter_count()) {
2871 int parameter_count = ToInteger32(instr->constant_parameter_count());
2872 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2873 if (sp_delta != 0) {
2874 __ add(sp, sp, Operand(sp_delta));
2875 }
2876 } else {
2877 Register reg = ToRegister(instr->parameter_count());
2878 // The argument count parameter is a smi.
2879 __ SmiUntag(reg);
2880 __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
2881 }
2883 __ Jump(lr);
2885 if (no_frame_start != -1) {
2886 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2887 }
2888 }
2891 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2892 Register result = ToRegister(instr->result());
2893 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2894 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
2895 if (instr->hydrogen()->RequiresHoleCheck()) {
2896 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2897 __ cmp(result, ip);
2898 DeoptimizeIf(eq, instr->environment());
2899 }
2900 }
2903 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2904 ASSERT(ToRegister(instr->context()).is(cp));
2905 ASSERT(ToRegister(instr->global_object()).is(r0));
2906 ASSERT(ToRegister(instr->result()).is(r0));
2908 __ mov(r2, Operand(instr->name()));
2909 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2910 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2911 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2912 }
2915 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2916 Register value = ToRegister(instr->value());
2917 Register cell = scratch0();
2919 // Load the cell.
2920 __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
2922 // If the cell we are storing to contains the hole it could have
2923 // been deleted from the property dictionary. In that case, we need
2924 // to update the property details in the property dictionary to mark
2925 // it as no longer deleted.
2926 if (instr->hydrogen()->RequiresHoleCheck()) {
2927 // We use a temp to check the payload (CompareRoot might clobber ip).
2928 Register payload = ToRegister(instr->temp());
2929 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
2930 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2931 DeoptimizeIf(eq, instr->environment());
2932 }
2934 // Store the value.
2935 __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
2936 // Cells are always rescanned, so no write barrier here.
2937 }
2940 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2941 Register context = ToRegister(instr->context());
2942 Register result = ToRegister(instr->result());
2943 __ ldr(result, ContextOperand(context, instr->slot_index()));
2944 if (instr->hydrogen()->RequiresHoleCheck()) {
2945 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2946 __ cmp(result, ip);
2947 if (instr->hydrogen()->DeoptimizesOnHole()) {
2948 DeoptimizeIf(eq, instr->environment());
2949 } else {
2950 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2951 }
2952 }
2953 }
2956 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2957 Register context = ToRegister(instr->context());
2958 Register value = ToRegister(instr->value());
2959 Register scratch = scratch0();
2960 MemOperand target = ContextOperand(context, instr->slot_index());
2962 Label skip_assignment;
2964 if (instr->hydrogen()->RequiresHoleCheck()) {
2965 __ ldr(scratch, target);
2966 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2967 __ cmp(scratch, ip);
2968 if (instr->hydrogen()->DeoptimizesOnHole()) {
2969 DeoptimizeIf(eq, instr->environment());
2970 } else {
2971 __ b(ne, &skip_assignment);
2972 }
2973 }
2975 __ str(value, target);
2976 if (instr->hydrogen()->NeedsWriteBarrier()) {
2977 SmiCheck check_needed =
2978 instr->hydrogen()->value()->IsHeapObject()
2979 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2980 __ RecordWriteContextSlot(context,
2981 target.offset(),
2982 value,
2983 scratch,
2984 GetLinkRegisterState(),
2985 kSaveFPRegs,
2986 EMIT_REMEMBERED_SET,
2987 check_needed);
2988 }
2990 __ bind(&skip_assignment);
2991 }
2994 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2995 HObjectAccess access = instr->hydrogen()->access();
2996 int offset = access.offset();
2997 Register object = ToRegister(instr->object());
2999 if (access.IsExternalMemory()) {
3000 Register result = ToRegister(instr->result());
3001 MemOperand operand = MemOperand(object, offset);
3002 __ Load(result, operand, access.representation());
3003 return;
3004 }
3006 if (instr->hydrogen()->representation().IsDouble()) {
3007 DwVfpRegister result = ToDoubleRegister(instr->result());
3008 __ vldr(result, FieldMemOperand(object, offset));
3009 return;
3010 }
3012 Register result = ToRegister(instr->result());
3013 if (!access.IsInobject()) {
3014 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3015 object = result;
3016 }
3017 MemOperand operand = FieldMemOperand(object, offset);
3018 __ Load(result, operand, access.representation());
3019 }
3022 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3023 ASSERT(ToRegister(instr->context()).is(cp));
3024 ASSERT(ToRegister(instr->object()).is(r0));
3025 ASSERT(ToRegister(instr->result()).is(r0));
3027 // Name is always in r2.
3028 __ mov(r2, Operand(instr->name()));
3029 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
3030 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3031 }
3034 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3035 Register scratch = scratch0();
3036 Register function = ToRegister(instr->function());
3037 Register result = ToRegister(instr->result());
3039 // Check that the function really is a function. Load map into the
3040 // result register.
3041 __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
3042 DeoptimizeIf(ne, instr->environment());
3044 // Make sure that the function has an instance prototype.
3045 Label non_instance;
3046 __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3047 __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
3048 __ b(ne, &non_instance);
3050 // Get the prototype or initial map from the function.
3051 __ ldr(result,
3052 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3054 // Check that the function has a prototype or an initial map.
3055 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3056 __ cmp(result, ip);
3057 DeoptimizeIf(eq, instr->environment());
3059 // If the function does not have an initial map, we're done.
3060 Label done;
3061 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3062 __ b(ne, &done);
3064 // Get the prototype from the initial map.
3065 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3066 __ jmp(&done);
3068 // Non-instance prototype: Fetch prototype from constructor field
3069 // in initial map.
3070 __ bind(&non_instance);
3071 __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3073 // All done.
3074 __ bind(&done);
3075 }
3078 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3079 Register result = ToRegister(instr->result());
3080 __ LoadRoot(result, instr->index());
3081 }
3084 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3085 Register arguments = ToRegister(instr->arguments());
3086 Register result = ToRegister(instr->result());
3087 // There are two words between the frame pointer and the last argument.
3088 // Subtracting from length accounts for one of them; add one more.
3089 if (instr->length()->IsConstantOperand()) {
3090 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3091 if (instr->index()->IsConstantOperand()) {
3092 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3093 int index = (const_length - const_index) + 1;
3094 __ ldr(result, MemOperand(arguments, index * kPointerSize));
3096 Register index = ToRegister(instr->index());
3097 __ rsb(result, index, Operand(const_length + 1));
3098 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3099 }
3100 } else if (instr->index()->IsConstantOperand()) {
3101 Register length = ToRegister(instr->length());
3102 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3103 int loc = const_index - 1;
3104 if (loc != 0) {
3105 __ sub(result, length, Operand(loc));
3106 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3107 } else {
3108 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
3109 }
3110 } else {
3111 Register length = ToRegister(instr->length());
3112 Register index = ToRegister(instr->index());
3113 __ sub(result, length, index);
3114 __ add(result, result, Operand(1));
3115 __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
3116 }
3117 }
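// Worked example (illustrative): with length 3 and index 1 the element is
// loaded from arguments + (3 - 1 + 1) words; the extra word skips one of
// the two slots sitting between the frame pointer and the last argument.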
3120 void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
3121 Runtime::FunctionId id) {
3122 // TODO(3095996): Get rid of this. For now, we need to make the
3123 // result register contain a valid pointer because it is already
3124 // contained in the register pointer map.
3125 Register reg = ToRegister(instr->result());
3126 __ mov(reg, Operand::Zero());
3128 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3129 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3130 __ CallRuntimeSaveDoubles(id);
3131 RecordSafepointWithRegisters(
3132 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
3133 __ sub(r0, r0, Operand(kHeapObjectTag));
3134 __ StoreToSafepointRegisterSlot(r0, reg);
3135 }
3138 template<class T>
3139 void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
3140 class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
3141 public:
3142 DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
3143 Runtime::FunctionId id)
3144 : LDeferredCode(codegen), instr_(instr), id_(id) { }
3145 virtual void Generate() V8_OVERRIDE {
3146 codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
3147 }
3148 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3149 private:
3150 LInstruction* instr_;
3151 Runtime::FunctionId id_;
3152 };
3154 // Allocate a SIMD128 object on the heap.
3155 Register reg = ToRegister(instr->result());
3156 Register temp = ToRegister(instr->temp());
3157 Register temp2 = ToRegister(instr->temp2());
3158 Register scratch = scratch0();
3160 DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
3161 this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
3162 if (FLAG_inline_new) {
3163 __ LoadRoot(scratch, static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
3164 __ AllocateSIMDHeapObject(T::kSize, reg, temp, temp2, scratch,
3165 deferred->entry(), DONT_TAG_RESULT);
3166 } else {
3167 __ jmp(deferred->entry());
3168 }
3169 __ bind(deferred->exit());
3171 // Copy the SIMD128 value from the external array to the heap object.
3172 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
3173 Register external_pointer = ToRegister(instr->elements());
3174 Register key = no_reg;
3175 ElementsKind elements_kind = instr->elements_kind();
3176 bool key_is_constant = instr->key()->IsConstantOperand();
3177 int constant_key = 0;
3178 if (key_is_constant) {
3179 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3180 if (constant_key & 0xF0000000) {
3181 Abort(kArrayIndexConstantValueTooBig);
3182 }
3183 } else {
3184 key = ToRegister(instr->key());
3185 }
3186 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3187 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3188 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3189 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3190 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3191 : 0;
3192 int base_offset =
3193 (instr->additional_index() << element_size_shift) + additional_offset;
3194 Operand operand = key_is_constant
3195 ? Operand(constant_key << element_size_shift)
3196 : Operand(key, LSL, shift_size);
3198 __ add(scratch, external_pointer, operand);
3199 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
3200 __ ldr(temp, MemOperand(scratch, base_offset + offset));
3201 __ str(temp, MemOperand(reg, T::kValueOffset + offset));
3202 }
3204 // Now that we have finished with the object's real address, tag it.
3205 __ add(reg, reg, Operand(kHeapObjectTag));
3206 }
3209 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3210 Register external_pointer = ToRegister(instr->elements());
3211 Register key = no_reg;
3212 ElementsKind elements_kind = instr->elements_kind();
3213 bool key_is_constant = instr->key()->IsConstantOperand();
3214 int constant_key = 0;
3215 if (key_is_constant) {
3216 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3217 if (constant_key & 0xF0000000) {
3218 Abort(kArrayIndexConstantValueTooBig);
3219 }
3220 } else {
3221 key = ToRegister(instr->key());
3222 }
3223 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3224 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3225 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3226 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3227 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3228 : 0;
3231 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3232 elements_kind == FLOAT32_ELEMENTS ||
3233 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3234 elements_kind == FLOAT64_ELEMENTS) {
3235 int base_offset =
3236 (instr->additional_index() << element_size_shift) + additional_offset;
3237 DwVfpRegister result = ToDoubleRegister(instr->result());
3238 Operand operand = key_is_constant
3239 ? Operand(constant_key << element_size_shift)
3240 : Operand(key, LSL, shift_size);
3241 __ add(scratch0(), external_pointer, operand);
3242 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3243 elements_kind == FLOAT32_ELEMENTS) {
3244 __ vldr(double_scratch0().low(), scratch0(), base_offset);
3245 __ vcvt_f64_f32(result, double_scratch0().low());
3246 } else { // loading doubles, not floats.
3247 __ vldr(result, scratch0(), base_offset);
3248 }
3249 } else if (IsFloat32x4ElementsKind(elements_kind)) {
3250 DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
3251 } else if (IsInt32x4ElementsKind(elements_kind)) {
3252 DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
3254 Register result = ToRegister(instr->result());
3255 MemOperand mem_operand = PrepareKeyedOperand(
3256 key, external_pointer, key_is_constant, constant_key,
3257 element_size_shift, shift_size,
3258 instr->additional_index(), additional_offset);
3259 switch (elements_kind) {
3260 case EXTERNAL_INT8_ELEMENTS:
3261 case INT8_ELEMENTS:
3262 __ ldrsb(result, mem_operand);
3263 break;
3264 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3265 case EXTERNAL_UINT8_ELEMENTS:
3266 case UINT8_ELEMENTS:
3267 case UINT8_CLAMPED_ELEMENTS:
3268 __ ldrb(result, mem_operand);
3269 break;
3270 case EXTERNAL_INT16_ELEMENTS:
3271 case INT16_ELEMENTS:
3272 __ ldrsh(result, mem_operand);
3273 break;
3274 case EXTERNAL_UINT16_ELEMENTS:
3275 case UINT16_ELEMENTS:
3276 __ ldrh(result, mem_operand);
3277 break;
3278 case EXTERNAL_INT32_ELEMENTS:
3279 case INT32_ELEMENTS:
3280 __ ldr(result, mem_operand);
3281 break;
3282 case EXTERNAL_UINT32_ELEMENTS:
3283 case UINT32_ELEMENTS:
3284 __ ldr(result, mem_operand);
3285 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3286 __ cmp(result, Operand(0x80000000));
3287 DeoptimizeIf(cs, instr->environment());
3288 }
3289 break;
3290 case FLOAT32_ELEMENTS:
3291 case FLOAT64_ELEMENTS:
3292 case EXTERNAL_FLOAT32_ELEMENTS:
3293 case EXTERNAL_FLOAT64_ELEMENTS:
3294 case FLOAT32x4_ELEMENTS:
3295 case INT32x4_ELEMENTS:
3296 case EXTERNAL_FLOAT32x4_ELEMENTS:
3297 case EXTERNAL_INT32x4_ELEMENTS:
3298 case FAST_HOLEY_DOUBLE_ELEMENTS:
3299 case FAST_HOLEY_ELEMENTS:
3300 case FAST_HOLEY_SMI_ELEMENTS:
3301 case FAST_DOUBLE_ELEMENTS:
3302 case FAST_ELEMENTS:
3303 case FAST_SMI_ELEMENTS:
3304 case DICTIONARY_ELEMENTS:
3305 case SLOPPY_ARGUMENTS_ELEMENTS:
3306 UNREACHABLE();
3307 break;
3308 }
3309 }
3310 }
3313 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3314 Register elements = ToRegister(instr->elements());
3315 bool key_is_constant = instr->key()->IsConstantOperand();
3316 Register key = no_reg;
3317 DwVfpRegister result = ToDoubleRegister(instr->result());
3318 Register scratch = scratch0();
3320 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3322 int base_offset =
3323 FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3324 (instr->additional_index() << element_size_shift);
3325 if (key_is_constant) {
3326 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3327 if (constant_key & 0xF0000000) {
3328 Abort(kArrayIndexConstantValueTooBig);
3329 }
3330 base_offset += constant_key << element_size_shift;
3331 }
3332 __ add(scratch, elements, Operand(base_offset));
3334 if (!key_is_constant) {
3335 key = ToRegister(instr->key());
3336 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3337 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3338 __ add(scratch, scratch, Operand(key, LSL, shift_size));
3339 }
3341 __ vldr(result, scratch, 0);
3343 if (instr->hydrogen()->RequiresHoleCheck()) {
3344 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3345 __ cmp(scratch, Operand(kHoleNanUpper32));
3346 DeoptimizeIf(eq, instr->environment());
3347 }
3348 }
3351 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3352 Register elements = ToRegister(instr->elements());
3353 Register result = ToRegister(instr->result());
3354 Register scratch = scratch0();
3355 Register store_base = scratch;
3356 int offset = 0;
3358 if (instr->key()->IsConstantOperand()) {
3359 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3360 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3361 instr->additional_index());
3362 store_base = elements;
3363 } else {
3364 Register key = ToRegister(instr->key());
3365 // Even though the HLoadKeyed instruction forces the input
3366 // representation for the key to be an integer, the input gets replaced
3367 // during bound check elimination with the index argument to the bounds
3368 // check, which can be tagged, so that case must be handled here, too.
3369 if (instr->hydrogen()->key()->representation().IsSmi()) {
3370 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
3372 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3373 }
3374 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3375 }
3376 __ ldr(result, FieldMemOperand(store_base, offset));
3378 // Check for the hole value.
3379 if (instr->hydrogen()->RequiresHoleCheck()) {
3380 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3381 __ SmiTst(result);
3382 DeoptimizeIf(ne, instr->environment());
3383 } else {
3384 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3385 __ cmp(result, scratch);
3386 DeoptimizeIf(eq, instr->environment());
3387 }
3388 }
3389 }
3392 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3393 if (instr->is_typed_elements()) {
3394 DoLoadKeyedExternalArray(instr);
3395 } else if (instr->hydrogen()->representation().IsDouble()) {
3396 DoLoadKeyedFixedDoubleArray(instr);
3397 } else {
3398 DoLoadKeyedFixedArray(instr);
3399 }
3400 }
3403 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3404 Register base,
3405 bool key_is_constant,
3406 int constant_key,
3407 int element_size,
3408 int shift_size,
3409 int additional_index,
3410 int additional_offset) {
3411 int base_offset = (additional_index << element_size) + additional_offset;
3412 if (key_is_constant) {
3413 return MemOperand(base,
3414 base_offset + (constant_key << element_size));
3415 }
3417 if (additional_offset != 0) {
3418 __ mov(scratch0(), Operand(base_offset));
3419 if (shift_size >= 0) {
3420 __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
3421 } else {
3422 ASSERT_EQ(-1, shift_size);
3423 // key can be negative, so using ASR here.
3424 __ add(scratch0(), scratch0(), Operand(key, ASR, 1));
3425 }
3426 return MemOperand(base, scratch0());
3427 }
3429 if (additional_index != 0) {
3430 additional_index *= 1 << (element_size - shift_size);
3431 __ add(scratch0(), key, Operand(additional_index));
3432 }
3434 if (additional_index == 0) {
3435 if (shift_size >= 0) {
3436 return MemOperand(base, key, LSL, shift_size);
3437 } else {
3438 ASSERT_EQ(-1, shift_size);
3439 return MemOperand(base, key, LSR, 1);
3440 }
3441 }
3443 if (shift_size >= 0) {
3444 return MemOperand(base, scratch0(), LSL, shift_size);
3445 } else {
3446 ASSERT_EQ(-1, shift_size);
3447 return MemOperand(base, scratch0(), LSR, 1);
3448 }
3449 }
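// Note (illustrative): shift_size can legitimately be -1 here, the case of
// a smi-tagged key indexing byte-sized elements (element_size 0 minus one
// tag bit); the key is then scaled down with ASR/LSR #1 instead of being
// shifted left, ASR when it may be negative.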
3452 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3453 ASSERT(ToRegister(instr->context()).is(cp));
3454 ASSERT(ToRegister(instr->object()).is(r1));
3455 ASSERT(ToRegister(instr->key()).is(r0));
3457 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3458 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
3459 }
3462 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3463 Register scratch = scratch0();
3464 Register result = ToRegister(instr->result());
3466 if (instr->hydrogen()->from_inlined()) {
3467 __ sub(result, sp, Operand(2 * kPointerSize));
3468 } else {
3469 // Check if the calling frame is an arguments adaptor frame.
3470 Label done, adapted;
3471 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3472 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3473 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3475 // Result is the frame pointer for the frame if not adapted and for the real
3476 // frame below the adaptor frame if adapted.
3477 __ mov(result, fp, LeaveCC, ne);
3478 __ mov(result, scratch, LeaveCC, eq);
3479 }
3480 }
3483 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3484 Register elem = ToRegister(instr->elements());
3485 Register result = ToRegister(instr->result());
3487 Label done;
3489 // If no arguments adaptor frame the number of arguments is fixed.
3490 __ cmp(fp, elem);
3491 __ mov(result, Operand(scope()->num_parameters()));
3492 __ b(eq, &done);
3494 // Arguments adaptor frame present. Get argument length from there.
3495 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3496 __ ldr(result,
3497 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3498 __ SmiUntag(result);
3500 // Argument length is in result register.
3501 __ bind(&done);
3502 }
3505 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3506 Register receiver = ToRegister(instr->receiver());
3507 Register function = ToRegister(instr->function());
3508 Register result = ToRegister(instr->result());
3509 Register scratch = scratch0();
3511 // If the receiver is null or undefined, we have to pass the global
3512 // object as a receiver to normal functions. Values have to be
3513 // passed unchanged to builtins and strict-mode functions.
3514 Label global_object, result_in_receiver;
3516 if (!instr->hydrogen()->known_function()) {
3517 // Do not transform the receiver to object for strict mode
3518 // functions.
3519 __ ldr(scratch,
3520 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3521 __ ldr(scratch,
3522 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3523 int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3524 __ tst(scratch, Operand(mask));
3525 __ b(ne, &result_in_receiver);
3527 // Do not transform the receiver to object for builtins.
3528 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
3529 __ b(ne, &result_in_receiver);
3530 }
3532 // Normal function. Replace undefined or null with global receiver.
3533 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3534 __ cmp(receiver, scratch);
3535 __ b(eq, &global_object);
3536 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3537 __ cmp(receiver, scratch);
3538 __ b(eq, &global_object);
3540 // Deoptimize if the receiver is not a JS object.
3541 __ SmiTst(receiver);
3542 DeoptimizeIf(eq, instr->environment());
3543 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3544 DeoptimizeIf(lt, instr->environment());
3546 __ b(&result_in_receiver);
3547 __ bind(&global_object);
3548 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
3549 __ ldr(result,
3550 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3551 __ ldr(result,
3552 FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
3554 if (result.is(receiver)) {
3555 __ bind(&result_in_receiver);
3556 } else {
3557 Label result_ok;
3558 __ b(&result_ok);
3559 __ bind(&result_in_receiver);
3560 __ mov(result, receiver);
3561 __ bind(&result_ok);
3562 }
3563 }
3566 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3567 Register receiver = ToRegister(instr->receiver());
3568 Register function = ToRegister(instr->function());
3569 Register length = ToRegister(instr->length());
3570 Register elements = ToRegister(instr->elements());
3571 Register scratch = scratch0();
3572 ASSERT(receiver.is(r0)); // Used for parameter count.
3573 ASSERT(function.is(r1)); // Required by InvokeFunction.
3574 ASSERT(ToRegister(instr->result()).is(r0));
3576 // Copy the arguments to this function possibly from the
3577 // adaptor frame below it.
3578 const uint32_t kArgumentsLimit = 1 * KB;
3579 __ cmp(length, Operand(kArgumentsLimit));
3580 DeoptimizeIf(hi, instr->environment());
3582 // Push the receiver and use the register to keep the original
3583 // number of arguments.
3584 __ push(receiver);
3585 __ mov(receiver, length);
3586 // The arguments are at a one pointer size offset from elements.
3587 __ add(elements, elements, Operand(1 * kPointerSize));
3589 // Loop through the arguments pushing them onto the execution
3590 // stack.
3591 Label invoke, loop;
3592 // length is a small non-negative integer, due to the test above.
3593 __ cmp(length, Operand::Zero());
3594 __ b(eq, &invoke);
3595 __ bind(&loop);
3596 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3597 __ push(scratch);
3598 __ sub(length, length, Operand(1), SetCC);
3599 __ b(ne, &loop);
3601 __ bind(&invoke);
3602 ASSERT(instr->HasPointerMap());
3603 LPointerMap* pointers = instr->pointer_map();
3604 SafepointGenerator safepoint_generator(
3605 this, pointers, Safepoint::kLazyDeopt);
3606 // The number of arguments is stored in receiver which is r0, as expected
3607 // by InvokeFunction.
3608 ParameterCount actual(receiver);
3609 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3610 }
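// Note (illustrative): the kArgumentsLimit check above caps the optimized
// apply() at 1024 (1 * KB) pushed arguments; anything larger deoptimizes
// via DeoptimizeIf(hi, ...) rather than emitting an unbounded push loop.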
3613 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3614 LOperand* argument = instr->value();
3615 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3616 Abort(kDoPushArgumentNotImplementedForDoubleType);
3617 } else {
3618 Register argument_reg = EmitLoadRegister(argument, ip);
3619 __ push(argument_reg);
3620 }
3621 }
3624 void LCodeGen::DoDrop(LDrop* instr) {
3625 __ Drop(instr->count());
3626 }
3629 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3630 Register result = ToRegister(instr->result());
3631 __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3632 }
3635 void LCodeGen::DoContext(LContext* instr) {
3636 // If there is a non-return use, the context must be moved to a register.
3637 Register result = ToRegister(instr->result());
3638 if (info()->IsOptimizing()) {
3639 __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3640 } else {
3641 // If there is no frame, the context must be in cp.
3642 ASSERT(result.is(cp));
3643 }
3644 }
3647 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3648 ASSERT(ToRegister(instr->context()).is(cp));
3649 __ push(cp); // The context is the first argument.
3650 __ Move(scratch0(), instr->hydrogen()->pairs());
3651 __ push(scratch0());
3652 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3653 __ push(scratch0());
3654 CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
3655 }
3658 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3659 int formal_parameter_count,
3660 int arity,
3661 LInstruction* instr,
3662 R1State r1_state) {
3663 bool dont_adapt_arguments =
3664 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3665 bool can_invoke_directly =
3666 dont_adapt_arguments || formal_parameter_count == arity;
3668 LPointerMap* pointers = instr->pointer_map();
3670 if (can_invoke_directly) {
3671 if (r1_state == R1_UNINITIALIZED) {
3672 __ Move(r1, function);
3673 }
3675 // Change context.
3676 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3678 // Set r0 to arguments count if adaptation is not needed. Assumes that r0
3679 // is available to write to at this point.
3680 if (dont_adapt_arguments) {
3681 __ mov(r0, Operand(arity));
3682 }
3684 // Invoke function.
3685 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3686 __ Call(ip);
3688 // Set up deoptimization.
3689 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3690 } else {
3691 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3692 ParameterCount count(arity);
3693 ParameterCount expected(formal_parameter_count);
3694 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3695 }
3696 }
3699 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3700 ASSERT(instr->context() != NULL);
3701 ASSERT(ToRegister(instr->context()).is(cp));
3702 Register input = ToRegister(instr->value());
3703 Register result = ToRegister(instr->result());
3704 Register scratch = scratch0();
3706 // Deoptimize if not a heap number.
3707 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3708 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3709 __ cmp(scratch, Operand(ip));
3710 DeoptimizeIf(ne, instr->environment());
3712 Label done;
3713 Register exponent = scratch0();
3714 scratch = no_reg;
3715 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3716 // Check the sign of the argument. If the argument is positive, just
3717 // return it.
3718 __ tst(exponent, Operand(HeapNumber::kSignMask));
3719 // Move the input to the result if necessary.
3720 __ Move(result, input);
3721 __ b(eq, &done);
3723 // Input is negative. Reverse its sign.
3724 // Preserve the value of all registers.
3725 {
3726 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3728 // Registers were saved at the safepoint, so we can use
3729 // many scratch registers.
3730 Register tmp1 = input.is(r1) ? r0 : r1;
3731 Register tmp2 = input.is(r2) ? r0 : r2;
3732 Register tmp3 = input.is(r3) ? r0 : r3;
3733 Register tmp4 = input.is(r4) ? r0 : r4;
3735 // exponent: floating point exponent value.
3737 Label allocated, slow;
3738 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3739 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3740 __ b(&allocated);
3742 // Slow case: Call the runtime system to do the number allocation.
3743 __ bind(&slow);
3745 CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
3746 instr->context());
3747 // Set the pointer to the new heap number in tmp.
3748 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3749 // Restore input_reg after call to runtime.
3750 __ LoadFromSafepointRegisterSlot(input, input);
3751 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3753 __ bind(&allocated);
3754 // exponent: floating point exponent value.
3755 // tmp1: allocated heap number.
3756 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3757 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3758 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3759 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3761 __ StoreToSafepointRegisterSlot(tmp1, result);
3762 }
3764 __ bind(&done);
3765 }
3768 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3769 Register input = ToRegister(instr->value());
3770 Register result = ToRegister(instr->result());
3771 __ cmp(input, Operand::Zero());
3772 __ Move(result, input, pl);
3773 // We can make rsb conditional because the previous cmp instruction
3774 // will clear the V (overflow) flag and rsb won't set this flag
3775 // if input is positive.
3776 __ rsb(result, input, Operand::Zero(), SetCC, mi);
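// Worked example: for input == kMinInt (0x80000000) the rsb computes
// 0 - kMinInt, which overflows (kMinInt has no positive counterpart in
// 32 bits) and sets V, so the deopt below fires; every other negative
// value negates cleanly.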
3777 // Deoptimize on overflow.
3778 DeoptimizeIf(vs, instr->environment());
3779 }
3782 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3783 // Class for deferred case.
3784 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3785 public:
3786 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3787 : LDeferredCode(codegen), instr_(instr) { }
3788 virtual void Generate() V8_OVERRIDE {
3789 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3790 }
3791 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3792 private:
3793 LMathAbs* instr_;
3794 };
3796 Representation r = instr->hydrogen()->value()->representation();
3797 if (r.IsDouble()) {
3798 DwVfpRegister input = ToDoubleRegister(instr->value());
3799 DwVfpRegister result = ToDoubleRegister(instr->result());
3800 __ vabs(result, input);
3801 } else if (r.IsSmiOrInteger32()) {
3802 EmitIntegerMathAbs(instr);
3803 } else {
3804 // Representation is tagged.
3805 DeferredMathAbsTaggedHeapNumber* deferred =
3806 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3807 Register input = ToRegister(instr->value());
3809 __ JumpIfNotSmi(input, deferred->entry());
3810 // If smi, handle it directly.
3811 EmitIntegerMathAbs(instr);
3812 __ bind(deferred->exit());
3813 }
3814 }
3817 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3818 DwVfpRegister input = ToDoubleRegister(instr->value());
3819 Register result = ToRegister(instr->result());
3820 Register input_high = scratch0();
3821 Label done, exact;
3823 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
3824 DeoptimizeIf(al, instr->environment());
3826 __ bind(&exact);
3827 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3828 // Test for -0.
3829 __ cmp(result, Operand::Zero());
3830 __ b(ne, &done);
3831 __ cmp(input_high, Operand::Zero());
3832 DeoptimizeIf(mi, instr->environment());
3833 }
3834 __ bind(&done);
3835 }
3838 void LCodeGen::DoMathRound(LMathRound* instr) {
3839 DwVfpRegister input = ToDoubleRegister(instr->value());
3840 Register result = ToRegister(instr->result());
3841 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3842 DwVfpRegister input_plus_dot_five = double_scratch1;
3843 Register input_high = scratch0();
3844 DwVfpRegister dot_five = double_scratch0();
3845 Label convert, done;
3847 __ Vmov(dot_five, 0.5, scratch0());
3848 __ vabs(double_scratch1, input);
3849 __ VFPCompareAndSetFlags(double_scratch1, dot_five);
3850 // If input is in [-0.5, -0], the result is -0.
3851 // If input is in [+0, +0.5[, the result is +0.
3852 // If the input is +0.5, the result is 1.
3853 __ b(hi, &convert); // Out of [-0.5, +0.5].
3854 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3855 __ VmovHigh(input_high, input);
3856 __ cmp(input_high, Operand::Zero());
3857 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0].
3858 }
3859 __ VFPCompareAndSetFlags(input, dot_five);
3860 __ mov(result, Operand(1), LeaveCC, eq); // +0.5.
3861 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
3862 // flag kBailoutOnMinusZero.
3863 __ mov(result, Operand::Zero(), LeaveCC, ne);
3864 __ b(&done);
3866 __ bind(&convert);
3867 __ vadd(input_plus_dot_five, input, dot_five);
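// From here on the result is floor(input + 0.5). The |input| <= 0.5 cases
// were peeled off above so that -0 semantics and the +0.5 tie stay exact.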
3868 // Reuse dot_five (double_scratch0) as we no longer need this value.
3869 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
3870 &done, &done);
3871 DeoptimizeIf(al, instr->environment());
3872 __ bind(&done);
3873 }
3876 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3877 DwVfpRegister input = ToDoubleRegister(instr->value());
3878 DwVfpRegister result = ToDoubleRegister(instr->result());
3879 __ vsqrt(result, input);
3880 }
3883 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3884 DwVfpRegister input = ToDoubleRegister(instr->value());
3885 DwVfpRegister result = ToDoubleRegister(instr->result());
3886 DwVfpRegister temp = double_scratch0();
3888 // Note that according to ECMA-262 15.8.2.13:
3889 // Math.pow(-Infinity, 0.5) == Infinity
3890 // Math.sqrt(-Infinity) == NaN
3891 Label done;
3892 __ vmov(temp, -V8_INFINITY, scratch0());
3893 __ VFPCompareAndSetFlags(input, temp);
3894 __ vneg(result, temp, eq);
3895 __ b(&done, eq);
3897 // Add +0 to convert -0 to +0.
3898 __ vadd(result, input, kDoubleRegZero);
3899 __ vsqrt(result, result);
3900 __ bind(&done);
3901 }
3904 void LCodeGen::DoPower(LPower* instr) {
3905 Representation exponent_type = instr->hydrogen()->right()->representation();
3906 // Having marked this as a call, we can use any registers.
3907 // Just make sure that the input/output registers are the expected ones.
3908 ASSERT(!instr->right()->IsDoubleRegister() ||
3909 ToDoubleRegister(instr->right()).is(d1));
3910 ASSERT(!instr->right()->IsRegister() ||
3911 ToRegister(instr->right()).is(r2));
3912 ASSERT(ToDoubleRegister(instr->left()).is(d0));
3913 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3915 if (exponent_type.IsSmi()) {
3916 MathPowStub stub(MathPowStub::TAGGED);
3917 __ CallStub(&stub);
3918 } else if (exponent_type.IsTagged()) {
3919 Label no_deopt;
3920 __ JumpIfSmi(r2, &no_deopt);
3921 __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
3922 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3923 __ cmp(r6, Operand(ip));
3924 DeoptimizeIf(ne, instr->environment());
3925 __ bind(&no_deopt);
3926 MathPowStub stub(MathPowStub::TAGGED);
3927 __ CallStub(&stub);
3928 } else if (exponent_type.IsInteger32()) {
3929 MathPowStub stub(MathPowStub::INTEGER);
3930 __ CallStub(&stub);
3931 } else {
3932 ASSERT(exponent_type.IsDouble());
3933 MathPowStub stub(MathPowStub::DOUBLE);
3934 __ CallStub(&stub);
3935 }
3936 }
3939 void LCodeGen::DoMathExp(LMathExp* instr) {
3940 DwVfpRegister input = ToDoubleRegister(instr->value());
3941 DwVfpRegister result = ToDoubleRegister(instr->result());
3942 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3943 DwVfpRegister double_scratch2 = double_scratch0();
3944 Register temp1 = ToRegister(instr->temp1());
3945 Register temp2 = ToRegister(instr->temp2());
3947 MathExpGenerator::EmitMathExp(
3948 masm(), input, result, double_scratch1, double_scratch2,
3949 temp1, temp2, scratch0());
3950 }
3953 void LCodeGen::DoMathLog(LMathLog* instr) {
3954 __ PrepareCallCFunction(0, 1, scratch0());
3955 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3956 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3957 0, 1);
3958 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
3959 }
3962 void LCodeGen::DoMathClz32(LMathClz32* instr) {
3963 Register input = ToRegister(instr->value());
3964 Register result = ToRegister(instr->result());
3965 __ clz(result, input);
3966 }
3969 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3970 ASSERT(ToRegister(instr->context()).is(cp));
3971 ASSERT(ToRegister(instr->function()).is(r1));
3972 ASSERT(instr->HasPointerMap());
3974 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3975 if (known_function.is_null()) {
3976 LPointerMap* pointers = instr->pointer_map();
3977 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3978 ParameterCount count(instr->arity());
3979 __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
3980 } else {
3981 CallKnownFunction(known_function,
3982 instr->hydrogen()->formal_parameter_count(),
3983 instr->arity(),
3984 instr,
3985 R1_CONTAINS_TARGET);
3986 }
3987 }
3990 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3991 ASSERT(ToRegister(instr->result()).is(r0));
3993 LPointerMap* pointers = instr->pointer_map();
3994 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3996 if (instr->target()->IsConstantOperand()) {
3997 LConstantOperand* target = LConstantOperand::cast(instr->target());
3998 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3999 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4000 PlatformCallInterfaceDescriptor* call_descriptor =
4001 instr->descriptor()->platform_specific_descriptor();
4002 __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
4003 call_descriptor->storage_mode());
4004 } else {
4005 ASSERT(instr->target()->IsRegister());
4006 Register target = ToRegister(instr->target());
4007 generator.BeforeCall(__ CallSize(target));
4008 __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4009 __ Call(target);
4010 }
4011 generator.AfterCall();
4012 }
4015 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4016 ASSERT(ToRegister(instr->function()).is(r1));
4017 ASSERT(ToRegister(instr->result()).is(r0));
4019 if (instr->hydrogen()->pass_argument_count()) {
4020 __ mov(r0, Operand(instr->arity()));
4021 }
4023 // Change context.
4024 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
4026 // Load the code entry address
4027 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
4028 __ Call(ip);
4030 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4031 }
4034 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4035 ASSERT(ToRegister(instr->context()).is(cp));
4036 ASSERT(ToRegister(instr->function()).is(r1));
4037 ASSERT(ToRegister(instr->result()).is(r0));
4039 int arity = instr->arity();
4040 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
4041 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4042 }
4045 void LCodeGen::DoCallNew(LCallNew* instr) {
4046 ASSERT(ToRegister(instr->context()).is(cp));
4047 ASSERT(ToRegister(instr->constructor()).is(r1));
4048 ASSERT(ToRegister(instr->result()).is(r0));
4050 __ mov(r0, Operand(instr->arity()));
4051 // No cell in r2 for construct type feedback in optimized code
4052 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4053 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
4054 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4055 }
4058 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4059 ASSERT(ToRegister(instr->context()).is(cp));
4060 ASSERT(ToRegister(instr->constructor()).is(r1));
4061 ASSERT(ToRegister(instr->result()).is(r0));
4063 __ mov(r0, Operand(instr->arity()));
4064 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
4065 ElementsKind kind = instr->hydrogen()->elements_kind();
4066 AllocationSiteOverrideMode override_mode =
4067 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
4068 ? DISABLE_ALLOCATION_SITES
4069 : DONT_OVERRIDE;
4071 if (instr->arity() == 0) {
4072 ArrayNoArgumentConstructorStub stub(kind, override_mode);
4073 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4074 } else if (instr->arity() == 1) {
4075 Label done;
4076 if (IsFastPackedElementsKind(kind)) {
4077 Label packed_case;
4078 // We might need a holey kind here: inspect the first argument to
4079 // decide between the packed and the holey stub.
4080 __ ldr(r5, MemOperand(sp, 0));
4081 __ cmp(r5, Operand::Zero());
4082 __ b(eq, &packed_case);
4084 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4085 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
4086 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4087 __ jmp(&done);
4088 __ bind(&packed_case);
4089 }
4091 ArraySingleArgumentConstructorStub stub(kind, override_mode);
4092 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4093 __ bind(&done);
4094 } else {
4095 ArrayNArgumentsConstructorStub stub(kind, override_mode);
4096 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
4097 }
4098 }
4101 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4102 CallRuntime(instr->function(), instr->arity(), instr);
4103 }
4106 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4107 Register function = ToRegister(instr->function());
4108 Register code_object = ToRegister(instr->code_object());
4109 __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
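// A Code object is a tagged pointer to the object start; adding
// Code::kHeaderSize - kHeapObjectTag converts it to the raw address of the
// first instruction, which is the value the code-entry field holds.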
4110 __ str(code_object,
4111 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4112 }
4115 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4116 Register result = ToRegister(instr->result());
4117 Register base = ToRegister(instr->base_object());
4118 if (instr->offset()->IsConstantOperand()) {
4119 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
4120 __ add(result, base, Operand(ToInteger32(offset)));
4121 } else {
4122 Register offset = ToRegister(instr->offset());
4123 __ add(result, base, offset);
4124 }
4125 }
4128 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4129 Representation representation = instr->representation();
4131 Register object = ToRegister(instr->object());
4132 Register scratch = scratch0();
4133 HObjectAccess access = instr->hydrogen()->access();
4134 int offset = access.offset();
4136 if (access.IsExternalMemory()) {
4137 Register value = ToRegister(instr->value());
4138 MemOperand operand = MemOperand(object, offset);
4139 __ Store(value, operand, representation);
4140 return;
4141 }
4143 Handle<Map> transition = instr->transition();
4144 SmiCheck check_needed =
4145 instr->hydrogen()->value()->IsHeapObject()
4146 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4148 ASSERT(!(representation.IsSmi() &&
4149 instr->value()->IsConstantOperand() &&
4150 !IsSmi(LConstantOperand::cast(instr->value()))));
4151 if (representation.IsHeapObject()) {
4152 Register value = ToRegister(instr->value());
4153 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4154 __ SmiTst(value);
4155 DeoptimizeIf(eq, instr->environment());
4157 // We know now that value is not a smi, so we can omit the check below.
4158 check_needed = OMIT_SMI_CHECK;
4159 }
4160 } else if (representation.IsDouble()) {
4161 ASSERT(transition.is_null());
4162 ASSERT(access.IsInobject());
4163 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4164 DwVfpRegister value = ToDoubleRegister(instr->value());
4165 __ vstr(value, FieldMemOperand(object, offset));
4166 return;
4167 }
4169 if (!transition.is_null()) {
4170 __ mov(scratch, Operand(transition));
4171 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4172 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
4173 Register temp = ToRegister(instr->temp());
4174 // Update the write barrier for the map field.
4175 __ RecordWriteField(object,
4176 HeapObject::kMapOffset,
4177 scratch,
4178 temp,
4179 GetLinkRegisterState(),
4180 kSaveFPRegs,
4181 OMIT_REMEMBERED_SET,
4182 OMIT_SMI_CHECK);
4183 }
4184 }
4186 // Do the store.
4187 Register value = ToRegister(instr->value());
4188 if (access.IsInobject()) {
4189 MemOperand operand = FieldMemOperand(object, offset);
4190 __ Store(value, operand, representation);
4191 if (instr->hydrogen()->NeedsWriteBarrier()) {
4192 // Update the write barrier for the object for in-object properties.
4193 __ RecordWriteField(object,
4194 offset,
4195 value,
4196 scratch,
4197 GetLinkRegisterState(),
4198 kSaveFPRegs,
4199 EMIT_REMEMBERED_SET,
4200 check_needed);
4201 }
4202 } else {
4203 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4204 MemOperand operand = FieldMemOperand(scratch, offset);
4205 __ Store(value, operand, representation);
4206 if (instr->hydrogen()->NeedsWriteBarrier()) {
4207 // Update the write barrier for the properties array.
4208 // object is used as a scratch register.
4209 __ RecordWriteField(scratch,
4210 offset,
4211 value,
4212 object,
4213 GetLinkRegisterState(),
4214 kSaveFPRegs,
4215 EMIT_REMEMBERED_SET,
4216 check_needed);
4217 }
4218 }
4219 }
4222 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4223 ASSERT(ToRegister(instr->context()).is(cp));
4224 ASSERT(ToRegister(instr->object()).is(r1));
4225 ASSERT(ToRegister(instr->value()).is(r0));
4227 // Name is always in r2.
4228 __ mov(r2, Operand(instr->name()));
4229 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4230 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4231 }
4234 void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
4235 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4236 Label done;
4237 __ b(NegateCondition(condition), &done);
4238 __ stop("eliminated bounds check failed");
4239 __ bind(&done);
4240 } else {
4241 DeoptimizeIf(condition, check->environment());
4242 }
4243 }
4246 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4247 if (instr->hydrogen()->skip_check()) return;
4249 if (instr->index()->IsConstantOperand()) {
4250 int constant_index =
4251 ToInteger32(LConstantOperand::cast(instr->index()));
4252 if (instr->hydrogen()->length()->representation().IsSmi()) {
4253 __ mov(ip, Operand(Smi::FromInt(constant_index)));
4254 } else {
4255 __ mov(ip, Operand(constant_index));
4256 }
4257 __ cmp(ip, ToRegister(instr->length()));
4258 } else {
4259 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
4260 }
4261 Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
4262 ApplyCheckIf(condition, instr);
4263 }
4266 template<class T>
4267 void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
4268 ASSERT(instr->value()->IsRegister());
4269 Register temp = ToRegister(instr->temp());
4270 Register input_reg = ToRegister(instr->value());
4271 __ SmiTst(input_reg);
4272 DeoptimizeIf(eq, instr->environment());
4273 __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
4274 DeoptimizeIf(ne, instr->environment());
4276 STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
4277 Register external_pointer = ToRegister(instr->elements());
4278 Register key = no_reg;
4279 ElementsKind elements_kind = instr->elements_kind();
4280 bool key_is_constant = instr->key()->IsConstantOperand();
4281 int constant_key = 0;
4282 if (key_is_constant) {
4283 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
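// The guard below appears to reject constant keys whose top four bits are
// set, since shifting by element_size_shift (up to 4 for 16-byte SIMD
// lanes) would overflow the 32-bit offset arithmetic.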
4284 if (constant_key & 0xF0000000) {
4285 Abort(kArrayIndexConstantValueTooBig);
4286 }
4287 } else {
4288 key = ToRegister(instr->key());
4289 }
4290 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4291 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4292 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4293 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
4294 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
4295 : 0;
4297 int base_offset =
4298 (instr->additional_index() << element_size_shift) + additional_offset;
4299 Register address = scratch0();
4300 if (key_is_constant) {
4301 if (constant_key != 0) {
4302 __ add(address, external_pointer,
4303 Operand(constant_key << element_size_shift));
4304 } else {
4305 address = external_pointer;
4306 }
4307 } else {
4308 __ add(address, external_pointer, Operand(key, LSL, shift_size));
4309 }
4311 for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
4312 __ ldr(temp, MemOperand(input_reg,
4313 T::kValueOffset - kHeapObjectTag + offset));
4314 __ str(temp, MemOperand(address, base_offset + offset));
4315 }
4316 }
4319 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4320 Register external_pointer = ToRegister(instr->elements());
4321 Register key = no_reg;
4322 ElementsKind elements_kind = instr->elements_kind();
4323 bool key_is_constant = instr->key()->IsConstantOperand();
4324 int constant_key = 0;
4325 if (key_is_constant) {
4326 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4327 if (constant_key & 0xF0000000) {
4328 Abort(kArrayIndexConstantValueTooBig);
4329 }
4330 } else {
4331 key = ToRegister(instr->key());
4332 }
4333 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4334 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4335 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4336 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
4337 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
4338 : 0;
4340 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4341 elements_kind == FLOAT32_ELEMENTS ||
4342 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4343 elements_kind == FLOAT64_ELEMENTS) {
4344 int base_offset =
4345 (instr->additional_index() << element_size_shift) + additional_offset;
4346 Register address = scratch0();
4347 DwVfpRegister value(ToDoubleRegister(instr->value()));
4348 if (key_is_constant) {
4349 if (constant_key != 0) {
4350 __ add(address, external_pointer,
4351 Operand(constant_key << element_size_shift));
4352 } else {
4353 address = external_pointer;
4354 }
4355 } else {
4356 __ add(address, external_pointer, Operand(key, LSL, shift_size));
4357 }
4358 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4359 elements_kind == FLOAT32_ELEMENTS) {
4360 __ vcvt_f32_f64(double_scratch0().low(), value);
4361 __ vstr(double_scratch0().low(), address, base_offset);
4362 } else { // Storing doubles, not floats.
4363 __ vstr(value, address, base_offset);
4364 }
4365 } else if (IsFloat32x4ElementsKind(elements_kind)) {
4366 DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
4367 } else if (IsInt32x4ElementsKind(elements_kind)) {
4368 DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
4369 } else {
4370 Register value(ToRegister(instr->value()));
4371 MemOperand mem_operand = PrepareKeyedOperand(
4372 key, external_pointer, key_is_constant, constant_key,
4373 element_size_shift, shift_size,
4374 instr->additional_index(), additional_offset);
4375 switch (elements_kind) {
4376 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4377 case EXTERNAL_INT8_ELEMENTS:
4378 case EXTERNAL_UINT8_ELEMENTS:
4379 case UINT8_ELEMENTS:
4380 case UINT8_CLAMPED_ELEMENTS:
4381 case INT8_ELEMENTS:
4382 __ strb(value, mem_operand);
4383 break;
4384 case EXTERNAL_INT16_ELEMENTS:
4385 case EXTERNAL_UINT16_ELEMENTS:
4386 case INT16_ELEMENTS:
4387 case UINT16_ELEMENTS:
4388 __ strh(value, mem_operand);
4389 break;
4390 case EXTERNAL_INT32_ELEMENTS:
4391 case EXTERNAL_UINT32_ELEMENTS:
4392 case INT32_ELEMENTS:
4393 case UINT32_ELEMENTS:
4394 __ str(value, mem_operand);
4395 break;
4396 case FLOAT32_ELEMENTS:
4397 case FLOAT64_ELEMENTS:
4398 case EXTERNAL_FLOAT32_ELEMENTS:
4399 case EXTERNAL_FLOAT64_ELEMENTS:
4400 case FLOAT32x4_ELEMENTS:
4401 case INT32x4_ELEMENTS:
4402 case EXTERNAL_FLOAT32x4_ELEMENTS:
4403 case EXTERNAL_INT32x4_ELEMENTS:
4404 case FAST_DOUBLE_ELEMENTS:
4405 case FAST_ELEMENTS:
4406 case FAST_SMI_ELEMENTS:
4407 case FAST_HOLEY_DOUBLE_ELEMENTS:
4408 case FAST_HOLEY_ELEMENTS:
4409 case FAST_HOLEY_SMI_ELEMENTS:
4410 case DICTIONARY_ELEMENTS:
4411 case SLOPPY_ARGUMENTS_ELEMENTS:
4412 UNREACHABLE();
4413 break;
4414 }
4415 }
4416 }
4419 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4420 DwVfpRegister value = ToDoubleRegister(instr->value());
4421 Register elements = ToRegister(instr->elements());
4422 Register scratch = scratch0();
4423 DwVfpRegister double_scratch = double_scratch0();
4424 bool key_is_constant = instr->key()->IsConstantOperand();
4426 // Calculate the effective address of the slot in the array to store the
4427 // double value.
4428 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4429 if (key_is_constant) {
4430 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4431 if (constant_key & 0xF0000000) {
4432 Abort(kArrayIndexConstantValueTooBig);
4433 }
4434 __ add(scratch, elements,
4435 Operand((constant_key << element_size_shift) +
4436 FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4437 } else {
4438 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4439 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4440 __ add(scratch, elements,
4441 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4442 __ add(scratch, scratch,
4443 Operand(ToRegister(instr->key()), LSL, shift_size));
4444 }
4446 if (instr->NeedsCanonicalization()) {
4447 // Force a canonical NaN.
4448 if (masm()->emit_debug_code()) {
4449 __ vmrs(ip);
4450 __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4451 __ Assert(ne, kDefaultNaNModeNotSet);
4452 }
4453 __ VFPCanonicalizeNaN(double_scratch, value);
4454 __ vstr(double_scratch, scratch,
4455 instr->additional_index() << element_size_shift);
4456 } else {
4457 __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4458 }
4459 }
4462 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4463 Register value = ToRegister(instr->value());
4464 Register elements = ToRegister(instr->elements());
4465 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4466 : no_reg;
4467 Register scratch = scratch0();
4468 Register store_base = scratch;
4469 int offset = 0;
4471 // Do the store.
4472 if (instr->key()->IsConstantOperand()) {
4473 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4474 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4475 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4476 instr->additional_index());
4477 store_base = elements;
4478 } else {
4479 // Even though the HLoadKeyed instruction forces the input
4480 // representation for the key to be an integer, the input gets replaced
4481 // during bounds check elimination with the index argument to the bounds
4482 // check, which can be tagged, so that case must be handled here, too.
4483 if (instr->hydrogen()->key()->representation().IsSmi()) {
4484 __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
4485 } else {
4486 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
4487 }
4488 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4489 }
4490 __ str(value, FieldMemOperand(store_base, offset));
4492 if (instr->hydrogen()->NeedsWriteBarrier()) {
4493 SmiCheck check_needed =
4494 instr->hydrogen()->value()->IsHeapObject()
4495 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
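// check_needed only tunes the write barrier: when the value is statically
// known to be a heap object, the barrier's inline smi filter is skipped.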
4496 // Compute address of modified element and store it into key register.
4497 __ add(key, store_base, Operand(offset - kHeapObjectTag));
4498 __ RecordWrite(elements,
4499 key,
4500 value,
4501 GetLinkRegisterState(),
4502 kSaveFPRegs,
4503 EMIT_REMEMBERED_SET,
4504 check_needed);
4505 }
4506 }
4509 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4510 // By cases: external (typed) elements, fast double elements, fast elements.
4511 if (instr->is_typed_elements()) {
4512 DoStoreKeyedExternalArray(instr);
4513 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4514 DoStoreKeyedFixedDoubleArray(instr);
4515 } else {
4516 DoStoreKeyedFixedArray(instr);
4517 }
4518 }
4521 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4522 ASSERT(ToRegister(instr->context()).is(cp));
4523 ASSERT(ToRegister(instr->object()).is(r2));
4524 ASSERT(ToRegister(instr->key()).is(r1));
4525 ASSERT(ToRegister(instr->value()).is(r0));
4527 Handle<Code> ic = instr->strict_mode() == STRICT
4528 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4529 : isolate()->builtins()->KeyedStoreIC_Initialize();
4530 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4531 }
4534 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4535 Register object_reg = ToRegister(instr->object());
4536 Register scratch = scratch0();
4538 Handle<Map> from_map = instr->original_map();
4539 Handle<Map> to_map = instr->transitioned_map();
4540 ElementsKind from_kind = instr->from_kind();
4541 ElementsKind to_kind = instr->to_kind();
4543 Label not_applicable;
4544 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4545 __ cmp(scratch, Operand(from_map));
4546 __ b(ne, ¬_applicable);
4548 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4549 Register new_map_reg = ToRegister(instr->new_map_temp());
4550 __ mov(new_map_reg, Operand(to_map));
4551 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4552 // Write barrier.
4553 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4554 scratch, GetLinkRegisterState(), kDontSaveFPRegs);
4555 } else {
4556 ASSERT(ToRegister(instr->context()).is(cp));
4557 PushSafepointRegistersScope scope(
4558 this, Safepoint::kWithRegistersAndDoubles);
4559 __ Move(r0, object_reg);
4560 __ Move(r1, to_map);
4561 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
4562 TransitionElementsKindStub stub(from_kind, to_kind, is_js_array);
4563 __ CallStub(&stub);
4564 RecordSafepointWithRegistersAndDoubles(
4565 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4566 }
4567 __ bind(&not_applicable);
4568 }
4571 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4572 Register object = ToRegister(instr->object());
4573 Register temp = ToRegister(instr->temp());
4574 Label no_memento_found;
4575 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4576 DeoptimizeIf(eq, instr->environment());
4577 __ bind(&no_memento_found);
4578 }
4581 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4582 ASSERT(ToRegister(instr->context()).is(cp));
4583 ASSERT(ToRegister(instr->left()).is(r1));
4584 ASSERT(ToRegister(instr->right()).is(r0));
4585 StringAddStub stub(instr->hydrogen()->flags(),
4586 instr->hydrogen()->pretenure_flag());
4587 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
4588 }
4591 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4592 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4593 public:
4594 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4595 : LDeferredCode(codegen), instr_(instr) { }
4596 virtual void Generate() V8_OVERRIDE {
4597 codegen()->DoDeferredStringCharCodeAt(instr_);
4598 }
4599 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4600 private:
4601 LStringCharCodeAt* instr_;
4602 };
4604 DeferredStringCharCodeAt* deferred =
4605 new(zone()) DeferredStringCharCodeAt(this, instr);
4607 StringCharLoadGenerator::Generate(masm(),
4608 ToRegister(instr->string()),
4609 ToRegister(instr->index()),
4610 ToRegister(instr->result()),
4611 deferred->entry());
4612 __ bind(deferred->exit());
4613 }
4616 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4617 Register string = ToRegister(instr->string());
4618 Register result = ToRegister(instr->result());
4619 Register scratch = scratch0();
4621 // TODO(3095996): Get rid of this. For now, we need to make the
4622 // result register contain a valid pointer because it is already
4623 // contained in the register pointer map.
4624 __ mov(result, Operand::Zero());
4626 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4628 // Push the index as a smi. This is safe because of the checks in
4629 // DoStringCharCodeAt above.
4630 if (instr->index()->IsConstantOperand()) {
4631 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4632 __ mov(scratch, Operand(Smi::FromInt(const_index)));
4633 __ push(scratch);
4634 } else {
4635 Register index = ToRegister(instr->index());
4636 __ SmiTag(index);
4637 __ push(index);
4638 }
4639 CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
4640 instr->context());
4641 __ AssertSmi(r0);
4642 __ SmiUntag(r0);
4643 __ StoreToSafepointRegisterSlot(r0, result);
4644 }
4647 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4648 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4649 public:
4650 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4651 : LDeferredCode(codegen), instr_(instr) { }
4652 virtual void Generate() V8_OVERRIDE {
4653 codegen()->DoDeferredStringCharFromCode(instr_);
4654 }
4655 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4656 private:
4657 LStringCharFromCode* instr_;
4658 };
4660 DeferredStringCharFromCode* deferred =
4661 new(zone()) DeferredStringCharFromCode(this, instr);
4663 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4664 Register char_code = ToRegister(instr->char_code());
4665 Register result = ToRegister(instr->result());
4666 ASSERT(!char_code.is(result));
4668 __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
4669 __ b(hi, deferred->entry());
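// Fast path: index the single-character string cache with the code unit.
// A cache hit yields the interned one-character string; the undefined
// sentinel checked below means a miss and takes the deferred runtime call.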
4670 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4671 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4672 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4673 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4674 __ cmp(result, ip);
4675 __ b(eq, deferred->entry());
4676 __ bind(deferred->exit());
4677 }
4680 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4681 Register char_code = ToRegister(instr->char_code());
4682 Register result = ToRegister(instr->result());
4684 // TODO(3095996): Get rid of this. For now, we need to make the
4685 // result register contain a valid pointer because it is already
4686 // contained in the register pointer map.
4687 __ mov(result, Operand::Zero());
4689 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4690 __ SmiTag(char_code);
4691 __ push(char_code);
4692 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4693 __ StoreToSafepointRegisterSlot(r0, result);
4694 }
4697 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4698 LOperand* input = instr->value();
4699 ASSERT(input->IsRegister() || input->IsStackSlot());
4700 LOperand* output = instr->result();
4701 ASSERT(output->IsDoubleRegister());
4702 SwVfpRegister single_scratch = double_scratch0().low();
4703 if (input->IsStackSlot()) {
4704 Register scratch = scratch0();
4705 __ ldr(scratch, ToMemOperand(input));
4706 __ vmov(single_scratch, scratch);
4707 } else {
4708 __ vmov(single_scratch, ToRegister(input));
4709 }
4710 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4711 }
4714 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4715 LOperand* input = instr->value();
4716 LOperand* output = instr->result();
4718 SwVfpRegister flt_scratch = double_scratch0().low();
4719 __ vmov(flt_scratch, ToRegister(input));
4720 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4721 }
4724 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4725 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4726 public:
4727 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4728 : LDeferredCode(codegen), instr_(instr) { }
4729 virtual void Generate() V8_OVERRIDE {
4730 codegen()->DoDeferredNumberTagIU(instr_,
4731 instr_->value(),
4732 instr_->temp1(),
4733 instr_->temp2(),
4734 SIGNED_INT32);
4735 }
4736 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4737 private:
4738 LNumberTagI* instr_;
4739 };
4741 Register src = ToRegister(instr->value());
4742 Register dst = ToRegister(instr->result());
4744 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
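// SmiTag with SetCC tags by adding the value to itself, so the V flag is
// set exactly when the value falls outside the 31-bit smi range; that
// overflow case branches to the deferred heap-number allocation.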
4745 __ SmiTag(dst, src, SetCC);
4746 __ b(vs, deferred->entry());
4747 __ bind(deferred->exit());
4748 }
4751 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4752 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4753 public:
4754 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4755 : LDeferredCode(codegen), instr_(instr) { }
4756 virtual void Generate() V8_OVERRIDE {
4757 codegen()->DoDeferredNumberTagIU(instr_,
4758 instr_->value(),
4759 instr_->temp1(),
4760 instr_->temp2(),
4761 UNSIGNED_INT32);
4762 }
4763 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4764 private:
4765 LNumberTagU* instr_;
4766 };
4768 Register input = ToRegister(instr->value());
4769 Register result = ToRegister(instr->result());
4771 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4772 __ cmp(input, Operand(Smi::kMaxValue));
4773 __ b(hi, deferred->entry());
4774 __ SmiTag(result, input);
4775 __ bind(deferred->exit());
4776 }
4779 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4780 LOperand* value,
4781 LOperand* temp1,
4782 LOperand* temp2,
4783 IntegerSignedness signedness) {
4784 Label done, slow;
4785 Register src = ToRegister(value);
4786 Register dst = ToRegister(instr->result());
4787 Register tmp1 = scratch0();
4788 Register tmp2 = ToRegister(temp1);
4789 Register tmp3 = ToRegister(temp2);
4790 LowDwVfpRegister dbl_scratch = double_scratch0();
4792 if (signedness == SIGNED_INT32) {
4793 // There was overflow, so bits 30 and 31 of the original integer
4794 // disagree. Try to allocate a heap number in new space and store
4795 // the value in there. If that fails, call the runtime system.
4796 if (dst.is(src)) {
4797 __ SmiUntag(src, dst);
4798 __ eor(src, src, Operand(0x80000000));
4799 }
4800 __ vmov(dbl_scratch.low(), src);
4801 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4802 } else {
4803 __ vmov(dbl_scratch.low(), src);
4804 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4805 }
4807 if (FLAG_inline_new) {
4808 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4809 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4810 __ b(&done);
4811 }
4813 // Slow case: Call the runtime system to do the number allocation.
4814 __ bind(&slow);
4816 // TODO(3095996): Put a valid pointer value in the stack slot where the
4817 // result register is stored, as this register is in the pointer map, but
4818 // contains an integer value.
4819 __ mov(dst, Operand::Zero());
4821 // Preserve the value of all registers.
4822 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4824 // NumberTagI and NumberTagD use the context from the frame, rather than
4825 // the environment's HContext or HInlinedContext value.
4826 // They only call Runtime::kHiddenAllocateHeapNumber.
4827 // The corresponding HChange instructions are added in a phase that does
4828 // not have easy access to the local context.
4829 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4830 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4831 RecordSafepointWithRegisters(
4832 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4833 __ sub(r0, r0, Operand(kHeapObjectTag));
4834 __ StoreToSafepointRegisterSlot(r0, dst);
4835 }
4837 // Done. Put the value in dbl_scratch into the value of the allocated heap
4838 // number.
4839 __ bind(&done);
4840 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4841 __ add(dst, dst, Operand(kHeapObjectTag));
4842 }
4845 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4846 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4847 public:
4848 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4849 : LDeferredCode(codegen), instr_(instr) { }
4850 virtual void Generate() V8_OVERRIDE {
4851 codegen()->DoDeferredNumberTagD(instr_);
4852 }
4853 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4854 private:
4855 LNumberTagD* instr_;
4856 };
4858 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4859 Register scratch = scratch0();
4860 Register reg = ToRegister(instr->result());
4861 Register temp1 = ToRegister(instr->temp());
4862 Register temp2 = ToRegister(instr->temp2());
4864 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4865 if (FLAG_inline_new) {
4866 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4867 // We want the untagged address first for performance
4868 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4869 DONT_TAG_RESULT);
4870 } else {
4871 __ jmp(deferred->entry());
4872 }
4873 __ bind(deferred->exit());
4874 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4875 // Now that we have finished with the object's real address, tag it.
4876 __ add(reg, reg, Operand(kHeapObjectTag));
4877 }
4880 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4881 // TODO(3095996): Get rid of this. For now, we need to make the
4882 // result register contain a valid pointer because it is already
4883 // contained in the register pointer map.
4884 Register reg = ToRegister(instr->result());
4885 __ mov(reg, Operand::Zero());
4887 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4888 // NumberTagI and NumberTagD use the context from the frame, rather than
4889 // the environment's HContext or HInlinedContext value.
4890 // They only call Runtime::kHiddenAllocateHeapNumber.
4891 // The corresponding HChange instructions are added in a phase that does
4892 // not have easy access to the local context.
4893 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4894 __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
4895 RecordSafepointWithRegisters(
4896 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4897 __ sub(r0, r0, Operand(kHeapObjectTag));
4898 __ StoreToSafepointRegisterSlot(r0, reg);
4899 }
4902 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4903 HChange* hchange = instr->hydrogen();
4904 Register input = ToRegister(instr->value());
4905 Register output = ToRegister(instr->result());
4906 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4907 hchange->value()->CheckFlag(HValue::kUint32)) {
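// A uint32 fits in a smi only when it is below 1 << 30, i.e. when bits 30
// and 31 are both clear, which is exactly what the 0xc0000000 mask tests.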
4908 __ tst(input, Operand(0xc0000000));
4909 DeoptimizeIf(ne, instr->environment());
4910 }
4911 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4912 !hchange->value()->CheckFlag(HValue::kUint32)) {
4913 __ SmiTag(output, input, SetCC);
4914 DeoptimizeIf(vs, instr->environment());
4915 } else {
4916 __ SmiTag(output, input);
4917 }
4918 }
4921 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4922 Register input = ToRegister(instr->value());
4923 Register result = ToRegister(instr->result());
4924 if (instr->needs_check()) {
4925 STATIC_ASSERT(kHeapObjectTag == 1);
4926 // If the input is a HeapObject, SmiUntag will set the carry flag.
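// SmiUntag with SetCC shifts right by one and leaves the shifted-out tag
// bit in the carry flag: clear for smis, set for heap objects (given
// kHeapObjectTag == 1 as asserted above).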
4927 __ SmiUntag(result, input, SetCC);
4928 DeoptimizeIf(cs, instr->environment());
4929 } else {
4930 __ SmiUntag(result, input);
4931 }
4932 }
4935 void LCodeGen::EmitNumberUntagD(Register input_reg,
4936 DwVfpRegister result_reg,
4937 bool can_convert_undefined_to_nan,
4938 bool deoptimize_on_minus_zero,
4939 LEnvironment* env,
4940 NumberUntagDMode mode) {
4941 Register scratch = scratch0();
4942 SwVfpRegister flt_scratch = double_scratch0().low();
4943 ASSERT(!result_reg.is(double_scratch0()));
4944 Label convert, load_smi, done;
4945 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4946 // Smi check.
4947 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4948 // Heap number map check.
4949 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4950 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4951 __ cmp(scratch, Operand(ip));
4952 if (can_convert_undefined_to_nan) {
4953 __ b(ne, &convert);
4954 } else {
4955 DeoptimizeIf(ne, env);
4956 }
4957 // load heap number
4958 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
4959 if (deoptimize_on_minus_zero) {
4960 __ VmovLow(scratch, result_reg);
4961 __ cmp(scratch, Operand::Zero());
4962 __ b(ne, &done);
4963 __ VmovHigh(scratch, result_reg);
4964 __ cmp(scratch, Operand(HeapNumber::kSignMask));
4965 DeoptimizeIf(eq, env);
4966 }
4967 __ jmp(&done);
4968 if (can_convert_undefined_to_nan) {
4969 __ bind(&convert);
4970 // Convert undefined (and hole) to NaN.
4971 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4972 __ cmp(input_reg, Operand(ip));
4973 DeoptimizeIf(ne, env);
4974 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4975 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
4976 __ jmp(&done);
4977 }
4978 } else {
4979 __ SmiUntag(scratch, input_reg);
4980 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4981 }
4982 // Smi to double register conversion
4983 __ bind(&load_smi);
4984 // scratch: untagged value of input_reg
4985 __ vmov(flt_scratch, scratch);
4986 __ vcvt_f64_s32(result_reg, flt_scratch);
4987 __ bind(&done);
4988 }
4991 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4992 Register input_reg = ToRegister(instr->value());
4993 Register scratch1 = scratch0();
4994 Register scratch2 = ToRegister(instr->temp());
4995 LowDwVfpRegister double_scratch = double_scratch0();
4996 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4998 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4999 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
5001 Label done;
5003 // The input was optimistically untagged; revert it.
5004 // The carry flag is set when we reach this deferred code as we just executed
5005 // SmiUntag(heap_object, SetCC)
5006 STATIC_ASSERT(kHeapObjectTag == 1);
5007 __ adc(scratch2, input_reg, Operand(input_reg));
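// adc computes input_reg + input_reg + carry; with the carry still holding
// the tag bit from the optimistic SmiUntag, this rebuilds the original
// tagged pointer in scratch2 without needing an extra register.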
5009 // Heap number map check.
5010 __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
5011 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5012 __ cmp(scratch1, Operand(ip));
5014 if (instr->truncating()) {
5015 // Performs a truncating conversion of a floating point number as used by
5016 // the JS bitwise operations.
5017 Label no_heap_number, check_bools, check_false;
5018 __ b(ne, &no_heap_number);
5019 __ TruncateHeapNumberToI(input_reg, scratch2);
5020 __ b(&done);
5022 // Check for Oddballs. Undefined/False is converted to zero and True to one
5023 // for truncating conversions.
5024 __ bind(&no_heap_number);
5025 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5026 __ cmp(scratch2, Operand(ip));
5027 __ b(ne, &check_bools);
5028 __ mov(input_reg, Operand::Zero());
5029 __ b(&done);
5031 __ bind(&check_bools);
5032 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5033 __ cmp(scratch2, Operand(ip));
5034 __ b(ne, &check_false);
5035 __ mov(input_reg, Operand(1));
5036 __ b(&done);
5038 __ bind(&check_false);
5039 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5040 __ cmp(scratch2, Operand(ip));
5041 DeoptimizeIf(ne, instr->environment());
5042 __ mov(input_reg, Operand::Zero());
5043 __ b(&done);
5044 } else {
5045 // Deoptimize if we don't have a heap number.
5046 DeoptimizeIf(ne, instr->environment());
5048 __ sub(ip, scratch2, Operand(kHeapObjectTag));
5049 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
5050 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
5051 DeoptimizeIf(ne, instr->environment());
5053 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5054 __ cmp(input_reg, Operand::Zero());
5055 __ b(ne, &done);
5056 __ VmovHigh(scratch1, double_scratch2);
5057 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5058 DeoptimizeIf(ne, instr->environment());
5059 }
5060 }
5061 __ bind(&done);
5062 }
5065 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5066 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
5067 public:
5068 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
5069 : LDeferredCode(codegen), instr_(instr) { }
5070 virtual void Generate() V8_OVERRIDE {
5071 codegen()->DoDeferredTaggedToI(instr_);
5072 }
5073 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5074 private:
5075 LTaggedToI* instr_;
5076 };
5078 LOperand* input = instr->value();
5079 ASSERT(input->IsRegister());
5080 ASSERT(input->Equals(instr->result()));
5082 Register input_reg = ToRegister(input);
5084 if (instr->hydrogen()->value()->representation().IsSmi()) {
5085 __ SmiUntag(input_reg);
5086 } else {
5087 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
5089 // Optimistically untag the input.
5090 // If the input is a HeapObject, SmiUntag will set the carry flag.
5091 __ SmiUntag(input_reg, SetCC);
5092 // Branch to deferred code if the input was tagged.
5093 // The deferred code will take care of restoring the tag.
5094 __ b(cs, deferred->entry());
5095 __ bind(deferred->exit());
5096 }
5097 }
5100 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5101 LOperand* input = instr->value();
5102 ASSERT(input->IsRegister());
5103 LOperand* result = instr->result();
5104 ASSERT(result->IsDoubleRegister());
5106 Register input_reg = ToRegister(input);
5107 DwVfpRegister result_reg = ToDoubleRegister(result);
5109 HValue* value = instr->hydrogen()->value();
5110 NumberUntagDMode mode = value->representation().IsSmi()
5111 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
5113 EmitNumberUntagD(input_reg, result_reg,
5114 instr->hydrogen()->can_convert_undefined_to_nan(),
5115 instr->hydrogen()->deoptimize_on_minus_zero(),
5116 instr->environment(),
5117 mode);
5118 }
5121 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5122 Register result_reg = ToRegister(instr->result());
5123 Register scratch1 = scratch0();
5124 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5125 LowDwVfpRegister double_scratch = double_scratch0();
5127 if (instr->truncating()) {
5128 __ TruncateDoubleToI(result_reg, double_input);
5129 } else {
5130 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5131 // Deoptimize if the input wasn't an int32 (inside a double).
5132 DeoptimizeIf(ne, instr->environment());
5133 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5134 Label done;
5135 __ cmp(result_reg, Operand::Zero());
5136 __ b(ne, &done);
5137 __ VmovHigh(scratch1, double_input);
5138 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5139 DeoptimizeIf(ne, instr->environment());
5140 __ bind(&done);
5141 }
5142 }
5143 }
5146 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5147 Register result_reg = ToRegister(instr->result());
5148 Register scratch1 = scratch0();
5149 DwVfpRegister double_input = ToDoubleRegister(instr->value());
5150 LowDwVfpRegister double_scratch = double_scratch0();
5152 if (instr->truncating()) {
5153 __ TruncateDoubleToI(result_reg, double_input);
5154 } else {
5155 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
5156 // Deoptimize if the input wasn't an int32 (inside a double).
5157 DeoptimizeIf(ne, instr->environment());
5158 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5159 Label done;
5160 __ cmp(result_reg, Operand::Zero());
5161 __ b(ne, &done);
5162 __ VmovHigh(scratch1, double_input);
5163 __ tst(scratch1, Operand(HeapNumber::kSignMask));
5164 DeoptimizeIf(ne, instr->environment());
5165 __ bind(&done);
5166 }
5167 }
5168 __ SmiTag(result_reg, SetCC);
5169 DeoptimizeIf(vs, instr->environment());
5170 }
5173 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5174 LOperand* input = instr->value();
5175 __ SmiTst(ToRegister(input));
5176 DeoptimizeIf(ne, instr->environment());
5177 }
5180 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5181 if (!instr->hydrogen()->value()->IsHeapObject()) {
5182 LOperand* input = instr->value();
5183 __ SmiTst(ToRegister(input));
5184 DeoptimizeIf(eq, instr->environment());
5185 }
5186 }
5189 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5190 Register input = ToRegister(instr->value());
5191 Register scratch = scratch0();
5193 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5194 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5196 if (instr->hydrogen()->is_interval_check()) {
5197 InstanceType first;
5198 InstanceType last;
5199 instr->hydrogen()->GetCheckInterval(&first, &last);
5201 __ cmp(scratch, Operand(first));
5203 // If there is only one type in the interval check for equality.
5204 if (first == last) {
5205 DeoptimizeIf(ne, instr->environment());
5206 } else {
5207 DeoptimizeIf(lo, instr->environment());
5208 // Omit check for the last type.
5209 if (last != LAST_TYPE) {
5210 __ cmp(scratch, Operand(last));
5211 DeoptimizeIf(hi, instr->environment());
5212 }
5213 }
5214 } else {
5215 uint8_t mask;
5216 uint8_t tag;
5217 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5219 if (IsPowerOf2(mask)) {
5220 ASSERT(tag == 0 || IsPowerOf2(tag));
5221 __ tst(scratch, Operand(mask));
5222 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
5223 } else {
5224 __ and_(scratch, scratch, Operand(mask));
5225 __ cmp(scratch, Operand(tag));
5226 DeoptimizeIf(ne, instr->environment());
5227 }
5228 }
5229 }
5232 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5233 Register reg = ToRegister(instr->value());
5234 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5235 AllowDeferredHandleDereference smi_check;
5236 if (isolate()->heap()->InNewSpace(*object)) {
5237 Register reg = ToRegister(instr->value());
5238 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5239 __ mov(ip, Operand(Handle<Object>(cell)));
5240 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
5241 __ cmp(reg, ip);
5242 } else {
5243 __ cmp(reg, Operand(object));
5244 }
5245 DeoptimizeIf(ne, instr->environment());
5246 }
5249 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5250 {
5251 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5252 __ push(object);
5253 __ mov(cp, Operand::Zero());
5254 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5255 RecordSafepointWithRegisters(
5256 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5257 __ StoreToSafepointRegisterSlot(r0, scratch0());
5258 }
5259 __ tst(scratch0(), Operand(kSmiTagMask));
5260 DeoptimizeIf(eq, instr->environment());
5261 }
5264 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5265 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5266 public:
5267 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5268 : LDeferredCode(codegen), instr_(instr), object_(object) {
5269 SetExit(check_maps());
5270 }
5271 virtual void Generate() V8_OVERRIDE {
5272 codegen()->DoDeferredInstanceMigration(instr_, object_);
5273 }
5274 Label* check_maps() { return &check_maps_; }
5275 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5276 private:
5277 LCheckMaps* instr_;
5278 Label check_maps_;
5279 Register object_;
5280 };
5282 if (instr->hydrogen()->CanOmitMapChecks()) return;
5283 Register map_reg = scratch0();
5285 LOperand* input = instr->value();
5286 ASSERT(input->IsRegister());
5287 Register reg = ToRegister(input);
5289 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5291 DeferredCheckMaps* deferred = NULL;
5292 if (instr->hydrogen()->has_migration_target()) {
5293 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5294 __ bind(deferred->check_maps());
5295 }
5297 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5298 Label success;
5299 for (int i = 0; i < map_set.size() - 1; i++) {
5300 Handle<Map> map = map_set.at(i).handle();
5301 __ CompareMap(map_reg, map, &success);
5302 __ b(eq, &success);
5303 }
5305 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5306 __ CompareMap(map_reg, map, &success);
5307 if (instr->hydrogen()->has_migration_target()) {
5308 __ b(ne, deferred->entry());
5309 } else {
5310 DeoptimizeIf(ne, instr->environment());
5311 }
5313 __ bind(&success);
5314 }
5317 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5318 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
5319 Register result_reg = ToRegister(instr->result());
5320 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
5321 }
5324 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5325 Register unclamped_reg = ToRegister(instr->unclamped());
5326 Register result_reg = ToRegister(instr->result());
5327 __ ClampUint8(result_reg, unclamped_reg);
5328 }
5331 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5332 Register scratch = scratch0();
5333 Register input_reg = ToRegister(instr->unclamped());
5334 Register result_reg = ToRegister(instr->result());
5335 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
5336 Label is_smi, done, heap_number;
5338 // Both smi and heap number cases are handled.
5339 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
5341 // Check for heap number
5342 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5343 __ cmp(scratch, Operand(factory()->heap_number_map()));
5344 __ b(eq, &heap_number);
5346 // Check for undefined. Undefined is converted to zero for clamping
5347 // conversions.
5348 __ cmp(input_reg, Operand(factory()->undefined_value()));
5349 DeoptimizeIf(ne, instr->environment());
5350 __ mov(result_reg, Operand::Zero());
5351 __ jmp(&done);
5353 // Heap number
5354 __ bind(&heap_number);
5355 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
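// ClampDoubleToUint8 rounds and clamps the double to [0, 255]; NaN inputs
// are expected to clamp to 0, matching the Uint8Clamped conversion rules.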
5356 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
5357 __ jmp(&done);
5359 // smi
5360 __ bind(&is_smi);
5361 __ ClampUint8(result_reg, result_reg);
5363 __ bind(&done);
5364 }
5367 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5368 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5369 Register result_reg = ToRegister(instr->result());
5370 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5371 __ VmovHigh(result_reg, value_reg);
5372 } else {
5373 __ VmovLow(result_reg, value_reg);
5374 }
5375 }
5378 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5379 Register hi_reg = ToRegister(instr->hi());
5380 Register lo_reg = ToRegister(instr->lo());
5381 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5382 __ VmovHigh(result_reg, hi_reg);
5383 __ VmovLow(result_reg, lo_reg);
5384 }
5387 void LCodeGen::DoAllocate(LAllocate* instr) {
5388 class DeferredAllocate V8_FINAL : public LDeferredCode {
5389 public:
5390 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5391 : LDeferredCode(codegen), instr_(instr) { }
5392 virtual void Generate() V8_OVERRIDE {
5393 codegen()->DoDeferredAllocate(instr_);
5394 }
5395 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5396 private:
5397 LAllocate* instr_;
5398 };
5400 DeferredAllocate* deferred =
5401 new(zone()) DeferredAllocate(this, instr);
5403 Register result = ToRegister(instr->result());
5404 Register scratch = ToRegister(instr->temp1());
5405 Register scratch2 = ToRegister(instr->temp2());
5407 // Allocate memory for the object.
5408 AllocationFlags flags = TAG_OBJECT;
5409 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5410 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5412 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5413 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5414 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5415 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5416 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5417 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5418 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5421 if (instr->size()->IsConstantOperand()) {
5422 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5423 if (size <= Page::kMaxRegularHeapObjectSize) {
5424 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5426 __ jmp(deferred->entry());
5429 Register size = ToRegister(instr->size());
5438 __ bind(deferred->exit());
5440 if (instr->hydrogen()->MustPrefillWithFiller()) {
5441 if (instr->size()->IsConstantOperand()) {
5442 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5443 __ mov(scratch, Operand(size));
5445 scratch = ToRegister(instr->size());
5447 __ sub(scratch, scratch, Operand(kPointerSize));
5448 __ sub(result, result, Operand(kHeapObjectTag));
5451 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5452 __ str(scratch2, MemOperand(result, scratch));
5453 __ sub(scratch, scratch, Operand(kPointerSize));
5454 __ cmp(scratch, Operand(0));
5456 __ add(result, result, Operand(kHeapObjectTag));
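// Runtime fallback for DoAllocate: the size and the encoded target-space
// flags are passed as smis, and the new object comes back in r0.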
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


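// RegExp literals are materialized lazily: if the slot in the literals
// array is still undefined, the boilerplate is built in the runtime; the
// literal itself is then a shallow copy of that boilerplate.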
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


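// EmitTypeofIs emits the compare for a "typeof x == 'name'" branch and
// returns the condition under which the true branch is taken, or
// kNoCondition when the type name matches no typeof result (the emitted
// code then jumps straight to the false label).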
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FLOAT32x4_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, INT32x4_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


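// A construct call is detected by walking to the caller's frame (skipping
// an arguments adaptor frame if present) and comparing its marker slot
// against StackFrame::CONSTRUCT; the eq condition holds on a match.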
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


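// Lazy deoptimization works by patching a call over the code that follows a
// lazy-bailout point, so consecutive points must be at least a patch-size
// apart; nops are emitted as padding if necessary, with the constant pool
// blocked so the padding stays pure code.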
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


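// Stack checks come in two flavors: at function entry the StackCheck builtin
// is called inline when sp is below the limit, while at backwards branches
// the call lives in deferred code so the loop back edge stays short.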
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


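// DoForInPrepareMap validates the for-in subject in r0: undefined, null,
// smis and proxies deoptimize. If every object on the prototype chain has a
// valid enum cache, the map itself is the result; otherwise the property
// names are obtained from the runtime as a fixed array.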
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}


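// DoForInCacheArray fetches the enum cache for the map produced above. A
// zero EnumLength yields the empty fixed array; a zero cache entry
// deoptimizes.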
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


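// DoLoadFieldByIndex uses the for-in field-index encoding: a non-negative
// smi index selects an in-object field, while a negative index selects a
// slot in the out-of-object properties array.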
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


} }  // namespace v8::internal