// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "arm/lithium-codegen-arm.h"
31 #include "arm/lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateDeoptJumpTable() &&
         GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  RegisterDependentCodeForEmbeddedMaps(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}
void LCodeGen::SaveCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
void LCodeGen::RestoreCallerDoubles() {
  ASSERT(info()->saves_caller_doubles());
  ASSERT(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Classic mode functions and builtins need to replace the receiver with
    // the global proxy when called as functions (without an explicit receiver
    // object).
    if (info_->this_has_uses() &&
        info_->is_classic_mode() &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ ldr(r2, MemOperand(sp, receiver_offset));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);

      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));

      __ str(r2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
    __ LoadConstantPoolPointerRegister();
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ sub(sp, sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots * kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ push(r1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mov(cp, r0);
    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        __ RecordWriteContextSlot(
            cp,
            target.offset(),
            r0,
            r3,
            GetLinkRegisterState(),
            kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ PushFixedFrame();
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ PopFixedFrame();
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}
bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data word after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      ASSERT(!info()->saves_caller_doubles());
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ PushFixedFrame();
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
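        // Reading pc on ARM yields the address of the current instruction
        // plus 8, so after the following two movs lr holds the address of
        // the instruction right after 'mov pc, ip', turning the jump into a
        // call with a valid return address.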
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      if (info()->saves_caller_doubles()) {
        ASSERT(info()->IsStub());
        RestoreCallerDoubles();
      }
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
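  // On a 32-bit target a smi is the integer shifted left by one bit (the
  // low tag bit is 0), so the reinterpret_cast below yields value << 1.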
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}
static int ArgumentsOffsetWithoutFrame(int index) {
  ASSERT(index < 0);
  return -(index + 1) * kPointerSize;
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  ASSERT(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
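    // Stress mode: count down a per-isolate counter and, when it reaches
    // zero, force a deoptimization and reset the counter to
    // FLAG_deopt_every_n_times.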
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    // Store the condition on the stack if necessary.
    if (condition != al) {
      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
      __ mov(scratch, Operand(1), LeaveCC, condition);
      __ push(scratch);
    }

    __ push(r1);
    __ mov(scratch, Operand(count));
    __ ldr(r1, MemOperand(scratch));
    __ sub(r1, r1, Operand(1), SetCC);
    __ movw(r1, FLAG_deopt_every_n_times, eq);
    __ str(r1, MemOperand(scratch));
    __ pop(r1);

    if (condition != al) {
      // Clean up the stack before the deoptimizer call.
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);

    // 'Restore' the condition in a slightly hacky way. (It would be better
    // to use 'msr' and 'mrs' instructions here, but they are not supported by
    // our ARM simulator).
    if (condition != al) {
      condition = ne;
      __ cmp(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to handle a condition, build a
  // frame, or restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) {
    // Register pp always contains a pointer to the constant pool.
    safepoint.DefinePointerRegister(pp, zone());
  }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->RightIsPowerOf2()) {
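    // For a power-of-2 divisor, x % divisor == x & (divisor - 1) when x is
    // non-negative; for negative x the code below negates, masks, and
    // negates again so the result keeps the sign of the dividend.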
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ cmp(left_reg, Operand::Zero());
      __ b(pl, &left_is_not_negative);
      __ rsb(result_reg, left_reg, Operand::Zero());
      __ and_(result_reg, result_reg, Operand(divisor - 1));
      __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      }
      __ b(&done);
    }

    __ bind(&left_is_not_negative);
    __ and_(result_reg, left_reg, Operand(divisor - 1));
    __ bind(&done);
  } else if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in
    // this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);
  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return
    // a NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32-bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, ip));
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ rsb(result, dividend, Operand::Zero(), SetCC);
        DeoptimizeIf(vs, environment);
      }
      // Compute the remainder.
      __ mov(remainder, Operand::Zero());
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch- and condition-free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ mov(scratch, Operand(dividend, ASR, power - 1));
        }
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ rsb(result, result, Operand::Zero());
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sub(remainder, dividend, Operand(result, LSL, power));
        } else {
          __ add(remainder, dividend, Operand(result, LSL, power));
        }
        return;
      } else {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // which can be written as
        //   <divisor with magic numbers> * <other divisor with magic numbers>
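        //
        // For instance, division by 3 uses the magic pair (M = 0x55555556,
        // s = 0): the high 32 bits of dividend * M give the quotient, up to
        // the sign corrections applied below.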
        DivMagicNumbers magic_numbers =
            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ mov(ip, Operand(M));
        __ smull(ip, scratch, dividend, ip);
        if (M < 0) {
          __ add(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ mov(scratch, Operand(scratch, ASR, s));
        }
        __ add(result, scratch, Operand(dividend, LSR, 31));
        if (divisor < 0) __ rsb(result, result, Operand::Zero());
        // Compute the remainder.
        __ mov(ip, Operand(divisor));
        // This sequence could be replaced with 'mls' when
        // it gets implemented.
        __ mul(scratch, result, ip);
        __ sub(remainder, dividend, scratch);
      }
  }
}
void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) {
    const Register dividend = ToRegister(instr->left());
    const Register result = ToRegister(instr->result());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ cmp(dividend, Operand::Zero());
        DeoptimizeIf(eq, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, Operand(kMinInt));
        DeoptimizeIf(eq, instr->environment());
      }
      test_value = -divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
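        // All uses truncate to int32, so a remainder can be discarded: take
        // the absolute value, shift, and restore the sign of the quotient.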
        __ sub(result, dividend, Operand::Zero(), SetCC);
        __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
        __ mov(result, Operand(result, ASR, power));
        if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
        if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
        return;  // Don't fall through to "__ rsb" below.
      } else {
        // Deoptimize if the remainder is not 0.
        __ tst(dividend, Operand(test_value));
        DeoptimizeIf(ne, instr->environment());
        __ mov(result, Operand(dividend, ASR, power));
        if (divisor < 0) __ rsb(result, result, Operand(0));
      }
    } else {
      if (divisor < 0) {
        __ rsb(result, dividend, Operand(0));
      } else {
        __ Move(result, dividend);
      }
    }

    return;
  }

  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // Check for x / 0.
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive;
    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
      // Do the test only if it hadn't been done above.
      __ cmp(right, Operand::Zero());
    }
    __ b(pl, &positive);
    __ cmp(left, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
    __ bind(&positive);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) &&
      (!CpuFeatures::IsSupported(SUDIV) ||
       !instr->hydrogen_value()->CheckFlag(
           HValue::kAllUsesTruncatingToInt32))) {
    // We don't need to check for overflow when truncating with sdiv
    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
    __ cmp(left, Operand(kMinInt));
    __ cmp(right, Operand(-1), eq);
    DeoptimizeIf(eq, instr->environment());
  }

  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);
    __ sdiv(result, left, right);

    if (!instr->hydrogen_value()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Compute the remainder and deopt if it's not zero.
      const Register remainder = scratch0();
      __ mls(remainder, result, right, left);
      __ cmp(remainder, Operand::Zero());
      DeoptimizeIf(ne, instr->environment());
    }
  } else {
    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
    const DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());

    if (!instr->hydrogen_value()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Deopt if exact conversion to integer was not possible.
      // Use vright as scratch register.
      __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
      __ VFPCompareAndSetFlags(vleft, double_scratch0());
      DeoptimizeIf(ne, instr->environment());
    }
  }
}
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (!CpuFeatures::IsSupported(SUDIV)) {
    // If the CPU doesn't support the sdiv instruction, we only optimize when
    // we have magic numbers for the divisor. The standard integer division
    // routine is usually slower than transitioning to VFP.
    ASSERT(instr->right()->IsConstantOperand());
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
    if (divisor < 0) {
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
    __ cmp(remainder, Operand::Zero());
    __ teq(remainder, Operand(divisor), ne);
    __ sub(result, result, Operand(1), LeaveCC, mi);
  } else {
    CpuFeatureScope scope(masm(), SUDIV);
    const Register right = ToRegister(instr->right());

    // Check for x / 0.
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      __ cmp(left, Operand(kMinInt));
      __ cmp(right, Operand(-1), eq);
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(right, Operand::Zero());
      __ cmp(left, Operand::Zero(), mi);
      // "right" can't be zero here because its zero case deoptimized above.
      // The Z flag is set only if (right < 0) and (left == 0). In that case
      // we need to deoptimize to produce a -0.
      DeoptimizeIf(eq, instr->environment());
    }

    Label done;
    __ sdiv(result, left, right);
    // If both operands have the same sign then we are done.
    __ eor(remainder, left, Operand(right), SetCC);
    __ b(pl, &done);
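    // Truncating sdiv rounds toward zero while flooring division rounds
    // toward negative infinity; when the operand signs differ and the
    // remainder is non-zero, the quotient must be decremented.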
    // Check if the result needs to be corrected.
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    __ sub(result, result, Operand(1), LeaveCC, ne);

    __ bind(&done);
  }
}
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr->environment());
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
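        // Branchless absolute value: 'mask' is 0 for non-negative constants
        // and -1 otherwise, so (constant + mask) ^ mask == |constant|.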
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }
  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
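      // teq sets the flags for left ^ right; 'pl' means the sign bits agree,
      // in which case the product cannot be -0.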
      __ teq(left, Operand(right));
      __ b(pl, &done);
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
      __ bind(&done);
    }
  }
}
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // the result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
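          // A zero shift of a negative value would produce a uint32 result
          // outside the int32 range, so set the condition codes and deopt
          // on 'mi'.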
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v, scratch0());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(r0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
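      // Fall back to the runtime when the cache stamp on the date object is
      // out of date; otherwise the cached field can be read directly.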
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  ASSERT(!scratch.is(string));
  ASSERT(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ add(scratch, string, Operand(ToRegister(index)));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ and_(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                            ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ ldrb(result, operand);
  } else {
    __ ldrh(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ strb(value, operand);
  } else {
    __ strh(value, operand);
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}

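// Math.min/Math.max. The integer case is a compare followed by two
// conditional moves. The double case needs extra care: NaN in either input
// must produce NaN (the vadd of both operands does that), and min(-0, +0)
// must be -0, which the vneg/vsub/vneg sequence computes without NEON.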
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, ip));
    Register result_reg = ToRegister(instr->result());
    __ cmp(left_reg, right_op);
    __ Move(result_reg, left_reg, condition);
    __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DwVfpRegister left_reg = ToDoubleRegister(left);
    DwVfpRegister right_reg = ToDoubleRegister(right);
    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
    Label result_is_nan, return_left, return_right, check_zero, done;
    __ VFPCompareAndSetFlags(left_reg, right_reg);
    if (operation == HMathMinMax::kMathMin) {
      __ b(mi, &return_left);
      __ b(gt, &return_right);
    } else {
      __ b(mi, &return_right);
      __ b(gt, &return_left);
    }
    __ b(vs, &result_is_nan);
    // Left equals right => check for -0.
    __ VFPCompareAndSetFlags(left_reg, 0.0);
    if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
      __ b(ne, &done);  // left == right != 0.
    } else {
      __ b(ne, &return_left);  // left == right != 0.
    }
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // We could use a single 'vorr' instruction here if we had NEON support.
      __ vneg(left_reg, left_reg);
      __ vsub(result_reg, left_reg, right_reg);
      __ vneg(result_reg, result_reg);
    } else {
      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
      // the decision for vadd is easy because vand is a NEON instruction.
      __ vadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    __ bind(&result_is_nan);
    __ vadd(result_reg, left_reg, right_reg);
    __ b(&done);

    __ bind(&return_right);
    __ Move(result_reg, right_reg);
    if (!left_reg.is(result_reg)) {
      __ b(&done);
    }

    __ bind(&return_left);
    __ Move(result_reg, left_reg);

    __ bind(&done);
  }
}

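// Binary double arithmetic. ADD/SUB/MUL/DIV map directly onto VFP
// instructions; MOD has no VFP equivalent and is routed through the C
// function behind ExternalReference::mod_two_doubles_operation.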
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DwVfpRegister left = ToDoubleRegister(instr->left());
  DwVfpRegister right = ToDoubleRegister(instr->right());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(result, left, right);
      break;
    case Token::SUB:
      __ vsub(result, left, right);
      break;
    case Token::MUL:
      __ vmul(result, left, right);
      break;
    case Token::DIV:
      __ vdiv(result, left, right);
      break;
    case Token::MOD: {
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          0, 2);
      // Move the result in the double result register.
      __ MovFromFloatResult(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}

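// EmitBranch exploits the block layout: if both destinations are the same
// block (or the condition is al) it degenerates to a goto, and a branch to
// the block emitted next is elided by branching to the other destination on
// the negated condition.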
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(condition, chunk_->GetAssemblyLabel(false_block));
}

void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}

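// Generic ToBoolean dispatch. Known representations (int32, double,
// boolean, smi, JSArray, heap number, string) get a direct test; otherwise
// the expected-input-type set from type feedback drives a chain of checks,
// and a value outside that set deoptimizes ("seen for the first time").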
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmp(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DwVfpRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndSetFlags(reg, 0.0);
    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
    EmitBranch(instr, ne);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ cmp(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DwVfpRegister dbl_scratch = double_scratch0();
      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
      EmitBranch(instr, ne);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmp(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand::Zero());
        __ b(eq, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg);
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ b(eq, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DwVfpRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ cmp(r0, r0, vs);  // NaN -> false.
        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment());
      }
    }
  }
}

void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}

Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to false block label.
      __ b(vs, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(left), Operand(value));
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(right), Operand(value));
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        __ cmp(ToRegister(left), ToRegister(right));
      }
    }
    EmitBranch(instr, cond);
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ cmp(left, Operand(right));
  EmitBranch(instr, eq);
}

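// In the tagged case the hole is an ordinary compare against the hole
// value. In the double case the hole is encoded as a NaN with a particular
// upper word, so non-NaN values are filtered out first and then the high 32
// bits are compared against kHoleNanUpper32.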
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
  __ VFPCompareAndSetFlags(input_reg, input_reg);
  EmitFalseBranch(instr, vc);

  Register scratch = scratch0();
  __ VmovHigh(scratch, input_reg);
  __ cmp(scratch, Operand(kHoleNanUpper32));
  EmitBranch(instr, eq);
}

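// -0 and +0 compare equal, so after the equality check against 0.0 the sign
// bit is inspected directly: through the high word (0x80000000) for an
// unboxed double, or through the exponent and mantissa words of a heap
// number.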
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  ASSERT(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ VFPCompareAndSetFlags(value, 0.0);
    EmitFalseBranch(instr, ne);
    __ VmovHigh(scratch, value);
    __ cmp(scratch, Operand(0x80000000));
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value,
                scratch,
                Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()),
                DO_SMI_CHECK);
    __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    __ cmp(scratch, Operand(0x80000000));
    __ cmp(ip, Operand(0x00000000), eq);
  }
  EmitBranch(instr, eq);
}

Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ b(eq, is_object);

  // Load map.
  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
  __ b(ne, is_not_object);

  // Load instance type and check that it is in object type range.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ b(lt, is_not_object);
  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return le;
}

void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  Condition true_cond =
      EmitIsObject(reg, temp1,
                   instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ SmiTst(input_reg);
  EmitBranch(instr, eq);
}

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne);
}

static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition);
}

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}

void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq);
}

// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ b(lt, is_false);
    __ b(eq, is_true);
    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ b(eq, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ b(gt, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized it is sufficient
  // to use an identity comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(instr, eq);
}

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand::Zero());
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}

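// instanceof against a function known at compile time uses a patchable
// inline cache: the hole values emitted below are later patched by the
// InstanceofStub with the last map/result pair, so repeated checks on the
// same map reduce to a load and a compare.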
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(r0));
  ASSERT(result.is(r0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block constant pool emission to ensure the positions of instructions
    // are as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockConstPoolScope block_const_pool(masm());
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch with
    // the cached map.
    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch
    // with true or false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true
  // or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(r0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be r4
  // as its slot in the pushed safepoint registers is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(r4));
  __ Move(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 5;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
  PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  // The mov above can generate one or two instructions. The delta was
  // computed for two instructions, so we need to pad here in case of one
  // instruction.
  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
    __ nop();
  }
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kTrueValueRootIndex,
              condition);
  __ LoadRoot(ToRegister(instr->result()),
              Heap::kFalseValueRootIndex,
              NegateCondition(condition));
}

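// Frame teardown. With a constant parameter count the stack delta is
// (parameter count + 1) words, the extra word being the receiver; with a
// dynamic count the smi is untagged and used as a scaled index instead.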
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(r0);
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ add(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
  }

  __ Jump(lr);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}

void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r2, Operand(instr->name()));
  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload (CompareRoot might clobber ip).
    Register payload = ToRegister(instr->temp());
    __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
    }
  }
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ b(ne, &skip_assignment);
    }
  }

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

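// Named field loads come in three flavours: external memory (a raw
// MemOperand), unboxed double fields (vldr), and tagged fields, which live
// either in the object itself or in the out-of-line properties array.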
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ Load(result, operand, access.representation());
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  __ Load(result, operand, access.representation());
}

void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}

void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them; add one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ ldr(result, MemOperand(arguments, index * kPointerSize));
    } else {
      Register index = ToRegister(instr->index());
      __ rsb(result, index, Operand(const_length + 1));
      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ sub(result, length, Operand(loc));
      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
    } else {
      __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ sub(result, length, index);
    __ add(result, result, Operand(1));
    __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
  }
}

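// Deferred path for boxing a SIMD128 value when inline allocation fails:
// the runtime returns a tagged pointer in r0, which is untagged again
// because the fast path allocates with DONT_TAG_RESULT.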
void LCodeGen::DoDeferredSIMD128ToTagged(LInstruction* instr,
                                         Runtime::FunctionId id) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}

template<class T>
void LCodeGen::DoLoadKeyedSIMD128ExternalArray(LLoadKeyed* instr) {
  class DeferredSIMD128ToTagged V8_FINAL : public LDeferredCode {
   public:
    DeferredSIMD128ToTagged(LCodeGen* codegen, LInstruction* instr,
                            Runtime::FunctionId id)
        : LDeferredCode(codegen), instr_(instr), id_(id) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredSIMD128ToTagged(instr_, id_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LInstruction* instr_;
    Runtime::FunctionId id_;
  };

  // Allocate a SIMD128 object on the heap.
  Register reg = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Register scratch = scratch0();

  DeferredSIMD128ToTagged* deferred = new(zone()) DeferredSIMD128ToTagged(
      this, instr, static_cast<Runtime::FunctionId>(T::kRuntimeAllocatorId()));
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, static_cast<Heap::RootListIndex>(T::kMapRootIndex()));
    __ AllocateSIMDHeapObject(T::kSize, reg, temp, temp2, scratch,
                              deferred->entry(), DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());

  // Copy the SIMD128 value from the external array to the heap object.
  STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;
  int base_offset =
      (instr->additional_index() << element_size_shift) + additional_offset;
  Operand operand = key_is_constant
      ? Operand(constant_key << element_size_shift)
      : Operand(key, LSL, shift_size);

  __ add(scratch, external_pointer, operand);
  for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
    __ ldr(temp, MemOperand(scratch, base_offset + offset));
    __ str(temp, MemOperand(reg, T::kValueOffset + offset));
  }

  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}

void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    int base_offset =
        (instr->additional_index() << element_size_shift) + additional_offset;
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << element_size_shift)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vldr(double_scratch0().low(), scratch0(), base_offset);
      __ vcvt_f64_f32(result, double_scratch0().low());
    } else {  // loading doubles, not floats.
      __ vldr(result, scratch0(), base_offset);
    }
  } else if (IsFloat32x4ElementsKind(elements_kind)) {
    DoLoadKeyedSIMD128ExternalArray<Float32x4>(instr);
  } else if (IsInt32x4ElementsKind(elements_kind)) {
    DoLoadKeyedSIMD128ExternalArray<Int32x4>(instr);
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        __ ldrsb(result, mem_operand);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ ldrb(result, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ ldrsh(result, mem_operand);
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ ldrh(result, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ ldr(result, mem_operand);
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ ldr(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ cmp(result, Operand(0x80000000));
          DeoptimizeIf(cs, instr->environment());
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset =
      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      (instr->additional_index() << element_size_shift);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key << element_size_shift;
  }
  __ add(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, scratch, Operand(key, LSL, shift_size));
  }

  __ vldr(result, scratch, 0);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    __ cmp(scratch, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr->environment());
  }
}

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ ldr(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result);
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());
    }
  }
}

void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}

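// PrepareKeyedOperand folds as much of (key << shift) + offset as possible
// into the ARM addressing mode; scratch0() is only needed when a nonzero
// offset or index forces a separate add. A shift size of -1 encodes an
// untagged smi key for byte-sized elements and is handled with LSR #1.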
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  int base_offset = (additional_index << element_size) + additional_offset;
  if (key_is_constant) {
    return MemOperand(base,
                      base_offset + (constant_key << element_size));
  }

  if (additional_offset != 0) {
    __ mov(scratch0(), Operand(base_offset));
    if (shift_size >= 0) {
      __ add(scratch0(), scratch0(), Operand(key, LSL, shift_size));
    } else {
      ASSERT_EQ(-1, shift_size);
      __ add(scratch0(), scratch0(), Operand(key, LSR, 1));
    }
    return MemOperand(base, scratch0());
  }

  if (additional_index != 0) {
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      return MemOperand(base, key, LSL, shift_size);
    } else {
      ASSERT_EQ(-1, shift_size);
      return MemOperand(base, key, LSR, 1);
    }
  }

  if (shift_size >= 0) {
    return MemOperand(base, scratch0(), LSL, shift_size);
  } else {
    ASSERT_EQ(-1, shift_size);
    return MemOperand(base, scratch0(), LSR, 1);
  }
}

void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->key()).is(r0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}

void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}

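// Receiver wrapping for sloppy-mode calls: null and undefined are replaced
// with the global receiver, other non-object values deoptimize, and
// strict-mode or native functions leave the receiver untouched (for
// functions known at compile time those hint checks are skipped).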
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ ldr(scratch,
           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ ldr(scratch,
           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
    int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
    __ tst(scratch, Operand(mask));
    __ b(ne, &result_in_receiver);

    // Do not transform the receiver to object for builtins.
    __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
    __ b(ne, &result_in_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());

  __ b(&result_in_receiver);
  __ bind(&global_object);
  __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ ldr(result,
         ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ ldr(result,
         FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));

  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ b(&result_ok);
    __ bind(&result_in_receiver);
    __ mov(result, receiver);
    __ bind(&result_ok);
  }
}

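// Function.prototype.apply support: the elements are pushed onto the stack
// in a loop from the highest index down. Argument counts above
// kArgumentsLimit deoptimize rather than risk overflowing the stack.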
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(r0));  // Used for parameter count.
  ASSERT(function.is(r1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}

void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}

void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}

void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}



void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 R1State r1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (r1_state == R1_UNINITIALIZED) {
      __ Move(r1, function);
    }

    // Change context.
    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

    // Set r0 to arguments count if adaptation is not needed. Assumes that r0
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
    }

    // Invoke function.
    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    __ Call(ip);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}
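
// Note on EmitIntegerMathAbs above: the cmp sets flags from (input - 0), so
// for a negative input the mi-conditional rsb computes 0 - input with SetCC.
// Worked example: input == -5 yields 5 with V clear; input == kMinInt
// (0x80000000) has no positive counterpart in 32 bits, so rsb sets V and the
// vs deopt fires.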


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr->environment());

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}
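
// Worked example for DoMathRound: 2.5 converts via floor(2.5 + 0.5) == 3,
// while -2.5 converts via floor(-2.5 + 0.5) == -2, i.e. ties round towards
// +Infinity, matching Math.round. Inputs with |x| <= 0.5 never reach the
// floor path; they are handled by the flag-conditional moves above.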


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = double_scratch0();

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}
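
// Note on DoMathPowHalf: the explicit -Infinity compare is needed because
// vsqrt(-Infinity) would produce NaN, not the +Infinity that ES5 15.8.2.13
// requires. Adding kDoubleRegZero first also handles -0: -0 + +0 is +0, so
// the result is vsqrt(+0) == +0 instead of IEEE's sqrt(-0) == -0.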


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  ASSERT(ToDoubleRegister(instr->left()).is(d0));
  ASSERT(ToDoubleRegister(instr->result()).is(d2));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      R1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));

  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

  if (instr->target()->IsConstantOperand()) {
    LConstantOperand* target = LConstantOperand::cast(instr->target());
    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
    PlatformCallInterfaceDescriptor* call_descriptor =
        instr->descriptor()->platform_specific_descriptor();
    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
            call_descriptor->storage_mode());
  } else {
    ASSERT(instr->target()->IsRegister());
    Register target = ToRegister(instr->target());
    generator.BeforeCall(__ CallSize(target));
    __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
    __ Call(target);
  }
  generator.AfterCall();
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r0, Operand(instr->arity()));
  }

  // Change context.
  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

  // Load the code entry address and call it.
  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
  __ Call(ip);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ mov(r2, Operand(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  __ mov(r2, Operand(factory()->undefined_value()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a holey array: a single non-zero length argument
      // creates an array of holes. Look at the first argument.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(code_object,
         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ add(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ SmiTst(value);
      DeoptimizeIf(eq, instr->environment());
    }
  } else if (representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(),
                                             instr->strict_mode_flag());
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(condition), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ mov(ip, Operand(Smi::FromInt(constant_index)));
    } else {
      __ mov(ip, Operand(constant_index));
    }
    __ cmp(ip, ToRegister(instr->length()));
  } else {
    __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
  }
  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  ApplyCheckIf(condition, instr);
}
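
// Condition choice above: with unsigned compares, hs (>=) deopts as soon as
// index == length, while hi (>) tolerates index == length, which is exactly
// what allow_equality asks for. The unsigned comparison also rejects negative
// untagged indices for free, since they appear as very large values.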


template<class T>
void LCodeGen::DoStoreKeyedSIMD128ExternalArray(LStoreKeyed* instr) {
  ASSERT(instr->value()->IsRegister());
  Register temp = ToRegister(instr->temp());
  Register input_reg = ToRegister(instr->value());
  __ SmiTst(input_reg);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(input_reg, temp, no_reg, T::kInstanceType);
  DeoptimizeIf(ne, instr->environment());

  STATIC_ASSERT(T::kValueSize % kPointerSize == 0);
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;

  int base_offset =
      (instr->additional_index() << element_size_shift) + additional_offset;
  Register address = scratch0();
  if (key_is_constant) {
    if (constant_key != 0) {
      __ add(address, external_pointer,
             Operand(constant_key << element_size_shift));
    } else {
      address = external_pointer;
    }
  } else {
    __ add(address, external_pointer, Operand(key, LSL, shift_size));
  }

  for (int offset = 0; offset < T::kValueSize; offset += kPointerSize) {
    __ ldr(temp, MemOperand(input_reg,
                            T::kValueOffset - kHeapObjectTag + offset));
    __ str(temp, MemOperand(address, base_offset + offset));
  }
}
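
// Note: T::kValueSize is 16 bytes for both Float32x4 and Int32x4, so the
// loop above copies the lane payload in four word-sized ldr/str pairs
// through a single core register rather than going through a VFP/NEON
// register.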


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
      ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
      : 0;

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    int base_offset =
        (instr->additional_index() << element_size_shift) + additional_offset;
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, base_offset);
    } else {  // Storing doubles, not floats.
      __ vstr(value, address, base_offset);
    }
  } else if (IsFloat32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Float32x4>(instr);
  } else if (IsInt32x4ElementsKind(elements_kind)) {
    DoStoreKeyedSIMD128ExternalArray<Int32x4>(instr);
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ str(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FLOAT32x4_ELEMENTS:
      case INT32x4_ELEMENTS:
      case EXTERNAL_FLOAT32x4_ELEMENTS:
      case EXTERNAL_INT32x4_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) +
                   FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements,
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
    // Force a canonical NaN.
    if (masm()->emit_debug_code()) {
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch,
            instr->additional_index() << element_size_shift);
  } else {
    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
  }
}
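
// Why canonicalize: a FixedDoubleArray stores raw 64-bit values, and holey
// double arrays encode "the hole" as one specific NaN bit pattern. Storing
// an arbitrary (e.g. signaling) NaN unchanged could alias that pattern, so
// any NaN is rewritten to the canonical quiet NaN before the store.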


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ str(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}
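
// Note on the write barrier above: RecordWrite wants the address of the
// modified slot, not its index. The key register is dead once the store
// has happened, so it is reused to hold the untagged slot address.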


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, fast.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r2));
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Move(r0, object_reg);
    __ Move(r1, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  StringAddStub stub(instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  ASSERT(output->IsRegister());
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
    DeoptimizeIf(vs, instr->environment());
  } else {
    __ SmiTag(ToRegister(output), ToRegister(input));
  }
}
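
// Note: on 32-bit ARM a smi keeps 31 bits of payload, so tagging is a
// doubling add. With SetCC the add sets V exactly when the value does not
// fit, e.g. 2^30 doubles to 2^31, which overflows as a signed 32-bit
// integer and triggers the vs deopt above.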


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}


void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    __ tst(ToRegister(input), Operand(0xc0000000));
    DeoptimizeIf(ne, instr->environment());
  }
  __ SmiTag(ToRegister(output), ToRegister(input));
}
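
// Worked example for the 0xc0000000 test: a uint32 fits in a smi only if it
// is below 2^30, i.e. bits 30 and 31 are both clear. 0x3fffffff passes and
// is tagged; 0x40000000 (== 2^30) sets bit 30, fails the tst, and deopts.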


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ cmp(input, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  LowDwVfpRegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  Label done;
  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ eor(src, src, Operand(0x80000000));
    }
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
    __ Move(dst, r5);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ mov(ip, Operand::Zero());
  __ StoreToSafepointRegisterSlot(ip, dst);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Move(dst, r0);
  __ sub(dst, dst, Operand(kHeapObjectTag));

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
  __ add(dst, dst, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(dst, dst);
}
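
// Worked example of the untag-and-fix-up above for dst == src: SmiTag
// overflowed, so dst holds the value shifted left by one with the sign bit
// lost. For src == 0x40000000, SmiTag produced 0x80000000; SmiUntag (an
// arithmetic shift right) yields 0xc0000000, and the eor with 0x80000000
// flips the corrupted sign bit back, recovering 0x40000000.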


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(result, input);
  }
}
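
// Note: kHeapObjectTag == 1, and SmiUntag is an arithmetic shift right by
// one with the shifted-out bit going to the carry flag under SetCC. Smis
// have a clear low bit, heap object pointers a set one, so "carry set" is
// precisely "the input was not a smi".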


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      DeoptimizeIf(ne, env);
    }
    // Load the heap number.
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, env);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, env);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code, as we just
  // executed SmiUntag(heap_object, SetCC).
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ adc(scratch2, input_reg, Operand(input_reg));

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand::Zero());
    __ b(&done);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment());

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
    }
  }
  __ bind(&done);
}
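
// Note on the adc at the top of DoDeferredTaggedToI: we arrive here with
// the carry still set by the optimistic SmiUntag (the tag bit was shifted
// out). adc scratch2, input, input therefore computes 2 * (ptr >> 1) + 1,
// which is the original tagged pointer. Example: 0x2469 untags to 0x1234
// with carry 1; 0x1234 + 0x1234 + 1 == 0x2469.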


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}
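
// Clamping semantics handled above: smis go straight to ClampUint8, heap
// numbers are clamped to [0, 255] via ClampDoubleToUint8, undefined becomes
// zero, and anything else (e.g. strings) deoptimizes rather than being
// coerced here.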


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ sub(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ str(scratch2, MemOperand(result, scratch));
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ cmp(scratch, Operand(0));
    __ b(ge, &loop);
    __ add(result, result, Operand(kHeapObjectTag));
  }
}
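
// Note: the filler loop walks backwards from the last pointer-sized slot of
// the (temporarily untagged) allocation, storing the one-pointer filler map
// in each slot so the heap stays iterable until the real fields are written.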


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}
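
// Note: both runtime arguments are pushed as smis, the requested size
// (smi-tagged in the register case) and the encoded allocation flags,
// matching the 2-argument call to Runtime::kAllocateInTargetSpace above.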


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create regexp literal using runtime function.
  // Result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}
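
// Note: the runtime materialization above runs at most once per literal
// slot; subsequent executions find the boilerplate in the literals array and
// only pay for the shallow CopyFields clone.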


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
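
// Note: FastNewClosureStub covers the common case of a non-pretenured
// closure with no literals; everything else falls back to
// Runtime::kNewClosure with an explicit pretenure flag pushed as an argument.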


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->float32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FLOAT32x4_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->int32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, INT32x4_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ b(eq, true_label);
    __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ b(eq, true_label);
    }
    __ CheckObjectTypeRange(input,
                            map,
                            FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            false_label);
    // Check for undetectable objects => false.
    __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}
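
// Note: returning kNoCondition (after the unconditional branch to
// false_label for unknown type literals) tells DoTypeofIsAndBranch that all
// control flow has already been emitted and no final conditional branch is
// needed.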


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq);

  // Check the marker in the calling frame.
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
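
// Note: the predicated ldr above (condition eq) avoids a branch: the caller
// frame pointer is only re-loaded when the immediate caller turns out to be
// an arguments adaptor frame.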


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
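
// Illustrative padding arithmetic (hypothetical offsets): if the last
// lazy-deopt point was at pc offset 40, space_needed is 12, and the current
// pc offset is 44, the loop above emits (40 + 12 - 44) / kInstrSize = 2 nops,
// with constant pool emission blocked so nothing else lands in the gap.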


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
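
// Note: function-entry checks call the StackCheck builtin inline under a
// PredictableCodeSizeScope so the call site has a known, patchable size;
// backwards branches instead route overflow handling through the deferred
// code.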


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}
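
// Note: the fast path uses the receiver's map directly when CheckEnumCache
// succeeds; otherwise Runtime::kGetPropertyNamesFast may return either a map
// or a fixed array of names, and the meta-map comparison deoptimizes in the
// fixed-array case, which this lithium instruction does not handle.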


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}
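
// Note: an EnumLength of zero means no enumerable own properties, so the
// empty fixed array is used directly; a zero loaded from the enum cache at
// the requested index instead appears to indicate an absent cache entry, and
// the code deoptimizes rather than continue without one.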


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
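
// Note: per the encoding noted above, a non-negative smi index selects an
// in-object field starting at JSObject::kHeaderSize, while a negative index
// of -(k + 1) selects element k of the out-of-object properties array.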

#undef __

} }  // namespace v8::internal