1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "arm/lithium-codegen-arm.h"
31 #include "arm/lithium-gap-resolver-arm.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
39 class SafepointGenerator : public CallWrapper {
41 SafepointGenerator(LCodeGen* codegen,
42 LPointerMap* pointers,
43 Safepoint::DeoptMode mode)
47 virtual ~SafepointGenerator() { }
49 virtual void BeforeCall(int call_size) const { }
51 virtual void AfterCall() const {
52 codegen_->RecordSafepoint(pointers_, deopt_mode_);
57 LPointerMap* pointers_;
58 Safepoint::DeoptMode deopt_mode_;
64 bool LCodeGen::GenerateCode() {
65 HPhase phase("Z_Code generation", chunk());
68 CpuFeatures::Scope scope1(VFP3);
69 CpuFeatures::Scope scope2(ARMv7);
71 CodeStub::GenerateFPStubs();
73 // Open a frame scope to indicate that there is a frame on the stack. The
74 // NONE indicates that the scope shouldn't actually generate code to set up
75 // the frame (that is done in GeneratePrologue).
76 FrameScope frame_scope(masm_, StackFrame::NONE);
78 return GeneratePrologue() &&
80 GenerateDeferredCode() &&
81 GenerateDeoptJumpTable() &&
82 GenerateSafepointTable();
86 void LCodeGen::FinishCode(Handle<Code> code) {
88 code->set_stack_slots(GetStackSlotCount());
89 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
90 PopulateDeoptimizationData(code);
94 void LCodeGen::Abort(const char* format, ...) {
95 if (FLAG_trace_bailout) {
96 SmartArrayPointer<char> name(
97 info()->shared_info()->DebugName()->ToCString());
98 PrintF("Aborting LCodeGen in @\"%s\": ", *name);
100 va_start(arguments, format);
101 OS::VPrint(format, arguments);
109 void LCodeGen::Comment(const char* format, ...) {
110 if (!FLAG_code_comments) return;
112 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
114 va_start(arguments, format);
115 builder.AddFormattedList(format, arguments);
118 // Copy the string before recording it in the assembler to avoid
119 // issues when the stack allocated buffer goes out of scope.
120 size_t length = builder.position();
121 Vector<char> copy = Vector<char>::New(length + 1);
122 memcpy(copy.start(), builder.Finalize(), copy.length());
123 masm()->RecordComment(copy.start());
127 bool LCodeGen::GeneratePrologue() {
128 ASSERT(is_generating());
131 if (strlen(FLAG_stop_at) > 0 &&
132 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
137 // r1: Callee's JS function.
138 // cp: Callee's context.
139 // fp: Caller's frame pointer.
142 // Strict mode functions and builtins need to replace the receiver
143 // with undefined when called as functions (without an explicit
144 // receiver object). r5 is zero for method calls and non-zero for
146 if (!info_->is_classic_mode() || info_->is_native()) {
148 __ cmp(r5, Operand(0));
150 int receiver_offset = scope()->num_parameters() * kPointerSize;
151 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
152 __ str(r2, MemOperand(sp, receiver_offset));
156 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
157 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
159 // Reserve space for the stack slots needed by the code.
160 int slots = GetStackSlotCount();
162 if (FLAG_debug_code) {
163 __ mov(r0, Operand(slots));
164 __ mov(r2, Operand(kSlotsZapValue));
168 __ sub(r0, r0, Operand(1), SetCC);
171 __ sub(sp, sp, Operand(slots * kPointerSize));
175 // Possibly allocate a local context.
176 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177 if (heap_slots > 0 ||
178 (scope()->is_qml_mode() && scope()->is_global_scope())) {
179 Comment(";;; Allocate local context");
180 // Argument to NewContext is the function, which is in r1.
182 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
183 FastNewContextStub stub((heap_slots < 0)?0:heap_slots);
186 __ CallRuntime(Runtime::kNewFunctionContext, 1);
188 RecordSafepoint(Safepoint::kNoLazyDeopt);
189 // Context is returned in both r0 and cp. It replaces the context
190 // passed to us. It's saved in the stack and kept live in cp.
191 __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
192 // Copy any necessary parameters into the context.
193 int num_parameters = scope()->num_parameters();
194 for (int i = 0; i < num_parameters; i++) {
195 Variable* var = scope()->parameter(i);
196 if (var->IsContextSlot()) {
197 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
198 (num_parameters - 1 - i) * kPointerSize;
199 // Load parameter from stack.
200 __ ldr(r0, MemOperand(fp, parameter_offset));
201 // Store it in the context.
202 MemOperand target = ContextOperand(cp, var->index());
204 // Update the write barrier. This clobbers r3 and r0.
205 __ RecordWriteContextSlot(
206 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
209 Comment(";;; End allocate local context");
214 __ CallRuntime(Runtime::kTraceEnter, 0);
216 return !is_aborted();
220 bool LCodeGen::GenerateBody() {
221 ASSERT(is_generating());
222 bool emit_instructions = true;
223 for (current_instruction_ = 0;
224 !is_aborted() && current_instruction_ < instructions_->length();
225 current_instruction_++) {
226 LInstruction* instr = instructions_->at(current_instruction_);
227 if (instr->IsLabel()) {
228 LLabel* label = LLabel::cast(instr);
229 emit_instructions = !label->HasReplacement();
232 if (emit_instructions) {
233 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
234 instr->CompileToNative(this);
237 EnsureSpaceForLazyDeopt();
238 return !is_aborted();
242 bool LCodeGen::GenerateDeferredCode() {
243 ASSERT(is_generating());
244 if (deferred_.length() > 0) {
245 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
246 LDeferredCode* code = deferred_[i];
247 __ bind(code->entry());
248 Comment(";;; Deferred code @%d: %s.",
249 code->instruction_index(),
250 code->instr()->Mnemonic());
252 __ jmp(code->exit());
256 // Force constant pool emission at the end of the deferred code to make
257 // sure that no constant pools are emitted after.
258 masm()->CheckConstPool(true, false);
260 return !is_aborted();
264 bool LCodeGen::GenerateDeoptJumpTable() {
265 // Check that the jump table is accessible from everywhere in the function
266 // code, i.e. that offsets to the table can be encoded in the 24bit signed
267 // immediate of a branch instruction.
268 // To simplify we consider the code size from the first instruction to the
269 // end of the jump table. We also don't consider the pc load delta.
270 // Each entry in the jump table generates one instruction and inlines one
271 // 32bit data after it.
272 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
273 deopt_jump_table_.length() * 2)) {
274 Abort("Generated code is too large");
277 // Block the constant pool emission during the jump table emission.
278 __ BlockConstPoolFor(deopt_jump_table_.length());
279 __ RecordComment("[ Deoptimisation jump table");
281 __ bind(&table_start);
282 for (int i = 0; i < deopt_jump_table_.length(); i++) {
283 __ bind(&deopt_jump_table_[i].label);
284 __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta));
285 __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address));
287 ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
288 deopt_jump_table_.length() * 2);
289 __ RecordComment("]");
291 // The deoptimization jump table is the last part of the instruction
292 // sequence. Mark the generated code as done unless we bailed out.
293 if (!is_aborted()) status_ = DONE;
294 return !is_aborted();
298 bool LCodeGen::GenerateSafepointTable() {
300 safepoints_.Emit(masm(), GetStackSlotCount());
301 return !is_aborted();
305 Register LCodeGen::ToRegister(int index) const {
306 return Register::FromAllocationIndex(index);
310 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
311 return DoubleRegister::FromAllocationIndex(index);
315 Register LCodeGen::ToRegister(LOperand* op) const {
316 ASSERT(op->IsRegister());
317 return ToRegister(op->index());
321 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
322 if (op->IsRegister()) {
323 return ToRegister(op->index());
324 } else if (op->IsConstantOperand()) {
325 LConstantOperand* const_op = LConstantOperand::cast(op);
326 Handle<Object> literal = chunk_->LookupLiteral(const_op);
327 Representation r = chunk_->LookupLiteralRepresentation(const_op);
328 if (r.IsInteger32()) {
329 ASSERT(literal->IsNumber());
330 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
331 } else if (r.IsDouble()) {
332 Abort("EmitLoadRegister: Unsupported double immediate.");
334 ASSERT(r.IsTagged());
335 if (literal->IsSmi()) {
336 __ mov(scratch, Operand(literal));
338 __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
342 } else if (op->IsStackSlot() || op->IsArgument()) {
343 __ ldr(scratch, ToMemOperand(op));
351 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
352 ASSERT(op->IsDoubleRegister());
353 return ToDoubleRegister(op->index());
357 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
358 SwVfpRegister flt_scratch,
359 DoubleRegister dbl_scratch) {
360 if (op->IsDoubleRegister()) {
361 return ToDoubleRegister(op->index());
362 } else if (op->IsConstantOperand()) {
363 LConstantOperand* const_op = LConstantOperand::cast(op);
364 Handle<Object> literal = chunk_->LookupLiteral(const_op);
365 Representation r = chunk_->LookupLiteralRepresentation(const_op);
366 if (r.IsInteger32()) {
367 ASSERT(literal->IsNumber());
368 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
369 __ vmov(flt_scratch, ip);
370 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
372 } else if (r.IsDouble()) {
373 Abort("unsupported double immediate");
374 } else if (r.IsTagged()) {
375 Abort("unsupported tagged immediate");
377 } else if (op->IsStackSlot() || op->IsArgument()) {
378 // TODO(regis): Why is vldr not taking a MemOperand?
379 // __ vldr(dbl_scratch, ToMemOperand(op));
380 MemOperand mem_op = ToMemOperand(op);
381 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
389 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
390 Handle<Object> literal = chunk_->LookupLiteral(op);
391 ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
396 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
397 return chunk_->LookupLiteralRepresentation(op).IsInteger32();
401 int LCodeGen::ToInteger32(LConstantOperand* op) const {
402 Handle<Object> value = chunk_->LookupLiteral(op);
403 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
404 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
406 return static_cast<int32_t>(value->Number());
410 double LCodeGen::ToDouble(LConstantOperand* op) const {
411 Handle<Object> value = chunk_->LookupLiteral(op);
412 return value->Number();
416 Operand LCodeGen::ToOperand(LOperand* op) {
417 if (op->IsConstantOperand()) {
418 LConstantOperand* const_op = LConstantOperand::cast(op);
419 Handle<Object> literal = chunk_->LookupLiteral(const_op);
420 Representation r = chunk_->LookupLiteralRepresentation(const_op);
421 if (r.IsInteger32()) {
422 ASSERT(literal->IsNumber());
423 return Operand(static_cast<int32_t>(literal->Number()));
424 } else if (r.IsDouble()) {
425 Abort("ToOperand Unsupported double immediate.");
427 ASSERT(r.IsTagged());
428 return Operand(literal);
429 } else if (op->IsRegister()) {
430 return Operand(ToRegister(op));
431 } else if (op->IsDoubleRegister()) {
432 Abort("ToOperand IsDoubleRegister unimplemented");
435 // Stack slots not implemented, use ToMemOperand instead.
441 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
442 ASSERT(!op->IsRegister());
443 ASSERT(!op->IsDoubleRegister());
444 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
445 int index = op->index();
447 // Local or spill slot. Skip the frame pointer, function, and
448 // context in the fixed part of the frame.
449 return MemOperand(fp, -(index + 3) * kPointerSize);
451 // Incoming parameter. Skip the return address.
452 return MemOperand(fp, -(index - 1) * kPointerSize);
457 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
458 ASSERT(op->IsDoubleStackSlot());
459 int index = op->index();
461 // Local or spill slot. Skip the frame pointer, function, context,
462 // and the first word of the double in the fixed part of the frame.
463 return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
465 // Incoming parameter. Skip the return address and the first word of
467 return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
472 void LCodeGen::WriteTranslation(LEnvironment* environment,
473 Translation* translation) {
474 if (environment == NULL) return;
476 // The translation includes one command per value in the environment.
477 int translation_size = environment->values()->length();
478 // The output frame height does not include the parameters.
479 int height = translation_size - environment->parameter_count();
481 WriteTranslation(environment->outer(), translation);
482 int closure_id = DefineDeoptimizationLiteral(environment->closure());
483 switch (environment->frame_type()) {
485 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
488 translation->BeginConstructStubFrame(closure_id, translation_size);
490 case ARGUMENTS_ADAPTOR:
491 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
496 for (int i = 0; i < translation_size; ++i) {
497 LOperand* value = environment->values()->at(i);
498 // spilled_registers_ and spilled_double_registers_ are either
499 // both NULL or both set.
500 if (environment->spilled_registers() != NULL && value != NULL) {
501 if (value->IsRegister() &&
502 environment->spilled_registers()[value->index()] != NULL) {
503 translation->MarkDuplicate();
504 AddToTranslation(translation,
505 environment->spilled_registers()[value->index()],
506 environment->HasTaggedValueAt(i));
508 value->IsDoubleRegister() &&
509 environment->spilled_double_registers()[value->index()] != NULL) {
510 translation->MarkDuplicate();
513 environment->spilled_double_registers()[value->index()],
518 AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
523 void LCodeGen::AddToTranslation(Translation* translation,
527 // TODO(twuerthinger): Introduce marker operands to indicate that this value
528 // is not present and must be reconstructed from the deoptimizer. Currently
529 // this is only used for the arguments object.
530 translation->StoreArgumentsObject();
531 } else if (op->IsStackSlot()) {
533 translation->StoreStackSlot(op->index());
535 translation->StoreInt32StackSlot(op->index());
537 } else if (op->IsDoubleStackSlot()) {
538 translation->StoreDoubleStackSlot(op->index());
539 } else if (op->IsArgument()) {
541 int src_index = GetStackSlotCount() + op->index();
542 translation->StoreStackSlot(src_index);
543 } else if (op->IsRegister()) {
544 Register reg = ToRegister(op);
546 translation->StoreRegister(reg);
548 translation->StoreInt32Register(reg);
550 } else if (op->IsDoubleRegister()) {
551 DoubleRegister reg = ToDoubleRegister(op);
552 translation->StoreDoubleRegister(reg);
553 } else if (op->IsConstantOperand()) {
554 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
555 int src_index = DefineDeoptimizationLiteral(literal);
556 translation->StoreLiteral(src_index);
563 void LCodeGen::CallCode(Handle<Code> code,
564 RelocInfo::Mode mode,
565 LInstruction* instr) {
566 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
570 void LCodeGen::CallCodeGeneric(Handle<Code> code,
571 RelocInfo::Mode mode,
573 SafepointMode safepoint_mode) {
574 ASSERT(instr != NULL);
575 LPointerMap* pointers = instr->pointer_map();
576 RecordPosition(pointers->position());
578 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
580 // Signal that we don't inline smi code before these stubs in the
581 // optimizing code generator.
582 if (code->kind() == Code::BINARY_OP_IC ||
583 code->kind() == Code::COMPARE_IC) {
589 void LCodeGen::CallRuntime(const Runtime::Function* function,
591 LInstruction* instr) {
592 ASSERT(instr != NULL);
593 LPointerMap* pointers = instr->pointer_map();
594 ASSERT(pointers != NULL);
595 RecordPosition(pointers->position());
597 __ CallRuntime(function, num_arguments);
598 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
602 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
604 LInstruction* instr) {
605 __ CallRuntimeSaveDoubles(id);
606 RecordSafepointWithRegisters(
607 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
611 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
612 Safepoint::DeoptMode mode) {
613 if (!environment->HasBeenRegistered()) {
614 // Physical stack frame layout:
615 // -x ............. -4 0 ..................................... y
616 // [incoming arguments] [spill slots] [pushed outgoing arguments]
618 // Layout of the environment:
619 // 0 ..................................................... size-1
620 // [parameters] [locals] [expression stack including arguments]
622 // Layout of the translation:
623 // 0 ........................................................ size - 1 + 4
624 // [expression stack including arguments] [locals] [4 words] [parameters]
625 // |>------------ translation_size ------------<|
628 int jsframe_count = 0;
629 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
631 if (e->frame_type() == JS_FUNCTION) {
635 Translation translation(&translations_, frame_count, jsframe_count);
636 WriteTranslation(environment, &translation);
637 int deoptimization_index = deoptimizations_.length();
638 int pc_offset = masm()->pc_offset();
639 environment->Register(deoptimization_index,
641 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
642 deoptimizations_.Add(environment);
647 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
648 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
649 ASSERT(environment->HasBeenRegistered());
650 int id = environment->deoptimization_index();
651 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
653 Abort("bailout was not prepared");
657 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
659 if (FLAG_deopt_every_n_times == 1 &&
660 info_->shared_info()->opt_count() == id) {
661 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
665 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
668 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
670 // We often have several deopts to the same entry, reuse the last
671 // jump entry if this is the case.
672 if (deopt_jump_table_.is_empty() ||
673 (deopt_jump_table_.last().address != entry)) {
674 deopt_jump_table_.Add(JumpTableEntry(entry));
676 __ b(cc, &deopt_jump_table_.last().label);
681 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
682 int length = deoptimizations_.length();
683 if (length == 0) return;
684 Handle<DeoptimizationInputData> data =
685 factory()->NewDeoptimizationInputData(length, TENURED);
687 Handle<ByteArray> translations = translations_.CreateByteArray();
688 data->SetTranslationByteArray(*translations);
689 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
691 Handle<FixedArray> literals =
692 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
693 for (int i = 0; i < deoptimization_literals_.length(); i++) {
694 literals->set(i, *deoptimization_literals_[i]);
696 data->SetLiteralArray(*literals);
698 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
699 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
701 // Populate the deoptimization entries.
702 for (int i = 0; i < length; i++) {
703 LEnvironment* env = deoptimizations_[i];
704 data->SetAstId(i, Smi::FromInt(env->ast_id()));
705 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
706 data->SetArgumentsStackHeight(i,
707 Smi::FromInt(env->arguments_stack_height()));
708 data->SetPc(i, Smi::FromInt(env->pc_offset()));
710 code->set_deoptimization_data(*data);
714 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
715 int result = deoptimization_literals_.length();
716 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
717 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
719 deoptimization_literals_.Add(literal);
724 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
725 ASSERT(deoptimization_literals_.length() == 0);
727 const ZoneList<Handle<JSFunction> >* inlined_closures =
728 chunk()->inlined_closures();
730 for (int i = 0, length = inlined_closures->length();
733 DefineDeoptimizationLiteral(inlined_closures->at(i));
736 inlined_function_count_ = deoptimization_literals_.length();
740 void LCodeGen::RecordSafepointWithLazyDeopt(
741 LInstruction* instr, SafepointMode safepoint_mode) {
742 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
743 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
745 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
746 RecordSafepointWithRegisters(
747 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
752 void LCodeGen::RecordSafepoint(
753 LPointerMap* pointers,
754 Safepoint::Kind kind,
756 Safepoint::DeoptMode deopt_mode) {
757 ASSERT(expected_safepoint_kind_ == kind);
759 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
760 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
761 kind, arguments, deopt_mode);
762 for (int i = 0; i < operands->length(); i++) {
763 LOperand* pointer = operands->at(i);
764 if (pointer->IsStackSlot()) {
765 safepoint.DefinePointerSlot(pointer->index());
766 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
767 safepoint.DefinePointerRegister(ToRegister(pointer));
770 if (kind & Safepoint::kWithRegisters) {
771 // Register cp always contains a pointer to the context.
772 safepoint.DefinePointerRegister(cp);
777 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
778 Safepoint::DeoptMode deopt_mode) {
779 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
783 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
784 LPointerMap empty_pointers(RelocInfo::kNoPosition);
785 RecordSafepoint(&empty_pointers, deopt_mode);
789 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
791 Safepoint::DeoptMode deopt_mode) {
793 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
797 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
798 LPointerMap* pointers,
800 Safepoint::DeoptMode deopt_mode) {
802 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
806 void LCodeGen::RecordPosition(int position) {
807 if (position == RelocInfo::kNoPosition) return;
808 masm()->positions_recorder()->RecordPosition(position);
812 void LCodeGen::DoLabel(LLabel* label) {
813 if (label->is_loop_header()) {
814 Comment(";;; B%d - LOOP entry", label->block_id());
816 Comment(";;; B%d", label->block_id());
818 __ bind(label->label());
819 current_block_ = label->block_id();
824 void LCodeGen::DoParallelMove(LParallelMove* move) {
825 resolver_.Resolve(move);
829 void LCodeGen::DoGap(LGap* gap) {
830 for (int i = LGap::FIRST_INNER_POSITION;
831 i <= LGap::LAST_INNER_POSITION;
833 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
834 LParallelMove* move = gap->GetParallelMove(inner_pos);
835 if (move != NULL) DoParallelMove(move);
840 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
845 void LCodeGen::DoParameter(LParameter* instr) {
850 void LCodeGen::DoCallStub(LCallStub* instr) {
851 ASSERT(ToRegister(instr->result()).is(r0));
852 switch (instr->hydrogen()->major_key()) {
853 case CodeStub::RegExpConstructResult: {
854 RegExpConstructResultStub stub;
855 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
858 case CodeStub::RegExpExec: {
860 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
863 case CodeStub::SubString: {
865 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
868 case CodeStub::NumberToString: {
869 NumberToStringStub stub;
870 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
873 case CodeStub::StringAdd: {
874 StringAddStub stub(NO_STRING_ADD_FLAGS);
875 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
878 case CodeStub::StringCompare: {
879 StringCompareStub stub;
880 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
883 case CodeStub::TranscendentalCache: {
884 __ ldr(r0, MemOperand(sp, 0));
885 TranscendentalCacheStub stub(instr->transcendental_type(),
886 TranscendentalCacheStub::TAGGED);
887 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
896 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
901 void LCodeGen::DoModI(LModI* instr) {
902 if (instr->hydrogen()->HasPowerOf2Divisor()) {
903 Register dividend = ToRegister(instr->InputAt(0));
904 Register result = ToRegister(instr->result());
907 HConstant::cast(instr->hydrogen()->right())->Integer32Value();
909 if (divisor < 0) divisor = -divisor;
911 Label positive_dividend, done;
912 __ cmp(dividend, Operand(0));
913 __ b(pl, &positive_dividend);
914 __ rsb(result, dividend, Operand(0));
915 __ and_(result, result, Operand(divisor - 1), SetCC);
916 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
917 DeoptimizeIf(eq, instr->environment());
919 __ rsb(result, result, Operand(0));
921 __ bind(&positive_dividend);
922 __ and_(result, dividend, Operand(divisor - 1));
927 // These registers hold untagged 32 bit values.
928 Register left = ToRegister(instr->InputAt(0));
929 Register right = ToRegister(instr->InputAt(1));
930 Register result = ToRegister(instr->result());
932 Register scratch = scratch0();
933 Register scratch2 = ToRegister(instr->TempAt(0));
934 DwVfpRegister dividend = ToDoubleRegister(instr->TempAt(1));
935 DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
936 DwVfpRegister quotient = double_scratch0();
938 ASSERT(!dividend.is(divisor));
939 ASSERT(!dividend.is(quotient));
940 ASSERT(!divisor.is(quotient));
941 ASSERT(!scratch.is(left));
942 ASSERT(!scratch.is(right));
943 ASSERT(!scratch.is(result));
945 Label done, vfp_modulo, both_positive, right_negative;
948 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
949 __ cmp(right, Operand(0));
950 DeoptimizeIf(eq, instr->environment());
953 __ Move(result, left);
955 // (0 % x) must yield 0 (if x is finite, which is the case here).
956 __ cmp(left, Operand(0));
958 // Preload right in a vfp register.
959 __ vmov(divisor.low(), right);
960 __ b(lt, &vfp_modulo);
962 __ cmp(left, Operand(right));
965 // Check for (positive) power of two on the right hand side.
966 __ JumpIfNotPowerOfTwoOrZeroAndNeg(right,
970 // Perform modulo operation (scratch contains right - 1).
971 __ and_(result, scratch, Operand(left));
974 __ bind(&right_negative);
975 // Negate right. The sign of the divisor does not matter.
976 __ rsb(right, right, Operand(0));
978 __ bind(&both_positive);
979 const int kUnfolds = 3;
980 // If the right hand side is smaller than the (nonnegative)
981 // left hand side, the left hand side is the result.
982 // Else try a few subtractions of the left hand side.
983 __ mov(scratch, left);
984 for (int i = 0; i < kUnfolds; i++) {
985 // Check if the left hand side is less or equal than the
986 // the right hand side.
987 __ cmp(scratch, Operand(right));
988 __ mov(result, scratch, LeaveCC, lt);
990 // If not, reduce the left hand side by the right hand
991 // side and check again.
992 if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
995 __ bind(&vfp_modulo);
996 // Load the arguments in VFP registers.
997 // The divisor value is preloaded before. Be careful that 'right' is only live
999 __ vmov(dividend.low(), left);
1000 // From here on don't use right as it may have been reallocated (for example
1004 __ vcvt_f64_s32(dividend, dividend.low());
1005 __ vcvt_f64_s32(divisor, divisor.low());
1007 // We do not care about the sign of the divisor.
1008 __ vabs(divisor, divisor);
1009 // Compute the quotient and round it to a 32bit integer.
1010 __ vdiv(quotient, dividend, divisor);
1011 __ vcvt_s32_f64(quotient.low(), quotient);
1012 __ vcvt_f64_s32(quotient, quotient.low());
1014 // Compute the remainder in result.
1015 DwVfpRegister double_scratch = dividend;
1016 __ vmul(double_scratch, divisor, quotient);
1017 __ vcvt_s32_f64(double_scratch.low(), double_scratch);
1018 __ vmov(scratch, double_scratch.low());
1020 if (!instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1021 __ sub(result, left, scratch);
1025 __ sub(scratch2, left, scratch, SetCC);
1027 __ cmp(left, Operand(0));
1028 DeoptimizeIf(mi, instr->environment());
1030 // Load the result and we are done.
1031 __ mov(result, scratch2);
1038 void LCodeGen::EmitSignedIntegerDivisionByConstant(
1044 LEnvironment* environment) {
1045 ASSERT(!AreAliased(dividend, scratch, ip));
1046 ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
1048 uint32_t divisor_abs = abs(divisor);
1050 int32_t power_of_2_factor =
1051 CompilerIntrinsics::CountTrailingZeros(divisor_abs);
1053 switch (divisor_abs) {
1055 DeoptimizeIf(al, environment);
1060 __ Move(result, dividend);
1062 __ rsb(result, dividend, Operand(0), SetCC);
1063 DeoptimizeIf(vs, environment);
1065 // Compute the remainder.
1066 __ mov(remainder, Operand(0));
1070 if (IsPowerOf2(divisor_abs)) {
1071 // Branch and condition free code for integer division by a power
1073 int32_t power = WhichPowerOf2(divisor_abs);
1075 __ mov(scratch, Operand(dividend, ASR, power - 1));
1077 __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
1078 __ mov(result, Operand(scratch, ASR, power));
1079 // Negate if necessary.
1080 // We don't need to check for overflow because the case '-1' is
1081 // handled separately.
1083 ASSERT(divisor != -1);
1084 __ rsb(result, result, Operand(0));
1086 // Compute the remainder.
1088 __ sub(remainder, dividend, Operand(result, LSL, power));
1090 __ add(remainder, dividend, Operand(result, LSL, power));
1094 // Use magic numbers for a few specific divisors.
1095 // Details and proofs can be found in:
1096 // - Hacker's Delight, Henry S. Warren, Jr.
1097 // - The PowerPC Compiler Writer’s Guide
1098 // and probably many others.
1101 // <divisor with magic numbers> * <power of 2>
1103 // <divisor with magic numbers> * <other divisor with magic numbers>
1104 DivMagicNumbers magic_numbers =
1105 DivMagicNumberFor(divisor_abs >> power_of_2_factor);
1106 // Branch and condition free code for integer division by a power
1108 const int32_t M = magic_numbers.M;
1109 const int32_t s = magic_numbers.s + power_of_2_factor;
1111 __ mov(ip, Operand(M));
1112 __ smull(ip, scratch, dividend, ip);
1114 __ add(scratch, scratch, Operand(dividend));
1117 __ mov(scratch, Operand(scratch, ASR, s));
1119 __ add(result, scratch, Operand(dividend, LSR, 31));
1120 if (divisor < 0) __ rsb(result, result, Operand(0));
1121 // Compute the remainder.
1122 __ mov(ip, Operand(divisor));
1123 // This sequence could be replaced with 'mls' when
1124 // it gets implemented.
1125 __ mul(scratch, result, ip);
1126 __ sub(remainder, dividend, scratch);
1132 void LCodeGen::DoDivI(LDivI* instr) {
1133 class DeferredDivI: public LDeferredCode {
1135 DeferredDivI(LCodeGen* codegen, LDivI* instr)
1136 : LDeferredCode(codegen), instr_(instr) { }
1137 virtual void Generate() {
1138 codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
1140 virtual LInstruction* instr() { return instr_; }
1145 const Register left = ToRegister(instr->InputAt(0));
1146 const Register right = ToRegister(instr->InputAt(1));
1147 const Register scratch = scratch0();
1148 const Register result = ToRegister(instr->result());
1151 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1152 __ cmp(right, Operand(0));
1153 DeoptimizeIf(eq, instr->environment());
1156 // Check for (0 / -x) that will produce negative zero.
1157 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1158 Label left_not_zero;
1159 __ cmp(left, Operand(0));
1160 __ b(ne, &left_not_zero);
1161 __ cmp(right, Operand(0));
1162 DeoptimizeIf(mi, instr->environment());
1163 __ bind(&left_not_zero);
1166 // Check for (-kMinInt / -1).
1167 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1168 Label left_not_min_int;
1169 __ cmp(left, Operand(kMinInt));
1170 __ b(ne, &left_not_min_int);
1171 __ cmp(right, Operand(-1));
1172 DeoptimizeIf(eq, instr->environment());
1173 __ bind(&left_not_min_int);
1176 Label done, deoptimize;
1177 // Test for a few common cases first.
1178 __ cmp(right, Operand(1));
1179 __ mov(result, left, LeaveCC, eq);
1182 __ cmp(right, Operand(2));
1183 __ tst(left, Operand(1), eq);
1184 __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
1187 __ cmp(right, Operand(4));
1188 __ tst(left, Operand(3), eq);
1189 __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
1192 // Call the stub. The numbers in r0 and r1 have
1193 // to be tagged to Smis. If that is not possible, deoptimize.
1194 DeferredDivI* deferred = new DeferredDivI(this, instr);
1196 __ TrySmiTag(left, &deoptimize, scratch);
1197 __ TrySmiTag(right, &deoptimize, scratch);
1199 __ b(al, deferred->entry());
1200 __ bind(deferred->exit());
1202 // If the result in r0 is a Smi, untag it, else deoptimize.
1203 __ JumpIfNotSmi(result, &deoptimize);
1204 __ SmiUntag(result);
1207 __ bind(&deoptimize);
1208 DeoptimizeIf(al, instr->environment());
1213 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1214 const Register result = ToRegister(instr->result());
1215 const Register left = ToRegister(instr->InputAt(0));
1216 const Register remainder = ToRegister(instr->TempAt(0));
1217 const Register scratch = scratch0();
1219 // We only optimize this for division by constants, because the standard
1220 // integer division routine is usually slower than transitionning to VFP.
1221 // This could be optimized on processors with SDIV available.
1222 ASSERT(instr->InputAt(1)->IsConstantOperand());
1223 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->InputAt(1)));
1225 __ cmp(left, Operand(0));
1226 DeoptimizeIf(eq, instr->environment());
1228 EmitSignedIntegerDivisionByConstant(result,
1233 instr->environment());
1234 // We operated a truncating division. Correct the result if necessary.
1235 __ cmp(remainder, Operand(0));
1236 __ teq(remainder, Operand(divisor), ne);
1237 __ sub(result, result, Operand(1), LeaveCC, mi);
1242 void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
1244 Register left = ToRegister(instr->InputAt(0));
1245 Register right = ToRegister(instr->InputAt(1));
1247 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1248 // Move left to r1 and right to r0 for the stub call.
1251 } else if (left.is(r0) && right.is(r1)) {
1252 __ Swap(r0, r1, r2);
1253 } else if (left.is(r0)) {
1254 ASSERT(!right.is(r1));
1258 ASSERT(!left.is(r0) && !right.is(r0));
1262 BinaryOpStub stub(op, OVERWRITE_LEFT);
1264 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
1266 Safepoint::kNoLazyDeopt);
1267 // Overwrite the stored value of r0 with the result of the stub.
1268 __ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
1272 void LCodeGen::DoMulI(LMulI* instr) {
1273 Register scratch = scratch0();
1274 Register result = ToRegister(instr->result());
1275 // Note that result may alias left.
1276 Register left = ToRegister(instr->InputAt(0));
1277 LOperand* right_op = instr->InputAt(1);
1279 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1280 bool bailout_on_minus_zero =
1281 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1283 if (right_op->IsConstantOperand() && !can_overflow) {
1284 // Use optimized code for specific constants.
1285 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1287 if (bailout_on_minus_zero && (constant < 0)) {
1288 // The case of a null constant will be handled separately.
1289 // If constant is negative and left is null, the result should be -0.
1290 __ cmp(left, Operand(0));
1291 DeoptimizeIf(eq, instr->environment());
1296 __ rsb(result, left, Operand(0));
1299 if (bailout_on_minus_zero) {
1300 // If left is strictly negative and the constant is null, the
1301 // result is -0. Deoptimize if required, otherwise return 0.
1302 __ cmp(left, Operand(0));
1303 DeoptimizeIf(mi, instr->environment());
1305 __ mov(result, Operand(0));
1308 __ Move(result, left);
1311 // Multiplying by powers of two and powers of two plus or minus
1312 // one can be done faster with shifted operands.
1313 // For other constants we emit standard code.
1314 int32_t mask = constant >> 31;
1315 uint32_t constant_abs = (constant + mask) ^ mask;
1317 if (IsPowerOf2(constant_abs) ||
1318 IsPowerOf2(constant_abs - 1) ||
1319 IsPowerOf2(constant_abs + 1)) {
1320 if (IsPowerOf2(constant_abs)) {
1321 int32_t shift = WhichPowerOf2(constant_abs);
1322 __ mov(result, Operand(left, LSL, shift));
1323 } else if (IsPowerOf2(constant_abs - 1)) {
1324 int32_t shift = WhichPowerOf2(constant_abs - 1);
1325 __ add(result, left, Operand(left, LSL, shift));
1326 } else if (IsPowerOf2(constant_abs + 1)) {
1327 int32_t shift = WhichPowerOf2(constant_abs + 1);
1328 __ rsb(result, left, Operand(left, LSL, shift));
1331 // Correct the sign of the result is the constant is negative.
1332 if (constant < 0) __ rsb(result, result, Operand(0));
1335 // Generate standard code.
1336 __ mov(ip, Operand(constant));
1337 __ mul(result, left, ip);
1342 Register right = EmitLoadRegister(right_op, scratch);
1343 if (bailout_on_minus_zero) {
1344 __ orr(ToRegister(instr->TempAt(0)), left, right);
1348 // scratch:result = left * right.
1349 __ smull(result, scratch, left, right);
1350 __ cmp(scratch, Operand(result, ASR, 31));
1351 DeoptimizeIf(ne, instr->environment());
1353 __ mul(result, left, right);
1356 if (bailout_on_minus_zero) {
1357 // Bail out if the result is supposed to be negative zero.
1359 __ cmp(result, Operand(0));
1361 __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
1362 DeoptimizeIf(mi, instr->environment());
1369 void LCodeGen::DoBitI(LBitI* instr) {
1370 LOperand* left_op = instr->InputAt(0);
1371 LOperand* right_op = instr->InputAt(1);
1372 ASSERT(left_op->IsRegister());
1373 Register left = ToRegister(left_op);
1374 Register result = ToRegister(instr->result());
1375 Operand right(no_reg);
1377 if (right_op->IsStackSlot() || right_op->IsArgument()) {
1378 right = Operand(EmitLoadRegister(right_op, ip));
1380 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1381 right = ToOperand(right_op);
1384 switch (instr->op()) {
1385 case Token::BIT_AND:
1386 __ and_(result, left, right);
1389 __ orr(result, left, right);
1391 case Token::BIT_XOR:
1392 __ eor(result, left, right);
1401 void LCodeGen::DoShiftI(LShiftI* instr) {
1402 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1403 // result may alias either of them.
1404 LOperand* right_op = instr->InputAt(1);
1405 Register left = ToRegister(instr->InputAt(0));
1406 Register result = ToRegister(instr->result());
1407 Register scratch = scratch0();
1408 if (right_op->IsRegister()) {
1409 // Mask the right_op operand.
1410 __ and_(scratch, ToRegister(right_op), Operand(0x1F));
1411 switch (instr->op()) {
1413 __ mov(result, Operand(left, ASR, scratch));
1416 if (instr->can_deopt()) {
1417 __ mov(result, Operand(left, LSR, scratch), SetCC);
1418 DeoptimizeIf(mi, instr->environment());
1420 __ mov(result, Operand(left, LSR, scratch));
1424 __ mov(result, Operand(left, LSL, scratch));
1431 // Mask the right_op operand.
1432 int value = ToInteger32(LConstantOperand::cast(right_op));
1433 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1434 switch (instr->op()) {
1436 if (shift_count != 0) {
1437 __ mov(result, Operand(left, ASR, shift_count));
1439 __ Move(result, left);
1443 if (shift_count != 0) {
1444 __ mov(result, Operand(left, LSR, shift_count));
1446 if (instr->can_deopt()) {
1447 __ tst(left, Operand(0x80000000));
1448 DeoptimizeIf(ne, instr->environment());
1450 __ Move(result, left);
1454 if (shift_count != 0) {
1455 __ mov(result, Operand(left, LSL, shift_count));
1457 __ Move(result, left);
1468 void LCodeGen::DoSubI(LSubI* instr) {
1469 LOperand* left = instr->InputAt(0);
1470 LOperand* right = instr->InputAt(1);
1471 LOperand* result = instr->result();
1472 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1473 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1475 if (right->IsStackSlot() || right->IsArgument()) {
1476 Register right_reg = EmitLoadRegister(right, ip);
1477 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1479 ASSERT(right->IsRegister() || right->IsConstantOperand());
1480 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1484 DeoptimizeIf(vs, instr->environment());
1489 void LCodeGen::DoConstantI(LConstantI* instr) {
1490 ASSERT(instr->result()->IsRegister());
1491 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1495 void LCodeGen::DoConstantD(LConstantD* instr) {
1496 ASSERT(instr->result()->IsDoubleRegister());
1497 DwVfpRegister result = ToDoubleRegister(instr->result());
1498 double v = instr->value();
1503 void LCodeGen::DoConstantT(LConstantT* instr) {
1504 Handle<Object> value = instr->value();
1505 if (value->IsSmi()) {
1506 __ mov(ToRegister(instr->result()), Operand(value));
1508 __ LoadHeapObject(ToRegister(instr->result()),
1509 Handle<HeapObject>::cast(value));
1514 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1515 Register result = ToRegister(instr->result());
1516 Register array = ToRegister(instr->InputAt(0));
1517 __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
1521 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1522 Register result = ToRegister(instr->result());
1523 Register array = ToRegister(instr->InputAt(0));
1524 __ ldr(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
1528 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1529 Register result = ToRegister(instr->result());
1530 Register input = ToRegister(instr->InputAt(0));
1532 // Load map into |result|.
1533 __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
1534 // Load the map's "bit field 2" into |result|. We only need the first byte,
1535 // but the following bit field extraction takes care of that anyway.
1536 __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
1537 // Retrieve elements_kind from bit field 2.
1538 __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
1542 void LCodeGen::DoValueOf(LValueOf* instr) {
1543 Register input = ToRegister(instr->InputAt(0));
1544 Register result = ToRegister(instr->result());
1545 Register map = ToRegister(instr->TempAt(0));
1548 // If the object is a smi return the object.
1549 __ tst(input, Operand(kSmiTagMask));
1550 __ Move(result, input, eq);
1553 // If the object is not a value type, return the object.
1554 __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
1555 __ Move(result, input, ne);
1557 __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
1563 void LCodeGen::DoDateField(LDateField* instr) {
1564 Register object = ToRegister(instr->InputAt(0));
1565 Register result = ToRegister(instr->result());
1566 Register scratch = ToRegister(instr->TempAt(0));
1567 Smi* index = instr->index();
1568 Label runtime, done;
1569 ASSERT(object.is(result));
1570 ASSERT(object.is(r0));
1571 ASSERT(!scratch.is(scratch0()));
1572 ASSERT(!scratch.is(object));
1575 __ AbortIfSmi(object);
1576 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
1577 __ Assert(eq, "Trying to get date field from non-date.");
1580 if (index->value() == 0) {
1581 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
1583 if (index->value() < JSDate::kFirstUncachedField) {
1584 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1585 __ mov(scratch, Operand(stamp));
1586 __ ldr(scratch, MemOperand(scratch));
1587 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1588 __ cmp(scratch, scratch0());
1590 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
1591 kPointerSize * index->value()));
1595 __ PrepareCallCFunction(2, scratch);
1596 __ mov(r1, Operand(index));
1597 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1603 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1604 Register input = ToRegister(instr->InputAt(0));
1605 Register result = ToRegister(instr->result());
1606 __ mvn(result, Operand(input));
1610 void LCodeGen::DoThrow(LThrow* instr) {
1611 Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
1613 CallRuntime(Runtime::kThrow, 1, instr);
1615 if (FLAG_debug_code) {
1616 __ stop("Unreachable code.");
1621 void LCodeGen::DoAddI(LAddI* instr) {
1622 LOperand* left = instr->InputAt(0);
1623 LOperand* right = instr->InputAt(1);
1624 LOperand* result = instr->result();
1625 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1626 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1628 if (right->IsStackSlot() || right->IsArgument()) {
1629 Register right_reg = EmitLoadRegister(right, ip);
1630 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1632 ASSERT(right->IsRegister() || right->IsConstantOperand());
1633 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1637 DeoptimizeIf(vs, instr->environment());
1642 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1643 DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1644 DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1645 DoubleRegister result = ToDoubleRegister(instr->result());
1646 switch (instr->op()) {
1648 __ vadd(result, left, right);
1651 __ vsub(result, left, right);
1654 __ vmul(result, left, right);
1657 __ vdiv(result, left, right);
1660 // Save r0-r3 on the stack.
1661 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1663 __ PrepareCallCFunction(0, 2, scratch0());
1664 __ SetCallCDoubleArguments(left, right);
1666 ExternalReference::double_fp_operation(Token::MOD, isolate()),
1668 // Move the result in the double result register.
1669 __ GetCFunctionDoubleResult(result);
1672 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
1682 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1683 ASSERT(ToRegister(instr->InputAt(0)).is(r1));
1684 ASSERT(ToRegister(instr->InputAt(1)).is(r0));
1685 ASSERT(ToRegister(instr->result()).is(r0));
1687 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1688 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1689 __ nop(); // Signals no inlined code.
1693 int LCodeGen::GetNextEmittedBlock(int block) {
1694 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1695 LLabel* label = chunk_->GetLabel(i);
1696 if (!label->HasReplacement()) return i;
1702 void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1703 int next_block = GetNextEmittedBlock(current_block_);
1704 right_block = chunk_->LookupDestination(right_block);
1705 left_block = chunk_->LookupDestination(left_block);
1707 if (right_block == left_block) {
1708 EmitGoto(left_block);
1709 } else if (left_block == next_block) {
1710 __ b(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1711 } else if (right_block == next_block) {
1712 __ b(cc, chunk_->GetAssemblyLabel(left_block));
1714 __ b(cc, chunk_->GetAssemblyLabel(left_block));
1715 __ b(chunk_->GetAssemblyLabel(right_block));
1720 void LCodeGen::DoBranch(LBranch* instr) {
1721 int true_block = chunk_->LookupDestination(instr->true_block_id());
1722 int false_block = chunk_->LookupDestination(instr->false_block_id());
1724 Representation r = instr->hydrogen()->value()->representation();
1725 if (r.IsInteger32()) {
1726 Register reg = ToRegister(instr->InputAt(0));
1727 __ cmp(reg, Operand(0));
1728 EmitBranch(true_block, false_block, ne);
1729 } else if (r.IsDouble()) {
1730 DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1731 Register scratch = scratch0();
1733 // Test the double value. Zero and NaN are false.
1734 __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1735 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1736 EmitBranch(true_block, false_block, eq);
1738 ASSERT(r.IsTagged());
1739 Register reg = ToRegister(instr->InputAt(0));
1740 HType type = instr->hydrogen()->value()->type();
1741 if (type.IsBoolean()) {
1742 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1743 EmitBranch(true_block, false_block, eq);
1744 } else if (type.IsSmi()) {
1745 __ cmp(reg, Operand(0));
1746 EmitBranch(true_block, false_block, ne);
1748 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1749 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1751 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1752 // Avoid deopts in the case where we've never executed this path before.
1753 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1755 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1756 // undefined -> false.
1757 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
1758 __ b(eq, false_label);
1760 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1761 // Boolean -> its value.
1762 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
1763 __ b(eq, true_label);
1764 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
1765 __ b(eq, false_label);
1767 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1769 __ CompareRoot(reg, Heap::kNullValueRootIndex);
1770 __ b(eq, false_label);
1773 if (expected.Contains(ToBooleanStub::SMI)) {
1774 // Smis: 0 -> false, all other -> true.
1775 __ cmp(reg, Operand(0));
1776 __ b(eq, false_label);
1777 __ JumpIfSmi(reg, true_label);
1778 } else if (expected.NeedsMap()) {
1779 // If we need a map later and have a Smi -> deopt.
1780 __ tst(reg, Operand(kSmiTagMask));
1781 DeoptimizeIf(eq, instr->environment());
1784 const Register map = scratch0();
1785 if (expected.NeedsMap()) {
1786 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1788 if (expected.CanBeUndetectable()) {
1789 // Undetectable -> false.
1790 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
1791 __ tst(ip, Operand(1 << Map::kIsUndetectable));
1792 __ b(ne, false_label);
1796 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1797 // spec object -> true.
1798 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
1799 __ b(ge, true_label);
1802 if (expected.Contains(ToBooleanStub::STRING)) {
1803 // String value -> false iff empty.
1805 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
1806 __ b(ge, ¬_string);
1807 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
1808 __ cmp(ip, Operand(0));
1809 __ b(ne, true_label);
1811 __ bind(¬_string);
1814 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1815 // heap number -> false iff +0, -0, or NaN.
1816 DoubleRegister dbl_scratch = double_scratch0();
1817 Label not_heap_number;
1818 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
1819 __ b(ne, ¬_heap_number);
1820 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1821 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
1822 __ b(vs, false_label); // NaN -> false.
1823 __ b(eq, false_label); // +0, -0 -> false.
1825 __ bind(¬_heap_number);
1828 // We've seen something for the first time -> deopt.
1829 DeoptimizeIf(al, instr->environment());
1835 void LCodeGen::EmitGoto(int block) {
1836 block = chunk_->LookupDestination(block);
1837 int next_block = GetNextEmittedBlock(current_block_);
1838 if (block != next_block) {
1839 __ jmp(chunk_->GetAssemblyLabel(block));
1844 void LCodeGen::DoGoto(LGoto* instr) {
1845 EmitGoto(instr->block_id());
1849 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1850 Condition cond = kNoCondition;
1853 case Token::EQ_STRICT:
1857 cond = is_unsigned ? lo : lt;
1860 cond = is_unsigned ? hi : gt;
1863 cond = is_unsigned ? ls : le;
1866 cond = is_unsigned ? hs : ge;
1869 case Token::INSTANCEOF:
1877 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1878 LOperand* left = instr->InputAt(0);
1879 LOperand* right = instr->InputAt(1);
1880 int false_block = chunk_->LookupDestination(instr->false_block_id());
1881 int true_block = chunk_->LookupDestination(instr->true_block_id());
1882 Condition cond = TokenToCondition(instr->op(), false);
1884 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1885 // We can statically evaluate the comparison.
1886 double left_val = ToDouble(LConstantOperand::cast(left));
1887 double right_val = ToDouble(LConstantOperand::cast(right));
1889 EvalComparison(instr->op(), left_val, right_val) ? true_block
1891 EmitGoto(next_block);
1893 if (instr->is_double()) {
1894 // Compare left and right operands as doubles and load the
1895 // resulting flags into the normal status register.
1896 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
1897 // If a NaN is involved, i.e. the result is unordered (V set),
1898 // jump to false block label.
1899 __ b(vs, chunk_->GetAssemblyLabel(false_block));
1901 if (right->IsConstantOperand()) {
1902 __ cmp(ToRegister(left),
1903 Operand(ToInteger32(LConstantOperand::cast(right))));
1904 } else if (left->IsConstantOperand()) {
1905 __ cmp(ToRegister(right),
1906 Operand(ToInteger32(LConstantOperand::cast(left))));
1907 // We transposed the operands. Reverse the condition.
1908 cond = ReverseCondition(cond);
1910 __ cmp(ToRegister(left), ToRegister(right));
1913 EmitBranch(true_block, false_block, cond);
1918 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1919 Register left = ToRegister(instr->InputAt(0));
1920 Register right = ToRegister(instr->InputAt(1));
1921 int false_block = chunk_->LookupDestination(instr->false_block_id());
1922 int true_block = chunk_->LookupDestination(instr->true_block_id());
1924 __ cmp(left, Operand(right));
1925 EmitBranch(true_block, false_block, eq);
1929 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1930 Register left = ToRegister(instr->InputAt(0));
1931 int true_block = chunk_->LookupDestination(instr->true_block_id());
1932 int false_block = chunk_->LookupDestination(instr->false_block_id());
1934 __ cmp(left, Operand(instr->hydrogen()->right()));
1935 EmitBranch(true_block, false_block, eq);
1939 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1940 Register scratch = scratch0();
1941 Register reg = ToRegister(instr->InputAt(0));
1942 int false_block = chunk_->LookupDestination(instr->false_block_id());
1944 // If the expression is known to be untagged or a smi, then it's definitely
1945 // not null, and it can't be a an undetectable object.
1946 if (instr->hydrogen()->representation().IsSpecialization() ||
1947 instr->hydrogen()->type().IsSmi()) {
1948 EmitGoto(false_block);
1952 int true_block = chunk_->LookupDestination(instr->true_block_id());
1953 Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1954 Heap::kNullValueRootIndex :
1955 Heap::kUndefinedValueRootIndex;
1956 __ LoadRoot(ip, nil_value);
1958 if (instr->kind() == kStrictEquality) {
1959 EmitBranch(true_block, false_block, eq);
1961 Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1962 Heap::kUndefinedValueRootIndex :
1963 Heap::kNullValueRootIndex;
1964 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1965 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1966 __ b(eq, true_label);
1967 __ LoadRoot(ip, other_nil_value);
1969 __ b(eq, true_label);
1970 __ JumpIfSmi(reg, false_label);
1971 // Check for undetectable objects by looking in the bit field in
1972 // the map. The object has already been smi checked.
1973 __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1974 __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1975 __ tst(scratch, Operand(1 << Map::kIsUndetectable));
1976 EmitBranch(true_block, false_block, ne);
1981 Condition LCodeGen::EmitIsObject(Register input,
1983 Label* is_not_object,
1985 Register temp2 = scratch0();
1986 __ JumpIfSmi(input, is_not_object);
1988 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1989 __ cmp(input, temp2);
1990 __ b(eq, is_object);
1993 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1994 // Undetectable objects behave like undefined.
1995 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1996 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
1997 __ b(ne, is_not_object);
1999 // Load instance type and check that it is in object type range.
2000 __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2001 __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2002 __ b(lt, is_not_object);
2003 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2008 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2009 Register reg = ToRegister(instr->InputAt(0));
2010 Register temp1 = ToRegister(instr->TempAt(0));
2012 int true_block = chunk_->LookupDestination(instr->true_block_id());
2013 int false_block = chunk_->LookupDestination(instr->false_block_id());
2014 Label* true_label = chunk_->GetAssemblyLabel(true_block);
2015 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2017 Condition true_cond =
2018 EmitIsObject(reg, temp1, false_label, true_label);
2020 EmitBranch(true_block, false_block, true_cond);
2024 Condition LCodeGen::EmitIsString(Register input,
2026 Label* is_not_string) {
2027 __ JumpIfSmi(input, is_not_string);
2028 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2034 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2035 Register reg = ToRegister(instr->InputAt(0));
2036 Register temp1 = ToRegister(instr->TempAt(0));
2038 int true_block = chunk_->LookupDestination(instr->true_block_id());
2039 int false_block = chunk_->LookupDestination(instr->false_block_id());
2040 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2042 Condition true_cond =
2043 EmitIsString(reg, temp1, false_label);
2045 EmitBranch(true_block, false_block, true_cond);
2049 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2050 int true_block = chunk_->LookupDestination(instr->true_block_id());
2051 int false_block = chunk_->LookupDestination(instr->false_block_id());
2053 Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
2054 __ tst(input_reg, Operand(kSmiTagMask));
2055 EmitBranch(true_block, false_block, eq);
2059 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2060 Register input = ToRegister(instr->InputAt(0));
2061 Register temp = ToRegister(instr->TempAt(0));
2063 int true_block = chunk_->LookupDestination(instr->true_block_id());
2064 int false_block = chunk_->LookupDestination(instr->false_block_id());
2066 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
2067 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2068 __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2069 __ tst(temp, Operand(1 << Map::kIsUndetectable));
2070 EmitBranch(true_block, false_block, ne);
2074 static Condition ComputeCompareCondition(Token::Value op) {
2076 case Token::EQ_STRICT:
2089 return kNoCondition;
2094 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2095 Token::Value op = instr->op();
2096 int true_block = chunk_->LookupDestination(instr->true_block_id());
2097 int false_block = chunk_->LookupDestination(instr->false_block_id());
2099 Handle<Code> ic = CompareIC::GetUninitialized(op);
2100 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2101 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2103 Condition condition = ComputeCompareCondition(op);
2105 EmitBranch(true_block, false_block, condition);
2109 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2110 InstanceType from = instr->from();
2111 InstanceType to = instr->to();
2112 if (from == FIRST_TYPE) return to;
2113 ASSERT(from == to || to == LAST_TYPE);
2118 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2119 InstanceType from = instr->from();
2120 InstanceType to = instr->to();
2121 if (from == to) return eq;
2122 if (to == LAST_TYPE) return hs;
2123 if (from == FIRST_TYPE) return ls;
2129 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2130 Register scratch = scratch0();
2131 Register input = ToRegister(instr->InputAt(0));
2133 int true_block = chunk_->LookupDestination(instr->true_block_id());
2134 int false_block = chunk_->LookupDestination(instr->false_block_id());
2136 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2138 __ JumpIfSmi(input, false_label);
2140 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2141 EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
2145 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2146 Register input = ToRegister(instr->InputAt(0));
2147 Register result = ToRegister(instr->result());
2149 if (FLAG_debug_code) {
2150 __ AbortIfNotString(input);
2153 __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
2154 __ IndexFromHash(result, result);
2158 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2159 LHasCachedArrayIndexAndBranch* instr) {
2160 Register input = ToRegister(instr->InputAt(0));
2161 Register scratch = scratch0();
2163 int true_block = chunk_->LookupDestination(instr->true_block_id());
2164 int false_block = chunk_->LookupDestination(instr->false_block_id());
2167 FieldMemOperand(input, String::kHashFieldOffset));
2168 __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
2169 EmitBranch(true_block, false_block, eq);
2173 // Branches to a label or falls through with the answer in flags. Trashes
2174 // the temp registers, but not the input.
2175 void LCodeGen::EmitClassOfTest(Label* is_true,
2177 Handle<String>class_name,
2181 ASSERT(!input.is(temp));
2182 ASSERT(!input.is(temp2));
2183 ASSERT(!temp.is(temp2));
2185 __ JumpIfSmi(input, is_false);
2187 if (class_name->IsEqualTo(CStrVector("Function"))) {
2188 // Assuming the following assertions, we can use the same compares to test
2189 // for both being a function type and being in the object type range.
2190 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2191 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2192 FIRST_SPEC_OBJECT_TYPE + 1);
2193 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2194 LAST_SPEC_OBJECT_TYPE - 1);
2195 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2196 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2199 __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2202 // Faster code path to avoid two compares: subtract lower bound from the
2203 // actual type and do a signed compare with the width of the type range.
2204 __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2205 __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2206 __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2207 __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2208 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2212 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2213 // Check if the constructor in the map is a function.
2214 __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2216 // Objects with a non-function constructor have class 'Object'.
2217 __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
2218 if (class_name->IsEqualTo(CStrVector("Object"))) {
2224 // temp now contains the constructor function. Grab the
2225 // instance class name from there.
2226 __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2227 __ ldr(temp, FieldMemOperand(temp,
2228 SharedFunctionInfo::kInstanceClassNameOffset));
2229 // The class name we are testing against is a symbol because it's a literal.
2230 // The name in the constructor is a symbol because of the way the context is
2231 // booted. This routine isn't expected to work for random API-created
2232 // classes and it doesn't have to because you can't access it with natives
2233 // syntax. Since both sides are symbols it is sufficient to use an identity
2235 __ cmp(temp, Operand(class_name));
2236 // End with the answer in flags.
2240 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2241 Register input = ToRegister(instr->InputAt(0));
2242 Register temp = scratch0();
2243 Register temp2 = ToRegister(instr->TempAt(0));
2244 Handle<String> class_name = instr->hydrogen()->class_name();
2246 int true_block = chunk_->LookupDestination(instr->true_block_id());
2247 int false_block = chunk_->LookupDestination(instr->false_block_id());
2249 Label* true_label = chunk_->GetAssemblyLabel(true_block);
2250 Label* false_label = chunk_->GetAssemblyLabel(false_block);
2252 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
2254 EmitBranch(true_block, false_block, eq);
2258 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2259 Register reg = ToRegister(instr->InputAt(0));
2260 Register temp = ToRegister(instr->TempAt(0));
2261 int true_block = instr->true_block_id();
2262 int false_block = instr->false_block_id();
2264 __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2265 __ cmp(temp, Operand(instr->map()));
2266 EmitBranch(true_block, false_block, eq);
2270 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2271 ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
2272 ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
2274 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2275 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2277 __ cmp(r0, Operand(0));
2278 __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
2279 __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
2283 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2284 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2286 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2287 LInstanceOfKnownGlobal* instr)
2288 : LDeferredCode(codegen), instr_(instr) { }
2289 virtual void Generate() {
2290 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2292 virtual LInstruction* instr() { return instr_; }
2293 Label* map_check() { return &map_check_; }
2295 LInstanceOfKnownGlobal* instr_;
2299 DeferredInstanceOfKnownGlobal* deferred;
2300 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2302 Label done, false_result;
2303 Register object = ToRegister(instr->InputAt(0));
2304 Register temp = ToRegister(instr->TempAt(0));
2305 Register result = ToRegister(instr->result());
2307 ASSERT(object.is(r0));
2308 ASSERT(result.is(r0));
2310 // A Smi is not instance of anything.
2311 __ JumpIfSmi(object, &false_result);
2313 // This is the inlined call site instanceof cache. The two occurences of the
2314 // hole value will be patched to the last map/result pair generated by the
2317 Register map = temp;
2318 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2319 __ bind(deferred->map_check()); // Label for calculating code patching.
2320 // We use Factory::the_hole_value() on purpose instead of loading from the
2321 // root array to force relocation to be able to later patch with
2323 Handle<JSGlobalPropertyCell> cell =
2324 factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2325 __ mov(ip, Operand(Handle<Object>(cell)));
2326 __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2327 __ cmp(map, Operand(ip));
2328 __ b(ne, &cache_miss);
2329 // We use Factory::the_hole_value() on purpose instead of loading from the
2330 // root array to force relocation to be able to later patch
2331 // with true or false.
2332 __ mov(result, Operand(factory()->the_hole_value()));
2335 // The inlined call site cache did not match. Check null and string before
2336 // calling the deferred code.
2337 __ bind(&cache_miss);
2338 // Null is not instance of anything.
2339 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2340 __ cmp(object, Operand(ip));
2341 __ b(eq, &false_result);
2343 // String values is not instance of anything.
2344 Condition is_string = masm_->IsObjectStringType(object, temp);
2345 __ b(is_string, &false_result);
2347 // Go to the deferred code.
2348 __ b(deferred->entry());
2350 __ bind(&false_result);
2351 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2353 // Here result has either true or false. Deferred code also produces true or
2355 __ bind(deferred->exit());
2360 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2362 Register result = ToRegister(instr->result());
2363 ASSERT(result.is(r0));
2365 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2366 flags = static_cast<InstanceofStub::Flags>(
2367 flags | InstanceofStub::kArgsInRegisters);
2368 flags = static_cast<InstanceofStub::Flags>(
2369 flags | InstanceofStub::kCallSiteInlineCheck);
2370 flags = static_cast<InstanceofStub::Flags>(
2371 flags | InstanceofStub::kReturnTrueFalseObject);
2372 InstanceofStub stub(flags);
2374 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2376 // Get the temp register reserved by the instruction. This needs to be r4 as
2377 // its slot of the pushing of safepoint registers is used to communicate the
2378 // offset to the location of the map check.
2379 Register temp = ToRegister(instr->TempAt(0));
2380 ASSERT(temp.is(r4));
2381 __ LoadHeapObject(InstanceofStub::right(), instr->function());
2382 static const int kAdditionalDelta = 4;
2383 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2384 Label before_push_delta;
2385 __ bind(&before_push_delta);
2386 __ BlockConstPoolFor(kAdditionalDelta);
2387 __ mov(temp, Operand(delta * kPointerSize));
2388 __ StoreToSafepointRegisterSlot(temp, temp);
2389 CallCodeGeneric(stub.GetCode(),
2390 RelocInfo::CODE_TARGET,
2392 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2393 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2394 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2395 // Put the result value into the result register slot and
2396 // restore all registers.
2397 __ StoreToSafepointRegisterSlot(result, result);
2401 void LCodeGen::DoCmpT(LCmpT* instr) {
2402 Token::Value op = instr->op();
2404 Handle<Code> ic = CompareIC::GetUninitialized(op);
2405 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2406 __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined.
2408 Condition condition = ComputeCompareCondition(op);
2409 __ LoadRoot(ToRegister(instr->result()),
2410 Heap::kTrueValueRootIndex,
2412 __ LoadRoot(ToRegister(instr->result()),
2413 Heap::kFalseValueRootIndex,
2414 NegateCondition(condition));
2418 void LCodeGen::DoReturn(LReturn* instr) {
2420 // Push the return value on the stack as the parameter.
2421 // Runtime::TraceExit returns its parameter in r0.
2423 __ CallRuntime(Runtime::kTraceExit, 1);
2425 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2427 __ ldm(ia_w, sp, fp.bit() | lr.bit());
2428 __ add(sp, sp, Operand(sp_delta));
2433 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2434 Register result = ToRegister(instr->result());
2435 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2436 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2437 if (instr->hydrogen()->RequiresHoleCheck()) {
2438 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2440 DeoptimizeIf(eq, instr->environment());
2445 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2446 ASSERT(ToRegister(instr->global_object()).is(r0));
2447 ASSERT(ToRegister(instr->result()).is(r0));
2449 __ mov(r2, Operand(instr->name()));
2450 RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2451 : RelocInfo::CODE_TARGET_CONTEXT;
2452 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2453 CallCode(ic, mode, instr);
2457 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2458 Register value = ToRegister(instr->value());
2459 Register cell = scratch0();
2462 __ mov(cell, Operand(instr->hydrogen()->cell()));
2464 // If the cell we are storing to contains the hole it could have
2465 // been deleted from the property dictionary. In that case, we need
2466 // to update the property details in the property dictionary to mark
2467 // it as no longer deleted.
2468 if (instr->hydrogen()->RequiresHoleCheck()) {
2469 // We use a temp to check the payload (CompareRoot might clobber ip).
2470 Register payload = ToRegister(instr->TempAt(0));
2471 __ ldr(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2472 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
2473 DeoptimizeIf(eq, instr->environment());
2477 __ str(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2478 // Cells are always rescanned, so no write barrier here.
2482 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2483 ASSERT(ToRegister(instr->global_object()).is(r1));
2484 ASSERT(ToRegister(instr->value()).is(r0));
2486 __ mov(r2, Operand(instr->name()));
2487 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2488 ? isolate()->builtins()->StoreIC_Initialize_Strict()
2489 : isolate()->builtins()->StoreIC_Initialize();
2490 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2494 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2495 Register context = ToRegister(instr->context());
2496 Register result = ToRegister(instr->result());
2497 __ ldr(result, ContextOperand(context, instr->slot_index()));
2498 if (instr->hydrogen()->RequiresHoleCheck()) {
2499 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2501 if (instr->hydrogen()->DeoptimizesOnHole()) {
2502 DeoptimizeIf(eq, instr->environment());
2504 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
2510 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2511 Register context = ToRegister(instr->context());
2512 Register value = ToRegister(instr->value());
2513 Register scratch = scratch0();
2514 MemOperand target = ContextOperand(context, instr->slot_index());
2516 Label skip_assignment;
2518 if (instr->hydrogen()->RequiresHoleCheck()) {
2519 __ ldr(scratch, target);
2520 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2521 __ cmp(scratch, ip);
2522 if (instr->hydrogen()->DeoptimizesOnHole()) {
2523 DeoptimizeIf(eq, instr->environment());
2525 __ b(ne, &skip_assignment);
2529 __ str(value, target);
2530 if (instr->hydrogen()->NeedsWriteBarrier()) {
2531 HType type = instr->hydrogen()->value()->type();
2532 SmiCheck check_needed =
2533 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2534 __ RecordWriteContextSlot(context,
2540 EMIT_REMEMBERED_SET,
2544 __ bind(&skip_assignment);
2548 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2549 Register object = ToRegister(instr->InputAt(0));
2550 Register result = ToRegister(instr->result());
2551 if (instr->hydrogen()->is_in_object()) {
2552 __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2554 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2555 __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset()));
2560 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2563 Handle<String> name) {
2564 LookupResult lookup(isolate());
2565 type->LookupInDescriptors(NULL, *name, &lookup);
2566 ASSERT(lookup.IsFound() &&
2567 (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2568 if (lookup.type() == FIELD) {
2569 int index = lookup.GetLocalFieldIndexFromMap(*type);
2570 int offset = index * kPointerSize;
2572 // Negative property indices are in-object properties, indexed
2573 // from the end of the fixed part of the object.
2574 __ ldr(result, FieldMemOperand(object, offset + type->instance_size()));
2576 // Non-negative property indices are in the properties array.
2577 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2578 __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2581 Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2582 __ LoadHeapObject(result, function);
2587 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2588 Register object = ToRegister(instr->object());
2589 Register result = ToRegister(instr->result());
2590 Register scratch = scratch0();
2592 int map_count = instr->hydrogen()->types()->length();
2593 bool need_generic = instr->hydrogen()->need_generic();
2595 if (map_count == 0 && !need_generic) {
2596 DeoptimizeIf(al, instr->environment());
2599 Handle<String> name = instr->hydrogen()->name();
2601 __ ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2602 for (int i = 0; i < map_count; ++i) {
2603 bool last = (i == map_count - 1);
2604 Handle<Map> map = instr->hydrogen()->types()->at(i);
2605 __ cmp(scratch, Operand(map));
2606 if (last && !need_generic) {
2607 DeoptimizeIf(ne, instr->environment());
2608 EmitLoadFieldOrConstantFunction(result, object, map, name);
2612 EmitLoadFieldOrConstantFunction(result, object, map, name);
2618 __ mov(r2, Operand(name));
2619 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2620 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2626 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2627 ASSERT(ToRegister(instr->object()).is(r0));
2628 ASSERT(ToRegister(instr->result()).is(r0));
2630 // Name is always in r2.
2631 __ mov(r2, Operand(instr->name()));
2632 Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2633 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2637 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2638 Register scratch = scratch0();
2639 Register function = ToRegister(instr->function());
2640 Register result = ToRegister(instr->result());
2642 // Check that the function really is a function. Load map into the
2644 __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2645 DeoptimizeIf(ne, instr->environment());
2647 // Make sure that the function has an instance prototype.
2649 __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2650 __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2651 __ b(ne, &non_instance);
2653 // Get the prototype or initial map from the function.
2655 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2657 // Check that the function has a prototype or an initial map.
2658 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2660 DeoptimizeIf(eq, instr->environment());
2662 // If the function does not have an initial map, we're done.
2664 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
2667 // Get the prototype from the initial map.
2668 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2671 // Non-instance prototype: Fetch prototype from constructor field
2673 __ bind(&non_instance);
2674 __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2681 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2682 Register result = ToRegister(instr->result());
2683 Register input = ToRegister(instr->InputAt(0));
2684 Register scratch = scratch0();
2686 __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
2687 if (FLAG_debug_code) {
2689 __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2690 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2691 __ cmp(scratch, ip);
2693 __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2694 __ cmp(scratch, ip);
2696 // |scratch| still contains |input|'s map.
2697 __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2698 __ ubfx(scratch, scratch, Map::kElementsKindShift,
2699 Map::kElementsKindBitCount);
2700 __ cmp(scratch, Operand(FAST_ELEMENTS));
2702 __ cmp(scratch, Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2704 __ cmp(scratch, Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2707 __ Abort("Check for fast or external elements failed.");
2713 void LCodeGen::DoLoadExternalArrayPointer(
2714 LLoadExternalArrayPointer* instr) {
2715 Register to_reg = ToRegister(instr->result());
2716 Register from_reg = ToRegister(instr->InputAt(0));
2717 __ ldr(to_reg, FieldMemOperand(from_reg,
2718 ExternalArray::kExternalPointerOffset));
2722 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2723 Register arguments = ToRegister(instr->arguments());
2724 Register length = ToRegister(instr->length());
2725 Register index = ToRegister(instr->index());
2726 Register result = ToRegister(instr->result());
2728 // Bailout index is not a valid argument index. Use unsigned check to get
2729 // negative check for free.
2730 __ sub(length, length, index, SetCC);
2731 DeoptimizeIf(ls, instr->environment());
2733 // There are two words between the frame pointer and the last argument.
2734 // Subtracting from length accounts for one of them add one more.
2735 __ add(length, length, Operand(1));
2736 __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
2740 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2741 Register elements = ToRegister(instr->elements());
2742 Register key = EmitLoadRegister(instr->key(), scratch0());
2743 Register result = ToRegister(instr->result());
2744 Register scratch = scratch0();
2747 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
2748 uint32_t offset = FixedArray::kHeaderSize +
2749 (instr->additional_index() << kPointerSizeLog2);
2750 __ ldr(result, FieldMemOperand(scratch, offset));
2752 // Check for the hole value.
2753 if (instr->hydrogen()->RequiresHoleCheck()) {
2754 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2755 __ cmp(result, scratch);
2756 DeoptimizeIf(eq, instr->environment());
2761 void LCodeGen::DoLoadKeyedFastDoubleElement(
2762 LLoadKeyedFastDoubleElement* instr) {
2763 Register elements = ToRegister(instr->elements());
2764 bool key_is_constant = instr->key()->IsConstantOperand();
2765 Register key = no_reg;
2766 DwVfpRegister result = ToDoubleRegister(instr->result());
2767 Register scratch = scratch0();
2770 ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2771 int constant_key = 0;
2772 if (key_is_constant) {
2773 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2774 if (constant_key & 0xF0000000) {
2775 Abort("array index constant value too big.");
2778 key = ToRegister(instr->key());
2781 Operand operand = key_is_constant
2782 ? Operand(((constant_key + instr->additional_index()) << shift_size) +
2783 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
2784 : Operand(key, LSL, shift_size);
2785 __ add(elements, elements, operand);
2786 if (!key_is_constant) {
2787 __ add(elements, elements,
2788 Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
2789 (instr->additional_index() << shift_size)));
2792 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2793 __ cmp(scratch, Operand(kHoleNanUpper32));
2794 DeoptimizeIf(eq, instr->environment());
2796 __ vldr(result, elements, 0);
2800 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2801 LLoadKeyedSpecializedArrayElement* instr) {
2802 Register external_pointer = ToRegister(instr->external_pointer());
2803 Register key = no_reg;
2804 ElementsKind elements_kind = instr->elements_kind();
2805 bool key_is_constant = instr->key()->IsConstantOperand();
2806 int constant_key = 0;
2807 if (key_is_constant) {
2808 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2809 if (constant_key & 0xF0000000) {
2810 Abort("array index constant value too big.");
2813 key = ToRegister(instr->key());
2815 int shift_size = ElementsKindToShiftSize(elements_kind);
2816 int additional_offset = instr->additional_index() << shift_size;
2818 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2819 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2820 CpuFeatures::Scope scope(VFP3);
2821 DwVfpRegister result = ToDoubleRegister(instr->result());
2822 Operand operand = key_is_constant
2823 ? Operand(constant_key << shift_size)
2824 : Operand(key, LSL, shift_size);
2825 __ add(scratch0(), external_pointer, operand);
2826 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
2827 __ vldr(result.low(), scratch0(), additional_offset);
2828 __ vcvt_f64_f32(result, result.low());
2829 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2830 __ vldr(result, scratch0(), additional_offset);
2833 Register result = ToRegister(instr->result());
2834 if (instr->additional_index() != 0 && !key_is_constant) {
2835 __ add(scratch0(), key, Operand(instr->additional_index()));
2837 MemOperand mem_operand(key_is_constant
2838 ? MemOperand(external_pointer,
2839 (constant_key << shift_size) + additional_offset)
2840 : (instr->additional_index() == 0
2841 ? MemOperand(external_pointer, key, LSL, shift_size)
2842 : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
2843 switch (elements_kind) {
2844 case EXTERNAL_BYTE_ELEMENTS:
2845 __ ldrsb(result, mem_operand);
2847 case EXTERNAL_PIXEL_ELEMENTS:
2848 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2849 __ ldrb(result, mem_operand);
2851 case EXTERNAL_SHORT_ELEMENTS:
2852 __ ldrsh(result, mem_operand);
2854 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2855 __ ldrh(result, mem_operand);
2857 case EXTERNAL_INT_ELEMENTS:
2858 __ ldr(result, mem_operand);
2860 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2861 __ ldr(result, mem_operand);
2862 __ cmp(result, Operand(0x80000000));
2863 // TODO(danno): we could be more clever here, perhaps having a special
2864 // version of the stub that detects if the overflow case actually
2865 // happens, and generate code that returns a double rather than int.
2866 DeoptimizeIf(cs, instr->environment());
2868 case EXTERNAL_FLOAT_ELEMENTS:
2869 case EXTERNAL_DOUBLE_ELEMENTS:
2870 case FAST_DOUBLE_ELEMENTS:
2872 case FAST_SMI_ONLY_ELEMENTS:
2873 case DICTIONARY_ELEMENTS:
2874 case NON_STRICT_ARGUMENTS_ELEMENTS:
2882 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2883 ASSERT(ToRegister(instr->object()).is(r1));
2884 ASSERT(ToRegister(instr->key()).is(r0));
2886 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2887 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2891 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2892 Register scratch = scratch0();
2893 Register result = ToRegister(instr->result());
2895 if (instr->hydrogen()->from_inlined()) {
2896 __ sub(result, sp, Operand(2 * kPointerSize));
2898 // Check if the calling frame is an arguments adaptor frame.
2899 Label done, adapted;
2900 __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2901 __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
2902 __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2904 // Result is the frame pointer for the frame if not adapted and for the real
2905 // frame below the adaptor frame if adapted.
2906 __ mov(result, fp, LeaveCC, ne);
2907 __ mov(result, scratch, LeaveCC, eq);
2912 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2913 Register elem = ToRegister(instr->InputAt(0));
2914 Register result = ToRegister(instr->result());
2918 // If no arguments adaptor frame the number of arguments is fixed.
2920 __ mov(result, Operand(scope()->num_parameters()));
2923 // Arguments adaptor frame present. Get argument length from there.
2924 __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2926 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2927 __ SmiUntag(result);
2929 // Argument length is in result register.
2934 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2935 Register receiver = ToRegister(instr->receiver());
2936 Register function = ToRegister(instr->function());
2937 Register scratch = scratch0();
2939 // If the receiver is null or undefined, we have to pass the global
2940 // object as a receiver to normal functions. Values have to be
2941 // passed unchanged to builtins and strict-mode functions.
2942 Label global_object, receiver_ok;
2944 // Do not transform the receiver to object for strict mode
2947 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2949 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2951 Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
2952 __ b(ne, &receiver_ok);
2954 // Do not transform the receiver to object for builtins.
2955 __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
2956 __ b(ne, &receiver_ok);
2958 // Normal function. Replace undefined or null with global receiver.
2959 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2960 __ cmp(receiver, scratch);
2961 __ b(eq, &global_object);
2962 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2963 __ cmp(receiver, scratch);
2964 __ b(eq, &global_object);
2966 // Deoptimize if the receiver is not a JS object.
2967 __ tst(receiver, Operand(kSmiTagMask));
2968 DeoptimizeIf(eq, instr->environment());
2969 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
2970 DeoptimizeIf(lt, instr->environment());
2971 __ jmp(&receiver_ok);
2973 __ bind(&global_object);
2974 __ ldr(receiver, GlobalObjectOperand());
2976 FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2977 __ bind(&receiver_ok);
2981 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2982 Register receiver = ToRegister(instr->receiver());
2983 Register function = ToRegister(instr->function());
2984 Register length = ToRegister(instr->length());
2985 Register elements = ToRegister(instr->elements());
2986 Register scratch = scratch0();
2987 ASSERT(receiver.is(r0)); // Used for parameter count.
2988 ASSERT(function.is(r1)); // Required by InvokeFunction.
2989 ASSERT(ToRegister(instr->result()).is(r0));
2991 // Copy the arguments to this function possibly from the
2992 // adaptor frame below it.
2993 const uint32_t kArgumentsLimit = 1 * KB;
2994 __ cmp(length, Operand(kArgumentsLimit));
2995 DeoptimizeIf(hi, instr->environment());
2997 // Push the receiver and use the register to keep the original
2998 // number of arguments.
3000 __ mov(receiver, length);
3001 // The arguments are at a one pointer size offset from elements.
3002 __ add(elements, elements, Operand(1 * kPointerSize));
3004 // Loop through the arguments pushing them onto the execution
3007 // length is a small non-negative integer, due to the test above.
3008 __ cmp(length, Operand(0));
3011 __ ldr(scratch, MemOperand(elements, length, LSL, 2));
3013 __ sub(length, length, Operand(1), SetCC);
3017 ASSERT(instr->HasPointerMap());
3018 LPointerMap* pointers = instr->pointer_map();
3019 RecordPosition(pointers->position());
3020 SafepointGenerator safepoint_generator(
3021 this, pointers, Safepoint::kLazyDeopt);
3022 // The number of arguments is stored in receiver which is r0, as expected
3023 // by InvokeFunction.
3024 ParameterCount actual(receiver);
3025 __ InvokeFunction(function, actual, CALL_FUNCTION,
3026 safepoint_generator, CALL_AS_METHOD);
3027 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3031 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3032 LOperand* argument = instr->InputAt(0);
3033 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3034 Abort("DoPushArgument not implemented for double type.");
3036 Register argument_reg = EmitLoadRegister(argument, ip);
3037 __ push(argument_reg);
3042 void LCodeGen::DoDrop(LDrop* instr) {
3043 __ Drop(instr->count());
3047 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3048 Register result = ToRegister(instr->result());
3049 __ LoadHeapObject(result, instr->hydrogen()->closure());
3053 void LCodeGen::DoContext(LContext* instr) {
3054 Register result = ToRegister(instr->result());
3059 void LCodeGen::DoOuterContext(LOuterContext* instr) {
3060 Register context = ToRegister(instr->context());
3061 Register result = ToRegister(instr->result());
3063 MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
3067 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3068 __ push(cp); // The context is the first argument.
3069 __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
3070 __ push(scratch0());
3071 __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3072 __ push(scratch0());
3073 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3077 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
3078 Register result = ToRegister(instr->result());
3079 __ ldr(result, ContextOperand(cp, instr->qml_global()?Context::QML_GLOBAL_INDEX:Context::GLOBAL_INDEX));
3083 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
3084 Register global = ToRegister(instr->global());
3085 Register result = ToRegister(instr->result());
3086 __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
3090 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3092 LInstruction* instr,
3095 bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
3096 function->shared()->formal_parameter_count() == arity;
3098 LPointerMap* pointers = instr->pointer_map();
3099 RecordPosition(pointers->position());
3101 if (can_invoke_directly) {
3102 if (r1_state == R1_UNINITIALIZED) {
3103 __ LoadHeapObject(r1, function);
3106 // Change context if needed.
3107 bool change_context =
3108 (info()->closure()->context() != function->context()) ||
3109 scope()->contains_with() ||
3110 (scope()->num_heap_slots() > 0);
3111 if (change_context) {
3112 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
3115 // Set r0 to arguments count if adaption is not needed. Assumes that r0
3116 // is available to write to at this point.
3117 if (!function->NeedsArgumentsAdaption()) {
3118 __ mov(r0, Operand(arity));
3122 __ SetCallKind(r5, call_kind);
3123 __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
3126 // Set up deoptimization.
3127 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3129 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3130 ParameterCount count(arity);
3131 __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
3135 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3139 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
3140 ASSERT(ToRegister(instr->result()).is(r0));
3141 CallKnownFunction(instr->function(),
3149 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
3150 Register input = ToRegister(instr->InputAt(0));
3151 Register result = ToRegister(instr->result());
3152 Register scratch = scratch0();
3154 // Deoptimize if not a heap number.
3155 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3156 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3157 __ cmp(scratch, Operand(ip));
3158 DeoptimizeIf(ne, instr->environment());
3161 Register exponent = scratch0();
3163 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3164 // Check the sign of the argument. If the argument is positive, just
3166 __ tst(exponent, Operand(HeapNumber::kSignMask));
3167 // Move the input to the result if necessary.
3168 __ Move(result, input);
3171 // Input is negative. Reverse its sign.
3172 // Preserve the value of all registers.
3174 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3176 // Registers were saved at the safepoint, so we can use
3177 // many scratch registers.
3178 Register tmp1 = input.is(r1) ? r0 : r1;
3179 Register tmp2 = input.is(r2) ? r0 : r2;
3180 Register tmp3 = input.is(r3) ? r0 : r3;
3181 Register tmp4 = input.is(r4) ? r0 : r4;
3183 // exponent: floating point exponent value.
3185 Label allocated, slow;
3186 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3187 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3190 // Slow case: Call the runtime system to do the number allocation.
3193 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3194 // Set the pointer to the new heap number in tmp.
3195 if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
3196 // Restore input_reg after call to runtime.
3197 __ LoadFromSafepointRegisterSlot(input, input);
3198 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3200 __ bind(&allocated);
3201 // exponent: floating point exponent value.
3202 // tmp1: allocated heap number.
3203 __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
3204 __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3205 __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3206 __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3208 __ StoreToSafepointRegisterSlot(tmp1, result);
3215 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
3216 Register input = ToRegister(instr->InputAt(0));
3217 Register result = ToRegister(instr->result());
3218 __ cmp(input, Operand(0));
3219 __ Move(result, input, pl);
3220 // We can make rsb conditional because the previous cmp instruction
3221 // will clear the V (overflow) flag and rsb won't set this flag
3222 // if input is positive.
3223 __ rsb(result, input, Operand(0), SetCC, mi);
3224 // Deoptimize on overflow.
3225 DeoptimizeIf(vs, instr->environment());
3229 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3230 // Class for deferred case.
3231 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3233 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3234 LUnaryMathOperation* instr)
3235 : LDeferredCode(codegen), instr_(instr) { }
3236 virtual void Generate() {
3237 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3239 virtual LInstruction* instr() { return instr_; }
3241 LUnaryMathOperation* instr_;
3244 Representation r = instr->hydrogen()->value()->representation();
3246 DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
3247 DwVfpRegister result = ToDoubleRegister(instr->result());
3248 __ vabs(result, input);
3249 } else if (r.IsInteger32()) {
3250 EmitIntegerMathAbs(instr);
3252 // Representation is tagged.
3253 DeferredMathAbsTaggedHeapNumber* deferred =
3254 new DeferredMathAbsTaggedHeapNumber(this, instr);
3255 Register input = ToRegister(instr->InputAt(0));
3257 __ JumpIfNotSmi(input, deferred->entry());
3258 // If smi, handle it directly.
3259 EmitIntegerMathAbs(instr);
3260 __ bind(deferred->exit());
3265 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3266 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3267 Register result = ToRegister(instr->result());
3268 SwVfpRegister single_scratch = double_scratch0().low();
3269 Register scratch1 = scratch0();
3270 Register scratch2 = ToRegister(instr->TempAt(0));
3272 __ EmitVFPTruncate(kRoundToMinusInf,
3277 DeoptimizeIf(ne, instr->environment());
3279 // Move the result back to general purpose register r0.
3280 __ vmov(result, single_scratch);
3282 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3285 __ cmp(result, Operand(0));
3287 __ vmov(scratch1, input.high());
3288 __ tst(scratch1, Operand(HeapNumber::kSignMask));
3289 DeoptimizeIf(ne, instr->environment());
3295 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3296 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3297 Register result = ToRegister(instr->result());
3298 Register scratch = scratch0();
3299 Label done, check_sign_on_zero;
3301 // Extract exponent bits.
3302 __ vmov(result, input.high());
3305 HeapNumber::kExponentShift,
3306 HeapNumber::kExponentBits);
3308 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3309 __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2));
3310 __ mov(result, Operand(0), LeaveCC, le);
3311 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3312 __ b(le, &check_sign_on_zero);
3317 // The following conversion will not work with numbers
3318 // outside of ]-2^32, 2^32[.
3319 __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32));
3320 DeoptimizeIf(ge, instr->environment());
3322 // Save the original sign for later comparison.
3323 __ and_(scratch, result, Operand(HeapNumber::kSignMask));
3325 __ Vmov(double_scratch0(), 0.5);
3326 __ vadd(double_scratch0(), input, double_scratch0());
3328 // Check sign of the result: if the sign changed, the input
3329 // value was in ]0.5, 0[ and the result should be -0.
3330 __ vmov(result, double_scratch0().high());
3331 __ eor(result, result, Operand(scratch), SetCC);
3332 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3333 DeoptimizeIf(mi, instr->environment());
3335 __ mov(result, Operand(0), LeaveCC, mi);
3339 __ EmitVFPTruncate(kRoundToMinusInf,
3340 double_scratch0().low(),
3344 DeoptimizeIf(ne, instr->environment());
3345 __ vmov(result, double_scratch0().low());
3347 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3349 __ cmp(result, Operand(0));
3351 __ bind(&check_sign_on_zero);
3352 __ vmov(scratch, input.high());
3353 __ tst(scratch, Operand(HeapNumber::kSignMask));
3354 DeoptimizeIf(ne, instr->environment());
3360 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3361 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3362 DoubleRegister result = ToDoubleRegister(instr->result());
3363 __ vsqrt(result, input);
3367 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3368 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3369 DoubleRegister result = ToDoubleRegister(instr->result());
3370 DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3372 // Note that according to ECMA-262 15.8.2.13:
3373 // Math.pow(-Infinity, 0.5) == Infinity
3374 // Math.sqrt(-Infinity) == NaN
3376 __ vmov(temp, -V8_INFINITY);
3377 __ VFPCompareAndSetFlags(input, temp);
3378 __ vneg(result, temp, eq);
3381 // Add +0 to convert -0 to +0.
3382 __ vadd(result, input, kDoubleRegZero);
3383 __ vsqrt(result, result);
3388 void LCodeGen::DoPower(LPower* instr) {
3389 Representation exponent_type = instr->hydrogen()->right()->representation();
3390 // Having marked this as a call, we can use any registers.
3391 // Just make sure that the input/output registers are the expected ones.
3392 ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3393 ToDoubleRegister(instr->InputAt(1)).is(d2));
3394 ASSERT(!instr->InputAt(1)->IsRegister() ||
3395 ToRegister(instr->InputAt(1)).is(r2));
3396 ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
3397 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3399 if (exponent_type.IsTagged()) {
3401 __ JumpIfSmi(r2, &no_deopt);
3402 __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
3403 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3404 __ cmp(r7, Operand(ip));
3405 DeoptimizeIf(ne, instr->environment());
3407 MathPowStub stub(MathPowStub::TAGGED);
3409 } else if (exponent_type.IsInteger32()) {
3410 MathPowStub stub(MathPowStub::INTEGER);
3413 ASSERT(exponent_type.IsDouble());
3414 MathPowStub stub(MathPowStub::DOUBLE);
3420 void LCodeGen::DoRandom(LRandom* instr) {
3421 class DeferredDoRandom: public LDeferredCode {
3423 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3424 : LDeferredCode(codegen), instr_(instr) { }
3425 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3426 virtual LInstruction* instr() { return instr_; }
3431 DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
3433 // Having marked this instruction as a call we can use any
3435 ASSERT(ToDoubleRegister(instr->result()).is(d7));
3436 ASSERT(ToRegister(instr->InputAt(0)).is(r0));
3438 static const int kSeedSize = sizeof(uint32_t);
3439 STATIC_ASSERT(kPointerSize == kSeedSize);
3441 __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalContextOffset));
3442 static const int kRandomSeedOffset =
3443 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3444 __ ldr(r2, FieldMemOperand(r0, kRandomSeedOffset));
3445 // r2: FixedArray of the global context's random seeds
3448 __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3449 __ cmp(r1, Operand(0));
3450 __ b(eq, deferred->entry());
3452 __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3456 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3457 __ and_(r3, r1, Operand(0xFFFF));
3458 __ mov(r4, Operand(18273));
3460 __ add(r1, r3, Operand(r1, LSR, 16));
3462 __ str(r1, FieldMemOperand(r2, ByteArray::kHeaderSize));
3464 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3465 __ and_(r3, r0, Operand(0xFFFF));
3466 __ mov(r4, Operand(36969));
3468 __ add(r0, r3, Operand(r0, LSR, 16));
3470 __ str(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize));
3472 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3473 __ and_(r0, r0, Operand(0x3FFFF));
3474 __ add(r0, r0, Operand(r1, LSL, 14));
3476 __ bind(deferred->exit());
3477 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3478 // Create this constant using mov/orr to avoid PC relative load.
3479 __ mov(r1, Operand(0x41000000));
3480 __ orr(r1, r1, Operand(0x300000));
3481 // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
3482 __ vmov(d7, r0, r1);
3483 // Move 0x4130000000000000 to VFP.
3484 __ mov(r0, Operand(0, RelocInfo::NONE));
3485 __ vmov(d8, r0, r1);
3486 // Subtract and store the result in the heap number.
3487 __ vsub(d7, d7, d8);
3491 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3492 __ PrepareCallCFunction(1, scratch0());
3493 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3494 // Return value is in r0.
3498 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3499 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3500 TranscendentalCacheStub stub(TranscendentalCache::LOG,
3501 TranscendentalCacheStub::UNTAGGED);
3502 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3506 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3507 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3508 TranscendentalCacheStub stub(TranscendentalCache::TAN,
3509 TranscendentalCacheStub::UNTAGGED);
3510 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3514 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3515 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3516 TranscendentalCacheStub stub(TranscendentalCache::COS,
3517 TranscendentalCacheStub::UNTAGGED);
3518 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3522 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3523 ASSERT(ToDoubleRegister(instr->result()).is(d2));
3524 TranscendentalCacheStub stub(TranscendentalCache::SIN,
3525 TranscendentalCacheStub::UNTAGGED);
3526 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3530 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3531 switch (instr->op()) {
3545 DoMathPowHalf(instr);
3560 Abort("Unimplemented type of LUnaryMathOperation.");
3566 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3567 ASSERT(ToRegister(instr->function()).is(r1));
3568 ASSERT(instr->HasPointerMap());
3570 if (instr->known_function().is_null()) {
3571 LPointerMap* pointers = instr->pointer_map();
3572 RecordPosition(pointers->position());
3573 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3574 ParameterCount count(instr->arity());
3575 __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3576 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3578 CallKnownFunction(instr->known_function(),
3582 R1_CONTAINS_TARGET);
3587 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3588 ASSERT(ToRegister(instr->result()).is(r0));
3590 int arity = instr->arity();
3592 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3593 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3594 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3598 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3599 ASSERT(ToRegister(instr->result()).is(r0));
3601 int arity = instr->arity();
3602 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3604 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3605 __ mov(r2, Operand(instr->name()));
3606 CallCode(ic, mode, instr);
3607 // Restore context register.
3608 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3612 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3613 ASSERT(ToRegister(instr->function()).is(r1));
3614 ASSERT(ToRegister(instr->result()).is(r0));
3616 int arity = instr->arity();
3617 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3618 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3619 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3623 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3624 ASSERT(ToRegister(instr->result()).is(r0));
3626 int arity = instr->arity();
3627 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3629 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3630 __ mov(r2, Operand(instr->name()));
3631 CallCode(ic, mode, instr);
3632 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3636 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3637 ASSERT(ToRegister(instr->result()).is(r0));
3638 CallKnownFunction(instr->target(),
3646 void LCodeGen::DoCallNew(LCallNew* instr) {
3647 ASSERT(ToRegister(instr->InputAt(0)).is(r1));
3648 ASSERT(ToRegister(instr->result()).is(r0));
3650 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3651 __ mov(r0, Operand(instr->arity()));
3652 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3656 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3657 CallRuntime(instr->function(), instr->arity(), instr);
3661 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3662 Register object = ToRegister(instr->object());
3663 Register value = ToRegister(instr->value());
3664 Register scratch = scratch0();
3665 int offset = instr->offset();
3667 ASSERT(!object.is(value));
3669 if (!instr->transition().is_null()) {
3670 __ mov(scratch, Operand(instr->transition()));
3671 __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3675 HType type = instr->hydrogen()->value()->type();
3676 SmiCheck check_needed =
3677 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3678 if (instr->is_in_object()) {
3679 __ str(value, FieldMemOperand(object, offset));
3680 if (instr->hydrogen()->NeedsWriteBarrier()) {
3681 // Update the write barrier for the object for in-object properties.
3682 __ RecordWriteField(object,
3688 EMIT_REMEMBERED_SET,
3692 __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3693 __ str(value, FieldMemOperand(scratch, offset));
3694 if (instr->hydrogen()->NeedsWriteBarrier()) {
3695 // Update the write barrier for the properties array.
3696 // object is used as a scratch register.
3697 __ RecordWriteField(scratch,
3703 EMIT_REMEMBERED_SET,
3710 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3711 ASSERT(ToRegister(instr->object()).is(r1));
3712 ASSERT(ToRegister(instr->value()).is(r0));
3714 // Name is always in r2.
3715 __ mov(r2, Operand(instr->name()));
3716 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3717 ? isolate()->builtins()->StoreIC_Initialize_Strict()
3718 : isolate()->builtins()->StoreIC_Initialize();
3719 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3723 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3724 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
3725 DeoptimizeIf(hs, instr->environment());
3729 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3730 Register value = ToRegister(instr->value());
3731 Register elements = ToRegister(instr->object());
3732 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3733 Register scratch = scratch0();
3736 if (instr->key()->IsConstantOperand()) {
3737 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3738 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3740 (ToInteger32(const_operand) + instr->additional_index()) * kPointerSize
3741 + FixedArray::kHeaderSize;
3742 __ str(value, FieldMemOperand(elements, offset));
3744 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
3745 if (instr->additional_index() != 0) {
3748 Operand(instr->additional_index() << kPointerSizeLog2));
3750 __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3753 if (instr->hydrogen()->NeedsWriteBarrier()) {
3754 HType type = instr->hydrogen()->value()->type();
3755 SmiCheck check_needed =
3756 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3757 // Compute address of modified element and store it into key register.
3758 __ add(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3759 __ RecordWrite(elements,
3764 EMIT_REMEMBERED_SET,
3770 void LCodeGen::DoStoreKeyedFastDoubleElement(
3771 LStoreKeyedFastDoubleElement* instr) {
3772 DwVfpRegister value = ToDoubleRegister(instr->value());
3773 Register elements = ToRegister(instr->elements());
3774 Register key = no_reg;
3775 Register scratch = scratch0();
3776 bool key_is_constant = instr->key()->IsConstantOperand();
3777 int constant_key = 0;
3779 // Calculate the effective address of the slot in the array to store the
3781 if (key_is_constant) {
3782 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3783 if (constant_key & 0xF0000000) {
3784 Abort("array index constant value too big.");
3787 key = ToRegister(instr->key());
3789 int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3790 Operand operand = key_is_constant
3791 ? Operand((constant_key << shift_size) +
3792 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3793 : Operand(key, LSL, shift_size);
3794 __ add(scratch, elements, operand);
3795 if (!key_is_constant) {
3796 __ add(scratch, scratch,
3797 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3800 if (instr->NeedsCanonicalization()) {
3801 // Check for NaN. All NaNs must be canonicalized.
3802 __ VFPCompareAndSetFlags(value, value);
3803 // Only load canonical NaN if the comparison above set the overflow.
3805 FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
3809 __ vstr(value, scratch, instr->additional_index() << shift_size);
3813 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3814 LStoreKeyedSpecializedArrayElement* instr) {
3816 Register external_pointer = ToRegister(instr->external_pointer());
3817 Register key = no_reg;
3818 ElementsKind elements_kind = instr->elements_kind();
3819 bool key_is_constant = instr->key()->IsConstantOperand();
3820 int constant_key = 0;
3821 if (key_is_constant) {
3822 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3823 if (constant_key & 0xF0000000) {
3824 Abort("array index constant value too big.");
3827 key = ToRegister(instr->key());
3829 int shift_size = ElementsKindToShiftSize(elements_kind);
3830 int additional_offset = instr->additional_index() << shift_size;
3832 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3833 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3834 CpuFeatures::Scope scope(VFP3);
3835 DwVfpRegister value(ToDoubleRegister(instr->value()));
3836 Operand operand(key_is_constant ? Operand(constant_key << shift_size)
3837 : Operand(key, LSL, shift_size));
3838 __ add(scratch0(), external_pointer, operand);
3839 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3840 __ vcvt_f32_f64(double_scratch0().low(), value);
3841 __ vstr(double_scratch0().low(), scratch0(), additional_offset);
3842 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3843 __ vstr(value, scratch0(), additional_offset);
3846 Register value(ToRegister(instr->value()));
3847 if (instr->additional_index() != 0 && !key_is_constant) {
3848 __ add(scratch0(), key, Operand(instr->additional_index()));
3850 MemOperand mem_operand(key_is_constant
3851 ? MemOperand(external_pointer,
3852 ((constant_key + instr->additional_index())
3854 : (instr->additional_index() == 0
3855 ? MemOperand(external_pointer, key, LSL, shift_size)
3856 : MemOperand(external_pointer, scratch0(), LSL, shift_size)));
3857 switch (elements_kind) {
3858 case EXTERNAL_PIXEL_ELEMENTS:
3859 case EXTERNAL_BYTE_ELEMENTS:
3860 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3861 __ strb(value, mem_operand);
3863 case EXTERNAL_SHORT_ELEMENTS:
3864 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3865 __ strh(value, mem_operand);
3867 case EXTERNAL_INT_ELEMENTS:
3868 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3869 __ str(value, mem_operand);
3871 case EXTERNAL_FLOAT_ELEMENTS:
3872 case EXTERNAL_DOUBLE_ELEMENTS:
3873 case FAST_DOUBLE_ELEMENTS:
3875 case FAST_SMI_ONLY_ELEMENTS:
3876 case DICTIONARY_ELEMENTS:
3877 case NON_STRICT_ARGUMENTS_ELEMENTS:
3885 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3886 ASSERT(ToRegister(instr->object()).is(r2));
3887 ASSERT(ToRegister(instr->key()).is(r1));
3888 ASSERT(ToRegister(instr->value()).is(r0));
3890 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3891 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3892 : isolate()->builtins()->KeyedStoreIC_Initialize();
3893 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3897 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3898 Register object_reg = ToRegister(instr->object());
3899 Register new_map_reg = ToRegister(instr->new_map_reg());
3900 Register scratch = scratch0();
3902 Handle<Map> from_map = instr->original_map();
3903 Handle<Map> to_map = instr->transitioned_map();
3904 ElementsKind from_kind = from_map->elements_kind();
3905 ElementsKind to_kind = to_map->elements_kind();
3907 Label not_applicable;
3908 __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3909 __ cmp(scratch, Operand(from_map));
3910 __ b(ne, ¬_applicable);
3911 __ mov(new_map_reg, Operand(to_map));
3912 if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
3913 __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3915 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3916 scratch, kLRHasBeenSaved, kDontSaveFPRegs);
3917 } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
3918 to_kind == FAST_DOUBLE_ELEMENTS) {
3919 Register fixed_object_reg = ToRegister(instr->temp_reg());
3920 ASSERT(fixed_object_reg.is(r2));
3921 ASSERT(new_map_reg.is(r3));
3922 __ mov(fixed_object_reg, object_reg);
3923 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3924 RelocInfo::CODE_TARGET, instr);
3925 } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
3926 Register fixed_object_reg = ToRegister(instr->temp_reg());
3927 ASSERT(fixed_object_reg.is(r2));
3928 ASSERT(new_map_reg.is(r3));
3929 __ mov(fixed_object_reg, object_reg);
3930 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3931 RelocInfo::CODE_TARGET, instr);
3935 __ bind(¬_applicable);
3939 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3940 __ push(ToRegister(instr->left()));
3941 __ push(ToRegister(instr->right()));
3942 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3943 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3947 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3948 class DeferredStringCharCodeAt: public LDeferredCode {
3950 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3951 : LDeferredCode(codegen), instr_(instr) { }
3952 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3953 virtual LInstruction* instr() { return instr_; }
3955 LStringCharCodeAt* instr_;
3958 DeferredStringCharCodeAt* deferred =
3959 new DeferredStringCharCodeAt(this, instr);
3961 StringCharLoadGenerator::Generate(masm(),
3962 ToRegister(instr->string()),
3963 ToRegister(instr->index()),
3964 ToRegister(instr->result()),
3966 __ bind(deferred->exit());
3970 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3971 Register string = ToRegister(instr->string());
3972 Register result = ToRegister(instr->result());
3973 Register scratch = scratch0();
3975 // TODO(3095996): Get rid of this. For now, we need to make the
3976 // result register contain a valid pointer because it is already
3977 // contained in the register pointer map.
3978 __ mov(result, Operand(0));
3980 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3982 // Push the index as a smi. This is safe because of the checks in
3983 // DoStringCharCodeAt above.
3984 if (instr->index()->IsConstantOperand()) {
3985 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3986 __ mov(scratch, Operand(Smi::FromInt(const_index)));
3989 Register index = ToRegister(instr->index());
3993 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3994 if (FLAG_debug_code) {
3995 __ AbortIfNotSmi(r0);
3998 __ StoreToSafepointRegisterSlot(r0, result);
4002 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4003 class DeferredStringCharFromCode: public LDeferredCode {
4005 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4006 : LDeferredCode(codegen), instr_(instr) { }
4007 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
4008 virtual LInstruction* instr() { return instr_; }
4010 LStringCharFromCode* instr_;
4013 DeferredStringCharFromCode* deferred =
4014 new DeferredStringCharFromCode(this, instr);
4016 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4017 Register char_code = ToRegister(instr->char_code());
4018 Register result = ToRegister(instr->result());
4019 ASSERT(!char_code.is(result));
4021 __ cmp(char_code, Operand(String::kMaxAsciiCharCode));
4022 __ b(hi, deferred->entry());
4023 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4024 __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
4025 __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
4026 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4028 __ b(eq, deferred->entry());
4029 __ bind(deferred->exit());
4033 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4034 Register char_code = ToRegister(instr->char_code());
4035 Register result = ToRegister(instr->result());
4037 // TODO(3095996): Get rid of this. For now, we need to make the
4038 // result register contain a valid pointer because it is already
4039 // contained in the register pointer map.
4040 __ mov(result, Operand(0));
4042 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4043 __ SmiTag(char_code);
4045 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
4046 __ StoreToSafepointRegisterSlot(r0, result);
4050 void LCodeGen::DoStringLength(LStringLength* instr) {
4051 Register string = ToRegister(instr->InputAt(0));
4052 Register result = ToRegister(instr->result());
4053 __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4057 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4058 LOperand* input = instr->InputAt(0);
4059 ASSERT(input->IsRegister() || input->IsStackSlot());
4060 LOperand* output = instr->result();
4061 ASSERT(output->IsDoubleRegister());
4062 SwVfpRegister single_scratch = double_scratch0().low();
4063 if (input->IsStackSlot()) {
4064 Register scratch = scratch0();
4065 __ ldr(scratch, ToMemOperand(input));
4066 __ vmov(single_scratch, scratch);
4068 __ vmov(single_scratch, ToRegister(input));
4070 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4074 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4075 class DeferredNumberTagI: public LDeferredCode {
4077 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4078 : LDeferredCode(codegen), instr_(instr) { }
4079 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
4080 virtual LInstruction* instr() { return instr_; }
4082 LNumberTagI* instr_;
4085 Register src = ToRegister(instr->InputAt(0));
4086 Register dst = ToRegister(instr->result());
4088 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
4089 __ SmiTag(dst, src, SetCC);
4090 __ b(vs, deferred->entry());
4091 __ bind(deferred->exit());
4095 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
4097 Register src = ToRegister(instr->InputAt(0));
4098 Register dst = ToRegister(instr->result());
4099 DoubleRegister dbl_scratch = double_scratch0();
4100 SwVfpRegister flt_scratch = dbl_scratch.low();
4102 // Preserve the value of all registers.
4103 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4105 // There was overflow, so bits 30 and 31 of the original integer
4106 // disagree. Try to allocate a heap number in new space and store
4107 // the value in there. If that fails, call the runtime system.
4110 __ SmiUntag(src, dst);
4111 __ eor(src, src, Operand(0x80000000));
4113 __ vmov(flt_scratch, src);
4114 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4115 if (FLAG_inline_new) {
4116 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
4117 __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
4122 // Slow case: Call the runtime system to do the number allocation.
4125 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4126 // register is stored, as this register is in the pointer map, but contains an
4128 __ mov(ip, Operand(0));
4129 __ StoreToSafepointRegisterSlot(ip, dst);
4130 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4133 // Done. Put the value in dbl_scratch into the value of the allocated heap
4136 __ sub(ip, dst, Operand(kHeapObjectTag));
4137 __ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
4138 __ StoreToSafepointRegisterSlot(dst, dst);
4142 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4143 class DeferredNumberTagD: public LDeferredCode {
4145 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4146 : LDeferredCode(codegen), instr_(instr) { }
4147 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4148 virtual LInstruction* instr() { return instr_; }
4150 LNumberTagD* instr_;
4153 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
4154 Register scratch = scratch0();
4155 Register reg = ToRegister(instr->result());
4156 Register temp1 = ToRegister(instr->TempAt(0));
4157 Register temp2 = ToRegister(instr->TempAt(1));
4159 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
4160 if (FLAG_inline_new) {
4161 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4162 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
4164 __ jmp(deferred->entry());
4166 __ bind(deferred->exit());
4167 __ sub(ip, reg, Operand(kHeapObjectTag));
4168 __ vstr(input_reg, ip, HeapNumber::kValueOffset);
4172 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4173 // TODO(3095996): Get rid of this. For now, we need to make the
4174 // result register contain a valid pointer because it is already
4175 // contained in the register pointer map.
4176 Register reg = ToRegister(instr->result());
4177 __ mov(reg, Operand(0));
4179 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4180 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4181 __ StoreToSafepointRegisterSlot(r0, reg);
4185 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4186 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4187 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
4191 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4192 Register input = ToRegister(instr->InputAt(0));
4193 Register result = ToRegister(instr->result());
4194 if (instr->needs_check()) {
4195 STATIC_ASSERT(kHeapObjectTag == 1);
4196 // If the input is a HeapObject, SmiUntag will set the carry flag.
4197 __ SmiUntag(result, input, SetCC);
4198 DeoptimizeIf(cs, instr->environment());
4200 __ SmiUntag(result, input);
4205 void LCodeGen::EmitNumberUntagD(Register input_reg,
4206 DoubleRegister result_reg,
4207 bool deoptimize_on_undefined,
4208 bool deoptimize_on_minus_zero,
4209 LEnvironment* env) {
4210 Register scratch = scratch0();
4211 SwVfpRegister flt_scratch = double_scratch0().low();
4212 ASSERT(!result_reg.is(double_scratch0()));
4214 Label load_smi, heap_number, done;
4217 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4219 // Heap number map check.
4220 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4221 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4222 __ cmp(scratch, Operand(ip));
4223 if (deoptimize_on_undefined) {
4224 DeoptimizeIf(ne, env);
4227 __ b(eq, &heap_number);
4229 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4230 __ cmp(input_reg, Operand(ip));
4231 DeoptimizeIf(ne, env);
4233 // Convert undefined to NaN.
4234 __ LoadRoot(ip, Heap::kNanValueRootIndex);
4235 __ sub(ip, ip, Operand(kHeapObjectTag));
4236 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4239 __ bind(&heap_number);
4241 // Heap number to double register conversion.
4242 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4243 __ vldr(result_reg, ip, HeapNumber::kValueOffset);
4244 if (deoptimize_on_minus_zero) {
4245 __ vmov(ip, result_reg.low());
4246 __ cmp(ip, Operand(0));
4248 __ vmov(ip, result_reg.high());
4249 __ cmp(ip, Operand(HeapNumber::kSignMask));
4250 DeoptimizeIf(eq, env);
4254 // Smi to double register conversion
4256 // scratch: untagged value of input_reg
4257 __ vmov(flt_scratch, scratch);
4258 __ vcvt_f64_s32(result_reg, flt_scratch);
4263 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4264 Register input_reg = ToRegister(instr->InputAt(0));
4265 Register scratch1 = scratch0();
4266 Register scratch2 = ToRegister(instr->TempAt(0));
4267 DwVfpRegister double_scratch = double_scratch0();
4268 SwVfpRegister single_scratch = double_scratch.low();
4270 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4271 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4275 // The input was optimistically untagged; revert it.
4276 // The carry flag is set when we reach this deferred code as we just executed
4277 // SmiUntag(heap_object, SetCC)
4278 STATIC_ASSERT(kHeapObjectTag == 1);
4279 __ adc(input_reg, input_reg, Operand(input_reg));
4281 // Heap number map check.
4282 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4283 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4284 __ cmp(scratch1, Operand(ip));
4286 if (instr->truncating()) {
4287 Register scratch3 = ToRegister(instr->TempAt(1));
4288 DwVfpRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4289 ASSERT(!scratch3.is(input_reg) &&
4290 !scratch3.is(scratch1) &&
4291 !scratch3.is(scratch2));
4292 // Performs a truncating conversion of a floating point number as used by
4293 // the JS bitwise operations.
4295 __ b(eq, &heap_number);
4296 // Check for undefined. Undefined is converted to zero for truncating
4298 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4299 __ cmp(input_reg, Operand(ip));
4300 DeoptimizeIf(ne, instr->environment());
4301 __ mov(input_reg, Operand(0));
4304 __ bind(&heap_number);
4305 __ sub(scratch1, input_reg, Operand(kHeapObjectTag));
4306 __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
4308 __ EmitECMATruncate(input_reg,
4316 CpuFeatures::Scope scope(VFP3);
4317 // Deoptimize if we don't have a heap number.
4318 DeoptimizeIf(ne, instr->environment());
4320 __ sub(ip, input_reg, Operand(kHeapObjectTag));
4321 __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
4322 __ EmitVFPTruncate(kRoundToZero,
4327 kCheckForInexactConversion);
4328 DeoptimizeIf(ne, instr->environment());
4330 __ vmov(input_reg, single_scratch);
4332 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4333 __ cmp(input_reg, Operand(0));
4335 __ vmov(scratch1, double_scratch.high());
4336 __ tst(scratch1, Operand(HeapNumber::kSignMask));
4337 DeoptimizeIf(ne, instr->environment());
4344 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4345 class DeferredTaggedToI: public LDeferredCode {
4347 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4348 : LDeferredCode(codegen), instr_(instr) { }
4349 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4350 virtual LInstruction* instr() { return instr_; }
4355 LOperand* input = instr->InputAt(0);
4356 ASSERT(input->IsRegister());
4357 ASSERT(input->Equals(instr->result()));
4359 Register input_reg = ToRegister(input);
4361 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
4363 // Optimistically untag the input.
4364 // If the input is a HeapObject, SmiUntag will set the carry flag.
4365 __ SmiUntag(input_reg, SetCC);
4366 // Branch to deferred code if the input was tagged.
4367 // The deferred code will take care of restoring the tag.
4368 __ b(cs, deferred->entry());
4369 __ bind(deferred->exit());
4373 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4374 LOperand* input = instr->InputAt(0);
4375 ASSERT(input->IsRegister());
4376 LOperand* result = instr->result();
4377 ASSERT(result->IsDoubleRegister());
4379 Register input_reg = ToRegister(input);
4380 DoubleRegister result_reg = ToDoubleRegister(result);
4382 EmitNumberUntagD(input_reg, result_reg,
4383 instr->hydrogen()->deoptimize_on_undefined(),
4384 instr->hydrogen()->deoptimize_on_minus_zero(),
4385 instr->environment());
4389 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4390 Register result_reg = ToRegister(instr->result());
4391 Register scratch1 = scratch0();
4392 Register scratch2 = ToRegister(instr->TempAt(0));
4393 DwVfpRegister double_input = ToDoubleRegister(instr->InputAt(0));
4394 SwVfpRegister single_scratch = double_scratch0().low();
4398 if (instr->truncating()) {
4399 Register scratch3 = ToRegister(instr->TempAt(1));
4400 __ EmitECMATruncate(result_reg,
4407 VFPRoundingMode rounding_mode = kRoundToMinusInf;
4408 __ EmitVFPTruncate(rounding_mode,
4413 kCheckForInexactConversion);
4414 // Deoptimize if we had a vfp invalid exception,
4415 // including inexact operation.
4416 DeoptimizeIf(ne, instr->environment());
4417 // Retrieve the result.
4418 __ vmov(result_reg, single_scratch);
4424 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4425 LOperand* input = instr->InputAt(0);
4426 __ tst(ToRegister(input), Operand(kSmiTagMask));
4427 DeoptimizeIf(ne, instr->environment());
4431 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4432 LOperand* input = instr->InputAt(0);
4433 __ tst(ToRegister(input), Operand(kSmiTagMask));
4434 DeoptimizeIf(eq, instr->environment());
4438 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4439 Register input = ToRegister(instr->InputAt(0));
4440 Register scratch = scratch0();
4442 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
4443 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4445 if (instr->hydrogen()->is_interval_check()) {
4448 instr->hydrogen()->GetCheckInterval(&first, &last);
4450 __ cmp(scratch, Operand(first));
4452 // If there is only one type in the interval check for equality.
4453 if (first == last) {
4454 DeoptimizeIf(ne, instr->environment());
4456 DeoptimizeIf(lo, instr->environment());
4457 // Omit check for the last type.
4458 if (last != LAST_TYPE) {
4459 __ cmp(scratch, Operand(last));
4460 DeoptimizeIf(hi, instr->environment());
4466 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
4468 if (IsPowerOf2(mask)) {
4469 ASSERT(tag == 0 || IsPowerOf2(tag));
4470 __ tst(scratch, Operand(mask));
4471 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
4473 __ and_(scratch, scratch, Operand(mask));
4474 __ cmp(scratch, Operand(tag));
4475 DeoptimizeIf(ne, instr->environment());
4481 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4482 Register reg = ToRegister(instr->value());
4483 Handle<JSFunction> target = instr->hydrogen()->target();
4484 if (isolate()->heap()->InNewSpace(*target)) {
4485 Register reg = ToRegister(instr->value());
4486 Handle<JSGlobalPropertyCell> cell =
4487 isolate()->factory()->NewJSGlobalPropertyCell(target);
4488 __ mov(ip, Operand(Handle<Object>(cell)));
4489 __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
4492 __ cmp(reg, Operand(target));
4494 DeoptimizeIf(ne, instr->environment());
4498 void LCodeGen::DoCheckMapCommon(Register reg,
4501 CompareMapMode mode,
4502 LEnvironment* env) {
4504 __ CompareMap(reg, scratch, map, &success, mode);
4505 DeoptimizeIf(ne, env);
4510 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4511 Register scratch = scratch0();
4512 LOperand* input = instr->InputAt(0);
4513 ASSERT(input->IsRegister());
4514 Register reg = ToRegister(input);
4517 SmallMapList* map_set = instr->hydrogen()->map_set();
4518 for (int i = 0; i < map_set->length() - 1; i++) {
4519 Handle<Map> map = map_set->at(i);
4520 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
4523 Handle<Map> map = map_set->last();
4524 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4529 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4530 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4531 Register result_reg = ToRegister(instr->result());
4532 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4533 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4537 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4538 Register unclamped_reg = ToRegister(instr->unclamped());
4539 Register result_reg = ToRegister(instr->result());
4540 __ ClampUint8(result_reg, unclamped_reg);
4544 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4545 Register scratch = scratch0();
4546 Register input_reg = ToRegister(instr->unclamped());
4547 Register result_reg = ToRegister(instr->result());
4548 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4549 Label is_smi, done, heap_number;
4551 // Both smi and heap number cases are handled.
4552 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4554 // Check for heap number
4555 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4556 __ cmp(scratch, Operand(factory()->heap_number_map()));
4557 __ b(eq, &heap_number);
4559 // Check for undefined. Undefined is converted to zero for clamping
4561 __ cmp(input_reg, Operand(factory()->undefined_value()));
4562 DeoptimizeIf(ne, instr->environment());
4563 __ mov(result_reg, Operand(0));
4567 __ bind(&heap_number);
4568 __ vldr(double_scratch0(), FieldMemOperand(input_reg,
4569 HeapNumber::kValueOffset));
4570 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
4575 __ ClampUint8(result_reg, result_reg);
4581 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4582 Register temp1 = ToRegister(instr->TempAt(0));
4583 Register temp2 = ToRegister(instr->TempAt(1));
4585 Handle<JSObject> holder = instr->holder();
4586 Handle<JSObject> current_prototype = instr->prototype();
4588 // Load prototype object.
4589 __ LoadHeapObject(temp1, current_prototype);
4591 // Check prototype maps up to the holder.
4592 while (!current_prototype.is_identical_to(holder)) {
4593 DoCheckMapCommon(temp1, temp2,
4594 Handle<Map>(current_prototype->map()),
4595 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4597 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4598 // Load next prototype object.
4599 __ LoadHeapObject(temp1, current_prototype);
4602 // Check the holder map.
4603 DoCheckMapCommon(temp1, temp2,
4604 Handle<Map>(current_prototype->map()),
4605 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4606 DeoptimizeIf(ne, instr->environment());
4610 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4611 class DeferredAllocateObject: public LDeferredCode {
4613 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4614 : LDeferredCode(codegen), instr_(instr) { }
4615 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4616 virtual LInstruction* instr() { return instr_; }
4618 LAllocateObject* instr_;
4621 DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
4623 Register result = ToRegister(instr->result());
4624 Register scratch = ToRegister(instr->TempAt(0));
4625 Register scratch2 = ToRegister(instr->TempAt(1));
4626 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4627 Handle<Map> initial_map(constructor->initial_map());
4628 int instance_size = initial_map->instance_size();
4629 ASSERT(initial_map->pre_allocated_property_fields() +
4630 initial_map->unused_property_fields() -
4631 initial_map->inobject_properties() == 0);
4633 // Allocate memory for the object. The initial map might change when
4634 // the constructor's prototype changes, but instance size and property
4635 // counts remain unchanged (if slack tracking finished).
4636 ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4637 __ AllocateInNewSpace(instance_size,
4644 __ bind(deferred->exit());
4645 if (FLAG_debug_code) {
4646 Label is_in_new_space;
4647 __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4648 __ Abort("Allocated object is not in new-space");
4649 __ bind(&is_in_new_space);
4652 // Load the initial map.
4653 Register map = scratch;
4654 __ LoadHeapObject(map, constructor);
4655 __ ldr(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4657 // Initialize map and fields of the newly allocated object.
4658 ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4659 __ str(map, FieldMemOperand(result, JSObject::kMapOffset));
4660 __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4661 __ str(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4662 __ str(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
4663 if (initial_map->inobject_properties() != 0) {
4664 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4665 for (int i = 0; i < initial_map->inobject_properties(); i++) {
4666 int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4667 __ str(scratch, FieldMemOperand(result, property_offset));
4673 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4674 Register result = ToRegister(instr->result());
4675 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4676 Handle<Map> initial_map(constructor->initial_map());
4677 int instance_size = initial_map->instance_size();
4679 // TODO(3095996): Get rid of this. For now, we need to make the
4680 // result register contain a valid pointer because it is already
4681 // contained in the register pointer map.
4682 __ mov(result, Operand(0));
4684 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4685 __ mov(r0, Operand(Smi::FromInt(instance_size)));
4687 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4688 __ StoreToSafepointRegisterSlot(r0, result);
4692 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4693 Heap* heap = isolate()->heap();
4694 ElementsKind boilerplate_elements_kind =
4695 instr->hydrogen()->boilerplate_elements_kind();
4697 // Deopt if the array literal boilerplate ElementsKind is of a type different
4698 // than the expected one. The check isn't necessary if the boilerplate has
4699 // already been converted to FAST_ELEMENTS.
4700 if (boilerplate_elements_kind != FAST_ELEMENTS) {
4701 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
4702 // Load map into r2.
4703 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
4704 // Load the map's "bit field 2".
4705 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
4706 // Retrieve elements_kind from bit field 2.
4707 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4708 __ cmp(r2, Operand(boilerplate_elements_kind));
4709 DeoptimizeIf(ne, instr->environment());
4712 __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4713 __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4714 __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4715 // Boilerplate already exists, constant elements are never accessed.
4716 // Pass an empty fixed array.
4717 __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4718 __ Push(r3, r2, r1);
4720 // Pick the right runtime function or stub to call.
4721 int length = instr->hydrogen()->length();
4722 if (instr->hydrogen()->IsCopyOnWrite()) {
4723 ASSERT(instr->hydrogen()->depth() == 1);
4724 FastCloneShallowArrayStub::Mode mode =
4725 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4726 FastCloneShallowArrayStub stub(mode, length);
4727 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4728 } else if (instr->hydrogen()->depth() > 1) {
4729 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4730 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4731 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4733 FastCloneShallowArrayStub::Mode mode =
4734 boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4735 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4736 : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4737 FastCloneShallowArrayStub stub(mode, length);
4738 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4743 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4747 ASSERT(!source.is(r2));
4748 ASSERT(!result.is(r2));
4750 // Only elements backing stores for non-COW arrays need to be copied.
4751 Handle<FixedArrayBase> elements(object->elements());
4752 bool has_elements = elements->length() > 0 &&
4753 elements->map() != isolate()->heap()->fixed_cow_array_map();
4755 // Increase the offset so that subsequent objects end up right after
4756 // this object and its backing store.
4757 int object_offset = *offset;
4758 int object_size = object->map()->instance_size();
4759 int elements_offset = *offset + object_size;
4760 int elements_size = has_elements ? elements->Size() : 0;
4761 *offset += object_size + elements_size;
4763 // Copy object header.
4764 ASSERT(object->properties()->length() == 0);
4765 int inobject_properties = object->map()->inobject_properties();
4766 int header_size = object_size - inobject_properties * kPointerSize;
4767 for (int i = 0; i < header_size; i += kPointerSize) {
4768 if (has_elements && i == JSObject::kElementsOffset) {
4769 __ add(r2, result, Operand(elements_offset));
4771 __ ldr(r2, FieldMemOperand(source, i));
4773 __ str(r2, FieldMemOperand(result, object_offset + i));
4776 // Copy in-object properties.
4777 for (int i = 0; i < inobject_properties; i++) {
4778 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4779 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4780 if (value->IsJSObject()) {
4781 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4782 __ add(r2, result, Operand(*offset));
4783 __ str(r2, FieldMemOperand(result, total_offset));
4784 __ LoadHeapObject(source, value_object);
4785 EmitDeepCopy(value_object, result, source, offset);
4786 } else if (value->IsHeapObject()) {
4787 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4788 __ str(r2, FieldMemOperand(result, total_offset));
4790 __ mov(r2, Operand(value));
4791 __ str(r2, FieldMemOperand(result, total_offset));
4796 // Copy elements backing store header.
4797 __ LoadHeapObject(source, elements);
4798 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4799 __ ldr(r2, FieldMemOperand(source, i));
4800 __ str(r2, FieldMemOperand(result, elements_offset + i));
4803 // Copy elements backing store content.
4804 int elements_length = has_elements ? elements->length() : 0;
4805 if (elements->IsFixedDoubleArray()) {
4806 Handle<FixedDoubleArray> double_array =
4807 Handle<FixedDoubleArray>::cast(elements);
4808 for (int i = 0; i < elements_length; i++) {
4809 int64_t value = double_array->get_representation(i);
4810 // We only support little endian mode...
4811 int32_t value_low = value & 0xFFFFFFFF;
4812 int32_t value_high = value >> 32;
4814 elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4815 __ mov(r2, Operand(value_low));
4816 __ str(r2, FieldMemOperand(result, total_offset));
4817 __ mov(r2, Operand(value_high));
4818 __ str(r2, FieldMemOperand(result, total_offset + 4));
4820 } else if (elements->IsFixedArray()) {
4821 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4822 for (int i = 0; i < elements_length; i++) {
4823 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4824 Handle<Object> value(fast_elements->get(i));
4825 if (value->IsJSObject()) {
4826 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4827 __ add(r2, result, Operand(*offset));
4828 __ str(r2, FieldMemOperand(result, total_offset));
4829 __ LoadHeapObject(source, value_object);
4830 EmitDeepCopy(value_object, result, source, offset);
4831 } else if (value->IsHeapObject()) {
4832 __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
4833 __ str(r2, FieldMemOperand(result, total_offset));
4835 __ mov(r2, Operand(value));
4836 __ str(r2, FieldMemOperand(result, total_offset));
4846 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4847 int size = instr->hydrogen()->total_size();
4848 ElementsKind boilerplate_elements_kind =
4849 instr->hydrogen()->boilerplate()->GetElementsKind();
4851 // Deopt if the literal boilerplate ElementsKind is of a type different than
4852 // the expected one. The check isn't necessary if the boilerplate has already
4853 // been converted to FAST_ELEMENTS.
4854 if (boilerplate_elements_kind != FAST_ELEMENTS) {
4855 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4856 // Load map into r2.
4857 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
4858 // Load the map's "bit field 2".
4859 __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
4860 // Retrieve elements_kind from bit field 2.
4861 __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4862 __ cmp(r2, Operand(boilerplate_elements_kind));
4863 DeoptimizeIf(ne, instr->environment());
4866 // Allocate all objects that are part of the literal in one big
4867 // allocation. This avoids multiple limit checks.
4868 Label allocated, runtime_allocate;
4869 __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4872 __ bind(&runtime_allocate);
4873 __ mov(r0, Operand(Smi::FromInt(size)));
4875 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4877 __ bind(&allocated);
4879 __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
4880 EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
4881 ASSERT_EQ(size, offset);
4885 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4886 Handle<FixedArray> literals(instr->environment()->closure()->literals());
4887 Handle<FixedArray> constant_properties =
4888 instr->hydrogen()->constant_properties();
4890 // Set up the parameters to the stub/runtime call.
4891 __ LoadHeapObject(r4, literals);
4892 __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4893 __ mov(r2, Operand(constant_properties));
4894 int flags = instr->hydrogen()->fast_elements()
4895 ? ObjectLiteral::kFastElements
4896 : ObjectLiteral::kNoFlags;
4897 __ mov(r1, Operand(Smi::FromInt(flags)));
4898 __ Push(r4, r3, r2, r1);
4900 // Pick the right runtime function or stub to call.
4901 int properties_count = constant_properties->length() / 2;
4902 if (instr->hydrogen()->depth() > 1) {
4903 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4904 } else if (flags != ObjectLiteral::kFastElements ||
4905 properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4906 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4908 FastCloneShallowObjectStub stub(properties_count);
4909 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4914 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4915 ASSERT(ToRegister(instr->InputAt(0)).is(r0));
4917 CallRuntime(Runtime::kToFastProperties, 1, instr);
4921 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4923 // Registers will be used as follows:
4924 // r3 = JS function.
4925 // r7 = literals array.
4926 // r1 = regexp literal.
4927 // r0 = regexp literal clone.
4928 // r2 and r4-r6 are used as temporaries.
4929 __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4930 __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
4931 int literal_offset = FixedArray::kHeaderSize +
4932 instr->hydrogen()->literal_index() * kPointerSize;
4933 __ ldr(r1, FieldMemOperand(r7, literal_offset));
4934 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
4936 __ b(ne, &materialized);
4938 // Create regexp literal using runtime function
4939 // Result will be in r0.
4940 __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4941 __ mov(r5, Operand(instr->hydrogen()->pattern()));
4942 __ mov(r4, Operand(instr->hydrogen()->flags()));
4943 __ Push(r7, r6, r5, r4);
4944 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4947 __ bind(&materialized);
4948 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4949 Label allocated, runtime_allocate;
4951 __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
4954 __ bind(&runtime_allocate);
4955 __ mov(r0, Operand(Smi::FromInt(size)));
4957 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4960 __ bind(&allocated);
4961 // Copy the content into the newly allocated memory.
4962 // (Unroll copy loop once for better throughput).
4963 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4964 __ ldr(r3, FieldMemOperand(r1, i));
4965 __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
4966 __ str(r3, FieldMemOperand(r0, i));
4967 __ str(r2, FieldMemOperand(r0, i + kPointerSize));
4969 if ((size % (2 * kPointerSize)) != 0) {
4970 __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
4971 __ str(r3, FieldMemOperand(r0, size - kPointerSize));
4976 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4977 // Use the fast case closure allocation code that allocates in new
4978 // space for nested functions that don't need literals cloning.
4979 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4980 bool pretenure = instr->hydrogen()->pretenure();
4981 if (!pretenure && shared_info->num_literals() == 0) {
4982 FastNewClosureStub stub(shared_info->language_mode());
4983 __ mov(r1, Operand(shared_info));
4985 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4987 __ mov(r2, Operand(shared_info));
4988 __ mov(r1, Operand(pretenure
4989 ? factory()->true_value()
4990 : factory()->false_value()));
4991 __ Push(cp, r2, r1);
4992 CallRuntime(Runtime::kNewClosure, 3, instr);
4997 void LCodeGen::DoTypeof(LTypeof* instr) {
4998 Register input = ToRegister(instr->InputAt(0));
5000 CallRuntime(Runtime::kTypeof, 1, instr);
5004 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5005 Register input = ToRegister(instr->InputAt(0));
5006 int true_block = chunk_->LookupDestination(instr->true_block_id());
5007 int false_block = chunk_->LookupDestination(instr->false_block_id());
5008 Label* true_label = chunk_->GetAssemblyLabel(true_block);
5009 Label* false_label = chunk_->GetAssemblyLabel(false_block);
5011 Condition final_branch_condition = EmitTypeofIs(true_label,
5014 instr->type_literal());
5015 if (final_branch_condition != kNoCondition) {
5016 EmitBranch(true_block, false_block, final_branch_condition);
5021 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5024 Handle<String> type_name) {
5025 Condition final_branch_condition = kNoCondition;
5026 Register scratch = scratch0();
5027 if (type_name->Equals(heap()->number_symbol())) {
5028 __ JumpIfSmi(input, true_label);
5029 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5030 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5031 __ cmp(input, Operand(ip));
5032 final_branch_condition = eq;
5034 } else if (type_name->Equals(heap()->string_symbol())) {
5035 __ JumpIfSmi(input, false_label);
5036 __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
5037 __ b(ge, false_label);
5038 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5039 __ tst(ip, Operand(1 << Map::kIsUndetectable));
5040 final_branch_condition = eq;
5042 } else if (type_name->Equals(heap()->boolean_symbol())) {
5043 __ CompareRoot(input, Heap::kTrueValueRootIndex);
5044 __ b(eq, true_label);
5045 __ CompareRoot(input, Heap::kFalseValueRootIndex);
5046 final_branch_condition = eq;
5048 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
5049 __ CompareRoot(input, Heap::kNullValueRootIndex);
5050 final_branch_condition = eq;
5052 } else if (type_name->Equals(heap()->undefined_symbol())) {
5053 __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
5054 __ b(eq, true_label);
5055 __ JumpIfSmi(input, false_label);
5056 // Check for undetectable objects => true.
5057 __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
5058 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5059 __ tst(ip, Operand(1 << Map::kIsUndetectable));
5060 final_branch_condition = ne;
5062 } else if (type_name->Equals(heap()->function_symbol())) {
5063 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5064 __ JumpIfSmi(input, false_label);
5065 __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
5066 __ b(eq, true_label);
5067 __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
5068 final_branch_condition = eq;
5070 } else if (type_name->Equals(heap()->object_symbol())) {
5071 __ JumpIfSmi(input, false_label);
5072 if (!FLAG_harmony_typeof) {
5073 __ CompareRoot(input, Heap::kNullValueRootIndex);
5074 __ b(eq, true_label);
5076 __ CompareObjectType(input, input, scratch,
5077 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
5078 __ b(lt, false_label);
5079 __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5080 __ b(gt, false_label);
5081 // Check for undetectable objects => false.
5082 __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5083 __ tst(ip, Operand(1 << Map::kIsUndetectable));
5084 final_branch_condition = eq;
5090 return final_branch_condition;
5094 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5095 Register temp1 = ToRegister(instr->TempAt(0));
5096 int true_block = chunk_->LookupDestination(instr->true_block_id());
5097 int false_block = chunk_->LookupDestination(instr->false_block_id());
5099 EmitIsConstructCall(temp1, scratch0());
5100 EmitBranch(true_block, false_block, eq);
5104 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5105 ASSERT(!temp1.is(temp2));
5106 // Get the frame pointer for the calling frame.
5107 __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5109 // Skip the arguments adaptor frame if it exists.
5110 Label check_frame_marker;
5111 __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5112 __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5113 __ b(ne, &check_frame_marker);
5114 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5116 // Check the marker in the calling frame.
5117 __ bind(&check_frame_marker);
5118 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5119 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5123 void LCodeGen::EnsureSpaceForLazyDeopt() {
5124 // Ensure that we have enough space after the previous lazy-bailout
5125 // instruction for patching the code here.
5126 int current_pc = masm()->pc_offset();
5127 int patch_size = Deoptimizer::patch_size();
5128 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5129 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5130 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5131 while (padding_size > 0) {
5133 padding_size -= Assembler::kInstrSize;
5136 last_lazy_deopt_pc_ = masm()->pc_offset();
5140 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5141 EnsureSpaceForLazyDeopt();
5142 ASSERT(instr->HasEnvironment());
5143 LEnvironment* env = instr->environment();
5144 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5145 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5149 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5150 DeoptimizeIf(al, instr->environment());
5154 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
5155 Register object = ToRegister(instr->object());
5156 Register key = ToRegister(instr->key());
5157 Register strict = scratch0();
5158 __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
5159 __ Push(object, key, strict);
5160 ASSERT(instr->HasPointerMap());
5161 LPointerMap* pointers = instr->pointer_map();
5162 RecordPosition(pointers->position());
5163 SafepointGenerator safepoint_generator(
5164 this, pointers, Safepoint::kLazyDeopt);
5165 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
5169 void LCodeGen::DoIn(LIn* instr) {
5170 Register obj = ToRegister(instr->object());
5171 Register key = ToRegister(instr->key());
5173 ASSERT(instr->HasPointerMap());
5174 LPointerMap* pointers = instr->pointer_map();
5175 RecordPosition(pointers->position());
5176 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
5177 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
5181 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5182 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5183 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5184 RecordSafepointWithLazyDeopt(
5185 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5186 ASSERT(instr->HasEnvironment());
5187 LEnvironment* env = instr->environment();
5188 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5192 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5193 class DeferredStackCheck: public LDeferredCode {
5195 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5196 : LDeferredCode(codegen), instr_(instr) { }
5197 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
5198 virtual LInstruction* instr() { return instr_; }
5200 LStackCheck* instr_;
5203 ASSERT(instr->HasEnvironment());
5204 LEnvironment* env = instr->environment();
5205 // There is no LLazyBailout instruction for stack-checks. We have to
5206 // prepare for lazy deoptimization explicitly here.
5207 if (instr->hydrogen()->is_function_entry()) {
5208 // Perform stack overflow check.
5210 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5211 __ cmp(sp, Operand(ip));
5213 StackCheckStub stub;
5214 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5215 EnsureSpaceForLazyDeopt();
5217 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5218 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5220 ASSERT(instr->hydrogen()->is_backwards_branch());
5221 // Perform stack overflow check if this goto needs it before jumping.
5222 DeferredStackCheck* deferred_stack_check =
5223 new DeferredStackCheck(this, instr);
5224 __ LoadRoot(ip, Heap::kStackLimitRootIndex);
5225 __ cmp(sp, Operand(ip));
5226 __ b(lo, deferred_stack_check->entry());
5227 EnsureSpaceForLazyDeopt();
5228 __ bind(instr->done_label());
5229 deferred_stack_check->SetExit(instr->done_label());
5230 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5231 // Don't record a deoptimization index for the safepoint here.
5232 // This will be done explicitly when emitting call and the safepoint in
5233 // the deferred code.
5238 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5239 // This is a pseudo-instruction that ensures that the environment here is
5240 // properly registered for deoptimization and records the assembler's PC
5242 LEnvironment* environment = instr->environment();
5243 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5244 instr->SpilledDoubleRegisterArray());
5246 // If the environment were already registered, we would have no way of
5247 // backpatching it with the spill slot operands.
5248 ASSERT(!environment->HasBeenRegistered());
5249 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5250 ASSERT(osr_pc_offset_ == -1);
5251 osr_pc_offset_ = masm()->pc_offset();
5255 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5256 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5258 DeoptimizeIf(eq, instr->environment());
5260 Register null_value = r5;
5261 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5262 __ cmp(r0, null_value);
5263 DeoptimizeIf(eq, instr->environment());
5265 __ tst(r0, Operand(kSmiTagMask));
5266 DeoptimizeIf(eq, instr->environment());
5268 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5269 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
5270 DeoptimizeIf(le, instr->environment());
5272 Label use_cache, call_runtime;
5273 __ CheckEnumCache(null_value, &call_runtime);
5275 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
5278 // Get the set of properties to enumerate.
5279 __ bind(&call_runtime);
5281 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5283 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
5284 __ LoadRoot(ip, Heap::kMetaMapRootIndex);
5286 DeoptimizeIf(ne, instr->environment());
5287 __ bind(&use_cache);
5291 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5292 Register map = ToRegister(instr->map());
5293 Register result = ToRegister(instr->result());
5294 __ LoadInstanceDescriptors(map, result);
5296 FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5298 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5299 __ cmp(result, Operand(0));
5300 DeoptimizeIf(eq, instr->environment());
5304 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5305 Register object = ToRegister(instr->value());
5306 Register map = ToRegister(instr->map());
5307 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5308 __ cmp(map, scratch0());
5309 DeoptimizeIf(ne, instr->environment());
5313 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5314 Register object = ToRegister(instr->object());
5315 Register index = ToRegister(instr->index());
5316 Register result = ToRegister(instr->result());
5317 Register scratch = scratch0();
5319 Label out_of_object, done;
5320 __ cmp(index, Operand(0));
5321 __ b(lt, &out_of_object);
5323 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5324 __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5325 __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5329 __ bind(&out_of_object);
5330 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5331 // Index is equal to negated out of object property index plus 1.
5332 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5333 __ ldr(result, FieldMemOperand(scratch,
5334 FixedArray::kHeaderSize - kPointerSize));
5341 } } // namespace v8::internal