// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-codegen-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {
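

// SafepointGenerator is handed to the masm as a CallWrapper so that a
// safepoint, carrying the pointer map of the call site, is recorded
// right after every call emitted through it.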
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->


bool LCodeGen::GenerateCode() {
  HPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(FPU);

  CodeStub::GenerateFPStubs();

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartArrayPointer<char> name(
        info()->shared_info()->DebugName()->ToCString());
    PrintF("Aborting LCodeGen in @\"%s\": ", *name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ stop("stop_at");
  }
#endif

  // a1: Callee's JS function.
  // cp: Callee's context.
  // fp: Caller's frame pointer.
  // ra: Caller's return address.

  // Strict mode functions and builtins need to replace the receiver
  // with undefined when called as functions (without an explicit
  // receiver object). t1 is zero for method calls and non-zero for
  // function calls.
  if (!info_->is_classic_mode() || info_->is_native()) {
    Label ok;
    __ Branch(&ok, eq, t1, Operand(zero_reg));

    int receiver_offset = scope()->num_parameters() * kPointerSize;
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
    __ sw(a2, MemOperand(sp, receiver_offset));
    __ bind(&ok);
  }

  __ Push(ra, fp, cp, a1);
  __ Addu(fp, sp, Operand(2 * kPointerSize));  // Adjust FP to point to saved FP.
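  // The frame now has the standard JS frame layout:
  //   [fp + 4]  caller's ra
  //   [fp + 0]  caller's fp
  //   [fp - 4]  context (cp)
  //   [fp - 8]  JS function (a1)
  // with fp pointing at the saved caller fp.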

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ li(a0, Operand(slots));
      __ li(a2, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ push(a2);
      __ Subu(a0, a0, 1);
      __ Branch(&loop, ne, a0, Operand(zero_reg));
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  // Possibly allocate a local context.
  int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0 ||
      (scope()->is_qml_mode() && scope()->is_global_scope())) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub((heap_slots < 0) ? 0 : heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      __ bind(code->entry());
      Comment(";;; Deferred code @%d: %s.",
              code->instruction_index(),
              code->instr()->Mnemonic());
      code->Generate();
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  // TODO(plind): not clear that this will have advantage for MIPS.
  // Skipping it for now. Raised issue #100 for this.
  Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
  return false;
}


bool LCodeGen::GenerateSafepointTable() {
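  // The safepoint table maps each recorded call site to the stack slots
  // (and registers, where applicable) holding tagged pointers at that
  // point, so the GC can walk optimized frames.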
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort("EmitLoadRegister: Unsupported double immediate.");
    } else {
      ASSERT(r.IsTagged());
      if (literal->IsSmi()) {
        __ li(scratch, Operand(literal));
      } else {
        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
      }
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort("unsupported double immediate");
    } else if (r.IsTagged()) {
      Abort("unsupported tagged immediate");
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  Handle<Object> literal = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
  return literal;
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsInteger32();
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  return value->Number();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    Handle<Object> literal = chunk_->LookupLiteral(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      return Operand(static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort("ToOperand Unsupported double immediate.");
    }
    ASSERT(r.IsTagged());
    return Operand(literal);
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort("ToOperand IsDoubleRegister unimplemented");
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
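  // Non-negative indices are locals and spill slots; negative indices are
  // incoming parameters. For example, index 0 -> MemOperand(fp, -12),
  // just below the fixed context and function slots, and index -1 ->
  // MemOperand(fp, +8), just above the caller's saved fp and ra.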
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return MemOperand(fp, -(index - 1) * kPointerSize);
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, context,
    // and the first word of the double in the fixed part of the frame.
    return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
  } else {
    // Incoming parameter. Skip the return address and the first word of
    // the double.
    return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
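  // The recursion above writes the outer environments first, so frames
  // appear outermost-first in the translation when the deoptimizer reads
  // it back.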
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    default:
      UNREACHABLE();
  }
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.

  if (FLAG_deopt_every_n_times == 1 &&
      info_->shared_info()->opt_count() == id) {
    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (FLAG_trap_on_deopt) {
    Label skip;
    if (cc != al) {
      __ Branch(&skip, NegateCondition(cc), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  // TODO(plind): The Arm port is a little different here, due to their
  // DeOpt jump table, which is not used for Mips yet.
  __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations = translations_.CreateByteArray();
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register cp always contains a pointer to the context.
    safepoint.DefinePointerRegister(cp);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition);
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  Register scratch = scratch0();
  const Register left = ToRegister(instr->InputAt(0));
  const Register result = ToRegister(instr->result());

  Label done;

  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    Register scratch = scratch0();
    ASSERT(!left.is(scratch));
    __ mov(scratch, left);
    int32_t p2constant = HConstant::cast(
        instr->hydrogen()->right())->Integer32Value();
    ASSERT(p2constant != 0);
    // Result always takes the sign of the dividend (left).
    p2constant = abs(p2constant);
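
    // A power-of-2 modulus reduces to a bit mask; e.g. for left % 8:
    //   left >= 0:  result = left & 7
    //   left <  0:  result = -((-left) & 7)
    // keeping the sign of the dividend, as JS requires.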
    Label positive_dividend;
    __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
    __ subu(result, zero_reg, left);
    __ And(result, result, p2constant - 1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(result, zero_reg, result);
    __ bind(&positive_dividend);
    __ And(result, scratch, p2constant - 1);
  } else {
    // div runs in the background while we check for special cases.
    Register right = EmitLoadRegister(instr->InputAt(1), scratch);
    __ div(left, right);

    // Check for x % 0.
    if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
      DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
    }

    __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
    __ mfhi(result);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->InputAt(0));
  const Register right = ToRegister(instr->InputAt(1));
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (-kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }
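
  // div leaves the remainder in hi and the quotient in lo. LDivI only
  // supports divisions with no remainder: a non-zero hi deoptimizes,
  // otherwise the quotient is read from lo.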
  __ mfhi(result);
  DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  __ mflo(result);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->InputAt(0));
  LOperand* right_op = instr->InputAt(1);

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  if (right_op->IsConstantOperand() && !can_overflow) {
    // Use optimized code for specific constants.
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should
      // be -0, so deoptimize.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        __ Subu(result, zero_reg, left);
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
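
        // The two lines above compute a branch-free absolute value: mask
        // is 0 for non-negative constants and -1 for negative ones, and
        // (x + mask) ^ mask == |x| (e.g. x == -6: (-6 - 1) ^ -1 == 6).
        // The shifted forms below then rewrite, for example:
        //   x * 8 -> x << 3
        //   x * 5 -> (x << 2) + x
        //   x * 7 -> (x << 3) - x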
        if (IsPowerOf2(constant_abs) ||
            IsPowerOf2(constant_abs - 1) ||
            IsPowerOf2(constant_abs + 1)) {
          if (IsPowerOf2(constant_abs)) {
            int32_t shift = WhichPowerOf2(constant_abs);
            __ sll(result, left, shift);
          } else if (IsPowerOf2(constant_abs - 1)) {
            int32_t shift = WhichPowerOf2(constant_abs - 1);
            __ sll(result, left, shift);
            __ Addu(result, result, left);
          } else if (IsPowerOf2(constant_abs + 1)) {
            int32_t shift = WhichPowerOf2(constant_abs + 1);
            __ sll(result, left, shift);
            __ Subu(result, result, left);
          }

          // Correct the sign of the result if the constant is negative.
          if (constant < 0) {
            __ Subu(result, zero_reg, result);
          }
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    Register right = EmitLoadRegister(right_op, scratch);
    if (bailout_on_minus_zero) {
      __ Or(ToRegister(instr->TempAt(0)), left, right);
    }

    if (can_overflow) {
      // hi:lo = left * right.
      __ mult(left, right);
      __ mfhi(scratch);
      __ mflo(result);
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      __ Mul(result, left, right);
    }

    if (bailout_on_minus_zero) {
      // Bail out if the result is supposed to be negative zero.
      Label done;
      __ Branch(&done, ne, result, Operand(zero_reg));
      DeoptimizeIf(lt,
                   instr->environment(),
                   ToRegister(instr->TempAt(0)),
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->InputAt(0);
  LOperand* right_op = instr->InputAt(1);
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      __ Xor(result, left, right);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->InputAt(1);
  Register left = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
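          // Shifting by zero leaves the value unchanged, but a set sign
          // bit would make the untagged result read as a uint32 above
          // kMaxInt, which is not representable as an int32, so deopt.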
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ sll(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to the previous clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
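    // SubuAndCheckForOverflow appears to leave a negative value in
    // 'overflow' exactly when the subtraction overflowed, hence the
    // signed less-than-zero bailout below. DoAddI uses the same pattern
    // with AdduAndCheckForOverflow.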
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value();
  if (value->IsSmi()) {
    __ li(ToRegister(instr->result()), Operand(value));
  } else {
    __ LoadHeapObject(ToRegister(instr->result()),
                      Handle<HeapObject>::cast(value));
  }
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->InputAt(0));
  __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->TempAt(0));
  Label done;

  // If the object is a smi return the object.
  __ Move(result, input);
  __ JumpIfSmi(input, &done);

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  if (FLAG_debug_code) {
    __ AbortIfSmi(object);
    __ GetObjectType(object, scratch, scratch);
    __ Assert(eq, "Trying to get date field from non-date.",
        scratch, Operand(JS_DATE_TYPE));
  }

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ Branch(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ Nor(result, zero_reg, Operand(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ push(input_reg);
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to the previous clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result into the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->InputAt(1)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block,
                          Condition cc, Register src1, const Operand& src2) {
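  // Emit as few branches as possible: if both targets resolve to the same
  // block, defer to EmitGoto; if either target is the next block in
  // emission order, emit one conditional branch (with the condition
  // negated when the fall-through is the left target) and fall through.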
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(cc), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


void LCodeGen::EmitBranchF(int left_block, int right_block,
                           Condition cc, FPURegister src1, FPURegister src2) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(cc), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->InputAt(0));
    EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
    // Test the double value. Zero and NaN are false.
    EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->InputAt(0));
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(true_block, false_block, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::all_types();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(true_label, eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(false_label, eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(false_label, eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, true_label);
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(false_label, ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(true_label, ne, at, Operand(zero_reg));
        __ Branch(false_label);
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(false_label);
        __ bind(&not_heap_number);
      }

      // We've seen something for the first time -> deopt.
      DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    __ jmp(chunk_->GetAssemblyLabel(block));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
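  // lt/gt/le/ge are the signed comparison conditions; lo ("lower"),
  // hi ("higher"), ls ("lower or same"), and hs ("higher or same") are
  // their unsigned counterparts.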
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->InputAt(0);
  LOperand* right = instr->InputAt(1);
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block =
        EvalComparison(instr->op(), left_val, right_val) ? true_block
                                                         : false_block;
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to false block label.
      __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
                 left_reg, right_reg);

      EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
    } else {
      Register cmp_left = no_reg;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
      } else if (left->IsConstantOperand()) {
        cmp_left = ToRegister(right);
        cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  Register right = ToRegister(instr->InputAt(1));
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  EmitBranch(true_block, false_block, eq, left, Operand(right));
}


void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
  Register left = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitBranch(true_block, false_block, eq, left,
             Operand(instr->hydrogen()->right()));
}


void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
  Register scratch = scratch0();
  Register reg = ToRegister(instr->InputAt(0));
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // If the expression is known to be untagged or a smi, then it's definitely
  // not null, and it can't be an undetectable object.
  if (instr->hydrogen()->representation().IsSpecialization() ||
      instr->hydrogen()->type().IsSmi()) {
    EmitGoto(false_block);
    return;
  }

  int true_block = chunk_->LookupDestination(instr->true_block_id());

  Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
      Heap::kNullValueRootIndex :
      Heap::kUndefinedValueRootIndex;
  __ LoadRoot(at, nil_value);
  if (instr->kind() == kStrictEquality) {
    EmitBranch(true_block, false_block, eq, reg, Operand(at));
  } else {
    Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
        Heap::kUndefinedValueRootIndex :
        Heap::kNullValueRootIndex;
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ LoadRoot(at, other_nil_value);  // In the delay slot.
    __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
    __ JumpIfSmi(reg, false_label);  // In the delay slot.
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ And(scratch, scratch, 1 << Map::kIsUndetectable);
    EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
  }
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2, false_label, true_label);

  EmitBranch(true_block, false_block, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string) {
  __ JumpIfSmi(input, is_not_string);
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp1 = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition true_cond =
      EmitIsString(reg, temp1, false_label);

  EmitBranch(true_block, false_block, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ JumpIfSmi(input, false_label);

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(true_block,
             false_block,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    __ AbortIfNotString(input);
  }

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ lw(scratch,
        FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
}
// Branches to a label or falls through with the answer in the temp
// register. Trashes the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsEqualTo(CStrVector("Function"))) {
    // Given the assertions below, we can use the same compares to test both
    // for being a function type and for being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are symbols it is sufficient to use an identity
  // comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


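// EmitClassOfTest() leaves the instance class name in |temp|; since both
// sides are symbols, the identity compare against |class_name| performed by
// the EmitBranch below is sufficient to decide the test.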
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->TempAt(0));
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  Label true_label, done;
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->InputAt(1)).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}


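// DoInstanceOfKnownGlobal inlines a one-entry (map -> boolean) cache at the
// call site: the two hole-value constants emitted below are later patched
// with the last map checked and the corresponding true/false result, so a
// monomorphic "x instanceof F" can be answered without re-entering the stub.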
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->InputAt(0));
  Register temp = ToRegister(instr->TempAt(0));
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<JSGlobalPropertyCell> cell =
      factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values are not instances of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // Get the temp register reserved by the instruction. This needs to be t0
  // because its slot in the pushed safepoint register area is used to
  // communicate the offset to the location of the map check.
  Register temp = ToRegister(instr->TempAt(0));
  ASSERT(temp.is(t0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasDeoptimizationEnvironment());
  LEnvironment* env = instr->deoptimization_environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}


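// DoCmpT materializes the boolean result in exactly three instructions: the
// branch, the LoadRoot(true) placed in its delay slot (always executed), and
// the fall-through LoadRoot(false) that overwrites the result only when the
// branch is not taken. The ASSERT_EQ verifies this instruction count.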
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
  __ bind(&done);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0.
    __ push(v0);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
  __ mov(sp, fp);
  __ Pop(ra, fp);
  __ Addu(sp, sp, Operand(sp_delta));
  __ Jump(ra);
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
  __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->TempAt(0));
    __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              kRAHasBeenSaved,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
  } else {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
                                               Register object,
                                               Handle<Map> type,
                                               Handle<String> name) {
  LookupResult lookup(isolate());
  type->LookupInDescriptors(NULL, *name, &lookup);
  ASSERT(lookup.IsFound() &&
         (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
  if (lookup.type() == FIELD) {
    int index = lookup.GetLocalFieldIndexFromMap(*type);
    int offset = index * kPointerSize;
    if (index < 0) {
      // Negative property indices are in-object properties, indexed
      // from the end of the fixed part of the object.
      __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
    } else {
      // Non-negative property indices are in the properties array.
      __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
      __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
    }
  } else {
    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
    __ LoadHeapObject(result, function);
  }
}


void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
  Register object = ToRegister(instr->object());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  int map_count = instr->hydrogen()->types()->length();
  Handle<String> name = instr->hydrogen()->name();
  if (map_count == 0) {
    ASSERT(instr->hydrogen()->need_generic());
    __ li(a2, Operand(name));
    Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    Label done;
    __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    for (int i = 0; i < map_count - 1; ++i) {
      Handle<Map> map = instr->hydrogen()->types()->at(i);
      Label next;
      __ Branch(&next, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&next);
    }
    Handle<Map> map = instr->hydrogen()->types()->last();
    if (instr->hydrogen()->need_generic()) {
      Label generic;
      __ Branch(&generic, ne, scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
      __ Branch(&done);
      __ bind(&generic);
      __ li(a2, Operand(name));
      Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
      CallCode(ic, RelocInfo::CODE_TARGET, instr);
    } else {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
      EmitLoadFieldOrConstantFunction(result, object, map, name);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    Label done, fail;
    __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
    __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
    __ Branch(&done, eq, scratch, Operand(at));
    // |scratch| still contains the map of the elements array.
    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
    __ Ext(scratch, scratch, Map::kElementsKindShift,
           Map::kElementsKindBitCount);
    __ Branch(&done, eq, scratch,
              Operand(FAST_ELEMENTS));
    __ Branch(&fail, lt, scratch,
              Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ Branch(&done, le, scratch,
              Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
    __ bind(&fail);
    __ Abort("Check for fast or external elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->InputAt(0));
  __ lw(to_reg, FieldMemOperand(from_reg,
                                ExternalArray::kExternalPointerOffset));
}


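// Loads an argument at a dynamic index from the caller's frame. With
// length == n and index == i, the address computed below works out to
// arguments + (n - i + 1) * kPointerSize; the "+ 1" accounts for the two
// words (see the comment in the body) that sit between the frame pointer
// and the last argument.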
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  // Bail out if index is not a valid argument index. The unsigned comparison
  // rejects negative indices for free.
  // TODO(plind): Should be optimized to do the sub before the DeoptimizeIf(),
  // as is done on ARM. It will save us an instruction.
  DeoptimizeIf(ls, instr->environment(), length, Operand(index));

  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for one of them, add one more.
  __ subu(length, length, index);
  __ Addu(length, length, Operand(1));
  __ sll(length, length, kPointerSizeLog2);
  __ Addu(at, arguments, Operand(length));
  __ lw(result, MemOperand(at, 0));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = EmitLoadRegister(instr->key(), scratch0());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Load the result.
  __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
  __ addu(scratch, elements, scratch);
  __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
  }
}


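// Loads from a FixedDoubleArray must watch for the hole: it is encoded as a
// NaN with a distinguished bit pattern, so comparing just the upper word of
// the slot against kHoleNanUpper32 is sufficient to detect it.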
void LCodeGen::DoLoadKeyedFastDoubleElement(
    LLoadKeyedFastDoubleElement* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int shift_size =
      ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }

  if (key_is_constant) {
    __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(elements, elements, Operand(scratch));
    __ Addu(elements, elements,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }

  __ ldc1(result, MemOperand(elements));
}


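// External (typed) array loads choose the instruction that matches the
// element signedness: lb/lh sign-extend, lbu/lhu zero-extend. Unsigned
// int32 elements additionally deoptimize when the loaded value has the top
// bit set, since such a value cannot be represented as a signed int32
// without boxing it into a double (see the TODO in the body).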
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
    LLoadKeyedSpecializedArrayElement* instr) {
  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0()));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0()));
    }
  } else {
    Register result = ToRegister(instr->result());
    Register scratch = scratch0();
    MemOperand mem_operand(zero_reg);
    if (key_is_constant) {
      mem_operand = MemOperand(external_pointer,
                               constant_key * (1 << shift_size));
    } else {
      __ sll(scratch, key, shift_size);
      __ Addu(scratch, scratch, external_pointer);
      mem_operand = MemOperand(scratch);
    }
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ lw(result, mem_operand);
        // TODO(danno): we could be more clever here, perhaps having a special
        // version of the stub that detects if the overflow case actually
        // happens, and generate code that returns a double rather than int.
        DeoptimizeIf(Ugreater_equal, instr->environment(),
                     result, Operand(0x80000000));
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  // Check if the calling frame is an arguments adaptor frame.
  Label done, adapted;
  __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
  __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Result is the frame pointer for the frame if not adapted and for the real
  // frame below the adaptor frame if adapted.
  __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
  __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
  int32_t strict_mode_function_mask =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
        FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


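// Support for Function.prototype.apply: the argument count is capped at
// kArgumentsLimit (deoptimizing above it), the arguments are copied from the
// elements array onto the stack highest-index first, and both loop branches
// keep their delay slots busy with the address-shift computation.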
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->InputAt(0);
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort("DoPushArgument not implemented for double type.");
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadHeapObject(result, instr->hydrogen()->closure());
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, cp);
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(cp, instr->qml_global()
                                       ? Context::QML_GLOBAL_INDEX
                                       : Context::GLOBAL_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind) {
  bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
      function->shared()->formal_parameter_count() == arity;

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  if (can_invoke_directly) {
    __ LoadHeapObject(a1, function);
    // Change context if needed.
    bool change_context =
        (info()->closure()->context() != function->context()) ||
        scope()->contains_with() ||
        (scope()->num_heap_slots() > 0);
    if (change_context) {
      __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    }

    // Set a0 to the arguments count if adaptation is not needed. Assumes
    // that a0 is available to write to at this point.
    if (!function->NeedsArgumentsAdaption()) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(t1, call_kind);
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}


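// Math.abs on a tagged value: for a HeapNumber the absolute value is
// produced by clearing the sign bit of the exponent word and allocating a
// fresh number (heap-allocated doubles are immutable), falling back to the
// runtime only when the inline allocation fails.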
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Set the pointer to the new heap number in tmp1.
    if (!tmp1.is(v0)) __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


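// Integer Math.abs: only kMinInt (0x80000000) stays negative after the
// negation below, because its absolute value is not representable as a
// 32-bit integer; that single case triggers the deoptimization.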
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() { return instr_; }
   private:
    LUnaryMathOperation* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->InputAt(0));
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  FPURegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->TempAt(0));

  __ EmitFPUTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  // Load the result.
  __ mfc1(result, single_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}


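// Math.round is implemented as floor(input + 0.5) via the FPU truncate
// helper, with two shortcuts on the exponent bits: inputs in ]-0.5, +0.5[
// produce +/-0 directly, and inputs at or beyond 2^32 deoptimize because
// the conversion cannot represent them.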
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr->environment(), result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;

  __ EmitFPUTruncate(kRoundToMinusInf,
                     double_scratch0().low(),
                     double_scratch0(),
                     result,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  __ mfc1(result, double_scratch0().low());

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


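// Math.pow(x, 0.5) is computed as sqrt(x + 0) with one exception mandated
// by ECMA-262: an input of -Infinity must yield +Infinity even though
// sqrt(-Infinity) is NaN. Adding +0 first also turns -0 into +0, so the
// result for a negative-zero input correctly comes out as +0.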
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
  Label done;

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
         ToDoubleRegister(instr->InputAt(1)).is(f4));
  ASSERT(!instr->InputAt(1)->IsRegister() ||
         ToRegister(instr->InputAt(1)).is(a2));
  ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(a2, &no_deopt);
    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
    // at must hold the heap number map for the deopt check below.
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


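// DoRandom implements the fast path of Math.random() with two 16-bit
// multiply-with-carry generators whose seeds live in the global context:
// each step computes state = multiplier * (state & 0xFFFF) + (state >> 16).
// The 32 combined random bits are then placed in the low mantissa word of
// the double 2^20 (upper half 0x41300000); subtracting 2^20 leaves
// random_bits * 2^-32, a uniform double in [0, 1), without needing an
// integer-to-double conversion instruction.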
void LCodeGen::DoRandom(LRandom* instr) {
  class DeferredDoRandom: public LDeferredCode {
   public:
    DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LRandom* instr_;
  };

  DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
  // Having marked this instruction as a call we can use any
  // registers.
  ASSERT(ToDoubleRegister(instr->result()).is(f0));
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));

  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
  // a2: FixedArray of the global context's random seeds.

  // Load state[0].
  __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
  __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
  // Load state[1].
  __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
  // a1: state[0].
  // a0: state[1].

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  __ And(a3, a1, Operand(0xFFFF));
  __ li(t0, Operand(18273));
  __ Mul(a3, a3, t0);
  __ srl(a1, a1, 16);
  __ Addu(a1, a3, a1);
  // Save state[0].
  __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(a3, a0, Operand(0xFFFF));
  __ li(t0, Operand(36969));
  __ Mul(a3, a3, t0);
  __ srl(a0, a0, 16);
  __ Addu(a0, a3, a0);
  // Save state[1].
  __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  __ And(a0, a0, Operand(0x3FFFF));
  __ sll(a1, a1, 14);
  __ Addu(v0, a0, a1);

  __ bind(deferred->exit());

  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  __ li(a2, Operand(0x41300000));
  // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
  __ Move(f12, v0, a2);
  // Move 0x4130000000000000 to FPU.
  __ Move(f14, zero_reg, a2);
  // Subtract to get the result.
  __ sub_d(f0, f12, f14);
}


void LCodeGen::DoDeferredRandom(LRandom* instr) {
  __ PrepareCallCFunction(1, scratch0());
  __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
  // Return value is in v0.
}


void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    case kMathPowHalf:
      DoMathPowHalf(instr);
      break;
    case kMathCos:
      DoMathCos(instr);
      break;
    case kMathSin:
      DoMathSin(instr);
      break;
    case kMathTan:
      DoMathTan(instr);
      break;
    case kMathLog:
      DoMathLog(instr);
      break;
    default:
      Abort("Unimplemented type of LUnaryMathOperation.");
      UNREACHABLE();
  }
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());
  ASSERT(instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
  ParameterCount count(instr->arity());
  __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  // Restore context register.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ li(a0, Operand(instr->arity()));
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  int offset = instr->offset();

  ASSERT(!object.is(value));

  if (!instr->transition().is_null()) {
    __ li(scratch, Operand(instr->transition()));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  }

  // Do the store.
  HType type = instr->hydrogen()->value()->type();
  SmiCheck check_needed =
      type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (instr->is_in_object()) {
    __ sw(value, FieldMemOperand(object, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    __ sw(value, FieldMemOperand(scratch, offset));
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          kRAHasBeenSaved,
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  DeoptimizeIf(hs,
               instr->environment(),
               ToRegister(instr->index()),
               Operand(ToRegister(instr->length())));
}


void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
                                            : no_reg;
  Register scratch = scratch0();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ sw(value, FieldMemOperand(elements, offset));
  } else {
    __ sll(scratch, key, kPointerSizeLog2);
    __ addu(scratch, elements, scratch);
    __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    HType type = instr->hydrogen()->value()->type();
    SmiCheck check_needed =
        type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   kRAHasBeenSaved,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


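// Stores into double arrays canonicalize NaNs: any NaN value is replaced
// with the canonical NaN bit pattern before being written, so the special
// hole NaN can never be stored by accident and stays unambiguous.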
void LCodeGen::DoStoreKeyedFastDoubleElement(
    LStoreKeyedFastDoubleElement* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  Label not_nan;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    __ sll(scratch, key, shift_size);
    __ Addu(scratch, elements, Operand(scratch));
    __ Addu(scratch, scratch,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  }

  Label is_nan;
  // Check for NaN. All NaNs must be canonicalized.
  __ BranchF(NULL, &is_nan, eq, value, value);
  __ Branch(&not_nan);

  // Only load canonical NaN if the comparison above set the overflow.
  __ bind(&is_nan);
  __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch));
}


void LCodeGen::DoStoreKeyedSpecializedArrayElement(
    LStoreKeyedSpecializedArrayElement* instr) {

  Register external_pointer = ToRegister(instr->external_pointer());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort("array index constant value too big.");
    }
  } else {
    key = ToRegister(instr->key());
  }
  int shift_size = ElementsKindToShiftSize(elements_kind);

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(scratch0()));
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ sdc1(value, MemOperand(scratch0()));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand(zero_reg);
    Register scratch = scratch0();
    if (key_is_constant) {
      mem_operand = MemOperand(external_pointer,
                               constant_key * (1 << shift_size));
    } else {
      __ sll(scratch, key, shift_size);
      __ Addu(scratch, scratch, external_pointer);
      mem_operand = MemOperand(scratch);
    }
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ONLY_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


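// Elements-kind transitions: smi-only -> object only requires swapping in
// the new map (plus a write barrier), while smi-only -> double and
// double -> object must rewrite the backing store, so those cases defer to
// the corresponding builtins with the object and new map pinned in a2/a3.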
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register new_map_reg = ToRegister(instr->new_map_reg());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = from_map->elements_kind();
  ElementsKind to_kind = to_map->elements_kind();

  __ mov(ToRegister(instr->result()), object_reg);

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  __ li(new_map_reg, Operand(to_map));
  if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, kRAHasBeenSaved, kDontSaveFPRegs);
  } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
             to_kind == FAST_DOUBLE_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
             RelocInfo::CODE_TARGET, instr);
  } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
    Register fixed_object_reg = ToRegister(instr->temp_reg());
    ASSERT(fixed_object_reg.is(a2));
    ASSERT(new_map_reg.is(a3));
    __ mov(fixed_object_reg, object_reg);
    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
             RelocInfo::CODE_TARGET, instr);
  } else {
    UNREACHABLE();
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(NO_STRING_CHECK_IN_STUB);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


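// The fast path loads the character in line via StringCharLoadGenerator;
// string shapes it cannot handle in line jump to the deferred code, which
// calls into the runtime.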
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt: public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  if (FLAG_debug_code) {
    __ AbortIfNotSmi(v0);
  }
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


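// Character codes up to kMaxAsciiCharCode are served from the
// single-character string cache; a wider char code or a cache miss
// (an undefined entry) falls through to the deferred runtime call.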
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode: public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxAsciiCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringLength(LStringLength* instr) {
  Register string = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(string, String::kLengthOffset));
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->InputAt(0));
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


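// Called when SmiTag overflowed. Untagging the result (an arithmetic shift
// right by one) yields the original value with a wrong sign bit, so flipping
// bit 31 recovers the original int32, which is then converted to a double
// and boxed in a freshly allocated heap number.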
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register src = ToRegister(instr->InputAt(0));
  Register dst = ToRegister(instr->result());
  FPURegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  Label done;
  __ SmiUntag(src, dst);
  __ Xor(src, src, Operand(0x80000000));

  __ mtc1(src, dbl_scratch);
  __ cvt_d_w(dbl_scratch, dbl_scratch);
  if (FLAG_inline_new) {
    __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ Move(dst, v0);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
  __ StoreToSafepointRegisterSlot(dst, dst);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


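// Converts a tagged value to a double: smis are untagged and converted
// directly, heap numbers are loaded from their value field. Depending on the
// flags, undefined either deoptimizes or becomes NaN, and -0 can be made to
// deoptimize as well.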
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool deoptimize_on_undefined,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env) {
  Register scratch = scratch0();

  Label load_smi, heap_number, done;

  // Smi check.
  __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

  // Heap number map check.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  if (deoptimize_on_undefined) {
    DeoptimizeIf(ne, env, scratch, Operand(at));
  } else {
    __ Branch(&heap_number, eq, scratch, Operand(at));

    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, env, input_reg, Operand(at));

    // Convert undefined to NaN.
    __ LoadRoot(at, Heap::kNanValueRootIndex);
    __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
    __ Branch(&done);

    __ bind(&heap_number);
  }
  // Heap number to double register conversion.
  __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  if (deoptimize_on_minus_zero) {
    __ mfc1(at, result_reg.low());
    __ Branch(&done, ne, at, Operand(zero_reg));
    __ mfc1(scratch, result_reg.high());
    DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
  }
  __ Branch(&done);

  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


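// Tagged-to-int32 slow path. Truncating conversions (as used by the JS
// bitwise operators) go through EmitECMATruncate and turn undefined into
// zero; exact conversions deoptimize unless the heap number round-trips to
// the same int32 and, if required, is not -0.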
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->InputAt(0));
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_scratch = double_scratch0();
  FPURegister single_scratch = double_scratch.low();

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
    ASSERT(!scratch3.is(input_reg) &&
           !scratch3.is(scratch1) &&
           !scratch3.is(scratch2));
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label heap_number;
    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ mov(input_reg, zero_reg);
    __ Branch(&done);

    __ bind(&heap_number);
    __ ldc1(double_scratch2,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    __ EmitECMATruncate(input_reg,
                        double_scratch2,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(input_reg, single_scratch);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Let the deferred code handle the HeapObject case.
  __ JumpIfNotSmi(input_reg, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->deoptimize_on_undefined(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));
  DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
  FPURegister single_scratch = double_scratch0().low();

  if (instr->truncating()) {
    Register scratch3 = ToRegister(instr->TempAt(1));
    __ EmitECMATruncate(result_reg,
                        double_input,
                        single_scratch,
                        scratch1,
                        scratch2,
                        scratch3);
  } else {
    Register except_flag = scratch2;

    __ EmitFPUTruncate(kRoundToMinusInf,
                       single_scratch,
                       double_input,
                       scratch1,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    // Load the result.
    __ mfc1(result_reg, single_scratch);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  LOperand* input = instr->InputAt(0);
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}


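// Instance-type checks come in two forms: an interval check (deopt when the
// type falls outside [first, last]) and a mask-and-tag check; for a
// power-of-two mask a single And against the scratch register suffices.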
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
          at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  Register reg = ToRegister(instr->value());
  Handle<JSFunction> target = instr->hydrogen()->target();
  if (isolate()->heap()->InNewSpace(*target)) {
    Handle<JSGlobalPropertyCell> cell =
        isolate()->factory()->NewJSGlobalPropertyCell(target);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(target));
  }
}


void LCodeGen::DoCheckMapCommon(Register reg,
                                Register scratch,
                                Handle<Map> map,
                                CompareMapMode mode,
                                LEnvironment* env) {
  Label success;
  __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
  DeoptimizeIf(al, env);
  __ bind(&success);
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  Register scratch = scratch0();
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  Label success;
  SmallMapList* map_set = instr->hydrogen()->map_set();
  for (int i = 0; i < map_set->length() - 1; i++) {
    Handle<Map> map = map_set->at(i);
    __ CompareMapAndBranch(
        reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
  }
  Handle<Map> map = map_set->last();
  DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  // Heap number
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ Branch(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


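// Walks the prototype chain from instr->prototype() up to the holder,
// deoptimizing via DoCheckMapCommon if any map along the chain has changed.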
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  Register temp2 = ToRegister(instr->TempAt(1));

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  __ LoadHeapObject(temp1, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    DoCheckMapCommon(temp1, temp2,
                     Handle<Map>(current_prototype->map()),
                     ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    __ LoadHeapObject(temp1, current_prototype);
  }

  // Check the holder map.
  DoCheckMapCommon(temp1, temp2,
                   Handle<Map>(current_prototype->map()),
                   ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
}


void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
  class DeferredAllocateObject: public LDeferredCode {
   public:
    DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocateObject* instr_;
  };

  DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->TempAt(0));
  Register scratch2 = ToRegister(instr->TempAt(1));
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();
  ASSERT(initial_map->pre_allocated_property_fields() +
         initial_map->unused_property_fields() -
         initial_map->inobject_properties() == 0);

  // Allocate memory for the object. The initial map might change when
  // the constructor's prototype changes, but instance size and property
  // counts remain unchanged (if slack tracking finished).
  ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
  __ AllocateInNewSpace(instance_size,
                        result,
                        scratch,
                        scratch2,
                        deferred->entry(),
                        TAG_OBJECT);

  __ bind(deferred->exit());
  if (FLAG_debug_code) {
    Label is_in_new_space;
    __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
    __ Abort("Allocated object is not in new-space");
    __ bind(&is_in_new_space);
  }

  // Load the initial map.
  Register map = scratch;
  __ LoadHeapObject(map, constructor);
  __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));

  // Initialize map and fields of the newly allocated object.
  ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
  __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
  __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
  __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
  __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
  if (initial_map->inobject_properties() != 0) {
    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
    for (int i = 0; i < initial_map->inobject_properties(); i++) {
      int property_offset = JSObject::kHeaderSize + i * kPointerSize;
      __ sw(scratch, FieldMemOperand(result, property_offset));
    }
  }
}


void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
  Register result = ToRegister(instr->result());
  Handle<JSFunction> constructor = instr->hydrogen()->constructor();
  Handle<Map> initial_map(constructor->initial_map());
  int instance_size = initial_map->instance_size();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ li(a0, Operand(Smi::FromInt(instance_size)));
  __ push(a0);
  CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  Heap* heap = isolate()->heap();
  ElementsKind boilerplate_elements_kind =
      instr->hydrogen()->boilerplate_elements_kind();

  // Deopt if the array literal boilerplate ElementsKind is of a type different
  // than the expected one. The check isn't necessary if the boilerplate has
  // already been converted to FAST_ELEMENTS.
  if (boilerplate_elements_kind != FAST_ELEMENTS) {
    __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
    // Load map into a2.
    __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
    // Load the map's "bit field 2".
    __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
    // Retrieve elements_kind from bit field 2.
    __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
    DeoptimizeIf(ne,
                 instr->environment(),
                 a2,
                 Operand(boilerplate_elements_kind));
  }
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  // Boilerplate already exists, constant elements are never accessed.
  // Pass an empty fixed array.
  __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
  __ Push(a3, a2, a1);

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
            ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
            : FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


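// Deep-copies a boilerplate object into pre-allocated memory. *offset tracks
// the running position within the allocation: each object is copied at the
// current offset, and nested objects and backing stores are laid out
// immediately behind it, so pointers between them can be computed as
// result + offset before the copy recurses.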
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
                            Register result,
                            Register source,
                            int* offset) {
  ASSERT(!source.is(a2));
  ASSERT(!result.is(a2));

  // Only elements backing stores for non-COW arrays need to be copied.
  Handle<FixedArrayBase> elements(object->elements());
  bool has_elements = elements->length() > 0 &&
      elements->map() != isolate()->heap()->fixed_cow_array_map();

  // Increase the offset so that subsequent objects end up right after
  // this object and its backing store.
  int object_offset = *offset;
  int object_size = object->map()->instance_size();
  int elements_offset = *offset + object_size;
  int elements_size = has_elements ? elements->Size() : 0;
  *offset += object_size + elements_size;

  // Copy object header.
  ASSERT(object->properties()->length() == 0);
  int inobject_properties = object->map()->inobject_properties();
  int header_size = object_size - inobject_properties * kPointerSize;
  for (int i = 0; i < header_size; i += kPointerSize) {
    if (has_elements && i == JSObject::kElementsOffset) {
      __ Addu(a2, result, Operand(elements_offset));
    } else {
      __ lw(a2, FieldMemOperand(source, i));
    }
    __ sw(a2, FieldMemOperand(result, object_offset + i));
  }

  // Copy in-object properties.
  for (int i = 0; i < inobject_properties; i++) {
    int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
    Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
    if (value->IsJSObject()) {
      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
      __ Addu(a2, result, Operand(*offset));
      __ sw(a2, FieldMemOperand(result, total_offset));
      __ LoadHeapObject(source, value_object);
      EmitDeepCopy(value_object, result, source, offset);
    } else if (value->IsHeapObject()) {
      __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    } else {
      __ li(a2, Operand(value));
      __ sw(a2, FieldMemOperand(result, total_offset));
    }
  }

  if (has_elements) {
    // Copy elements backing store header.
    __ LoadHeapObject(source, elements);
    for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
      __ lw(a2, FieldMemOperand(source, i));
      __ sw(a2, FieldMemOperand(result, elements_offset + i));
    }

    // Copy elements backing store content.
    int elements_length = has_elements ? elements->length() : 0;
    if (elements->IsFixedDoubleArray()) {
      Handle<FixedDoubleArray> double_array =
          Handle<FixedDoubleArray>::cast(elements);
      for (int i = 0; i < elements_length; i++) {
        int64_t value = double_array->get_representation(i);
        // We only support little endian mode...
        int32_t value_low = value & 0xFFFFFFFF;
        int32_t value_high = value >> 32;
        int total_offset =
            elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
        __ li(a2, Operand(value_low));
        __ sw(a2, FieldMemOperand(result, total_offset));
        __ li(a2, Operand(value_high));
        __ sw(a2, FieldMemOperand(result, total_offset + 4));
      }
    } else if (elements->IsFixedArray()) {
      for (int i = 0; i < elements_length; i++) {
        int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
        Handle<Object> value = JSObject::GetElement(object, i);
        if (value->IsJSObject()) {
          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
          __ Addu(a2, result, Operand(*offset));
          __ sw(a2, FieldMemOperand(result, total_offset));
          __ LoadHeapObject(source, value_object);
          EmitDeepCopy(value_object, result, source, offset);
        } else if (value->IsHeapObject()) {
          __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        } else {
          __ li(a2, Operand(value));
          __ sw(a2, FieldMemOperand(result, total_offset));
        }
      }
    } else {
      UNREACHABLE();
    }
  }
}


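// Materializes a nested literal from its boilerplate with a single
// allocation covering every sub-object, then fills it in with EmitDeepCopy;
// the ASSERT_EQ checks that the copy consumed exactly the allocated size.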
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
  int size = instr->hydrogen()->total_size();

  // Allocate all objects that are part of the literal in one big
  // allocation. This avoids multiple limit checks.
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ Branch(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ push(a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);

  __ bind(&allocated);
  int offset = 0;
  __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
  EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
  ASSERT_EQ(size, offset);
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Handle<FixedArray> literals(instr->environment()->closure()->literals());
  Handle<FixedArray> constant_properties =
      instr->hydrogen()->constant_properties();

  // Set up the parameters to the stub/runtime call.
  __ LoadHeapObject(t0, literals);
  __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(a2, Operand(constant_properties));
  int flags = instr->hydrogen()->fast_elements()
      ? ObjectLiteral::kFastElements
      : ObjectLiteral::kNoFlags;
  __ li(a1, Operand(Smi::FromInt(flags)));
  __ Push(t0, a3, a2, a1);

  // Pick the right runtime function or stub to call.
  int properties_count = constant_properties->length() / 2;
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else if (flags != ObjectLiteral::kFastElements ||
      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  } else {
    FastCloneShallowObjectStub stub(properties_count);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->InputAt(0)).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // a3 = JS function.
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ Branch(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && shared_info->num_literals() == 0) {
    FastNewClosureStub stub(shared_info->language_mode());
    __ li(a1, Operand(shared_info));
    __ push(a1);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(shared_info));
    __ li(a1, Operand(pretenure
                      ? factory()->true_value()
                      : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->InputAt(0));
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->InputAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
  }
}


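// Emits the checks for a single typeof comparison. Rather than branching
// itself, the final comparison is handed back to the caller as a condition
// plus its two operands (cmp1/cmp2, passed by reference), so
// DoTypeofIsAndBranch above can fold it into EmitBranch.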
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_symbol())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_symbol())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_symbol())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_symbol())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_symbol())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_symbol())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    // input is an object, it is safe to use GetObjectType in the delay slot.
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the InstanceType can be loaded.
    __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // Still an object, so the BitField can be loaded.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(true_block, false_block, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


void LCodeGen::EnsureSpaceForLazyDeopt() {
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  int patch_size = Deoptimizer::patch_size();
  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  Register object = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  Register strict = scratch0();
  __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
  __ Push(object, key, strict);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoIn(LIn* instr) {
  Register obj = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  __ Push(key, obj);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    StackCheckStub stub;
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    EnsureSpaceForLazyDeopt();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}


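// Loads a field given a smi index whose sign selects the location:
// non-negative indices address in-object fields, negative indices address
// the out-of-object properties array (see the negated-index comment below).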
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal