1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "mips/lithium-codegen-mips.h"
31 #include "mips/lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
// Call wrapper that records a safepoint right after a generated call
// returns (AfterCall); BeforeCall is a no-op.
// NOTE(review): the embedded line numbers skip (40, 44-46, 48, ...), so
// access specifiers, the constructor initializer list, the codegen_
// member and the closing brace appear to be missing from this excerpt -
// confirm against the full file.
39 class SafepointGenerator : public CallWrapper {
41 SafepointGenerator(LCodeGen* codegen,
42 LPointerMap* pointers,
43 Safepoint::DeoptMode mode)
47 virtual ~SafepointGenerator() { }
49 virtual void BeforeCall(int call_size) const { }
51 virtual void AfterCall() const {
52 codegen_->RecordSafepoint(pointers_, deopt_mode_);
// Pointer map and deopt mode captured at construction, consumed in AfterCall.
57 LPointerMap* pointers_;
58 Safepoint::DeoptMode deopt_mode_;
// Drives full code generation for the chunk: prologue, deferred code and
// safepoint table. Returns the combined success of each phase.
// NOTE(review): GenerateBody() is presumably called between GeneratePrologue()
// and GenerateDeferredCode() on the missing line 78 - confirm.
64 bool LCodeGen::GenerateCode() {
65 HPhase phase("Z_Code generation", chunk());
// FPU must be available for the generated code; scope asserts/enables it.
68 CpuFeatures::Scope scope(FPU);
70 CodeStub::GenerateFPStubs();
72 // Open a frame scope to indicate that there is a frame on the stack. The
73 // NONE indicates that the scope shouldn't actually generate code to set up
74 // the frame (that is done in GeneratePrologue).
75 FrameScope frame_scope(masm_, StackFrame::NONE);
77 return GeneratePrologue() &&
79 GenerateDeferredCode() &&
80 GenerateSafepointTable();
// Finalizes the generated Code object: stack slot count, safepoint table
// offset, and deoptimization data.
84 void LCodeGen::FinishCode(Handle<Code> code) {
86 code->set_stack_slots(GetStackSlotCount());
87 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
88 PopulateDeoptimizationData(code);
// Aborts lithium code generation with a printf-style message; when
// FLAG_trace_bailout is set, also prints the function being compiled.
// NOTE(review): the va_list declaration and va_end appear to be on lines
// missing from this excerpt.
92 void LCodeGen::Abort(const char* format, ...) {
93 if (FLAG_trace_bailout) {
94 SmartArrayPointer<char> name(
95 info()->shared_info()->DebugName()->ToCString());
96 PrintF("Aborting LCodeGen in @\"%s\": ", *name);
98 va_start(arguments, format);
99 OS::VPrint(format, arguments);
// Emits a printf-style comment into the assembler stream when
// FLAG_code_comments is enabled; otherwise a no-op.
107 void LCodeGen::Comment(const char* format, ...) {
108 if (!FLAG_code_comments) return;
110 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
112 va_start(arguments, format);
113 builder.AddFormattedList(format, arguments);
116 // Copy the string before recording it in the assembler to avoid
117 // issues when the stack allocated buffer goes out of scope.
118 size_t length = builder.position();
// +1 for the NUL terminator produced by Finalize().
119 Vector<char> copy = Vector<char>::New(length + 1);
120 memcpy(copy.start(), builder.Finalize(), copy.length());
121 masm()->RecordComment(copy.start());
// Emits the function prologue: optional stop-at breakpoint, strict-mode
// receiver patching, frame construction, stack slot reservation, optional
// local context allocation and parameter copying, and trace-enter hook.
// Returns true unless code generation was aborted.
125 bool LCodeGen::GeneratePrologue() {
126 ASSERT(is_generating());
// Debugging aid: break at entry of the function named by --stop_at.
129 if (strlen(FLAG_stop_at) > 0 &&
130 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
135 // a1: Callee's JS function.
136 // cp: Callee's context.
137 // fp: Caller's frame pointer.
140 // Strict mode functions and builtins need to replace the receiver
141 // with undefined when called as functions (without an explicit
142 // receiver object). r5 is zero for method calls and non-zero for
144 if (!info_->is_classic_mode() || info_->is_native()) {
146 __ Branch(&ok, eq, t1, Operand(zero_reg));
148 int receiver_offset = scope()->num_parameters() * kPointerSize;
149 __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
150 __ sw(a2, MemOperand(sp, receiver_offset));
// Build the standard frame: return address, caller fp, context, function.
154 __ Push(ra, fp, cp, a1);
155 __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
157 // Reserve space for the stack slots needed by the code.
158 int slots = GetStackSlotCount();
// In debug mode, fill the reserved slots with a recognizable zap value.
160 if (FLAG_debug_code) {
161 __ li(a0, Operand(slots));
162 __ li(a2, Operand(kSlotsZapValue));
167 __ Branch(&loop, ne, a0, Operand(zero_reg));
169 __ Subu(sp, sp, Operand(slots * kPointerSize));
173 // Possibly allocate a local context.
174 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
175 if (heap_slots > 0) {
176 Comment(";;; Allocate local context");
177 // Argument to NewContext is the function, which is in a1.
// Fast path via stub for small contexts, runtime call otherwise.
179 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
180 FastNewContextStub stub(heap_slots);
183 __ CallRuntime(Runtime::kNewFunctionContext, 1);
185 RecordSafepoint(Safepoint::kNoLazyDeopt);
186 // Context is returned in both v0 and cp. It replaces the context
187 // passed to us. It's saved in the stack and kept live in cp.
188 __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
189 // Copy any necessary parameters into the context.
190 int num_parameters = scope()->num_parameters();
191 for (int i = 0; i < num_parameters; i++) {
192 Variable* var = scope()->parameter(i);
193 if (var->IsContextSlot()) {
194 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
195 (num_parameters - 1 - i) * kPointerSize;
196 // Load parameter from stack.
197 __ lw(a0, MemOperand(fp, parameter_offset));
198 // Store it in the context.
199 MemOperand target = ContextOperand(cp, var->index());
201 // Update the write barrier. This clobbers a3 and a0.
202 __ RecordWriteContextSlot(
203 cp, target.offset(), a0, a3, kRAHasBeenSaved, kSaveFPRegs);
206 Comment(";;; End allocate local context");
// NOTE(review): presumably guarded by FLAG_trace on a missing line.
211 __ CallRuntime(Runtime::kTraceEnter, 0);
213 EnsureSpaceForLazyDeopt();
214 return !is_aborted();
// Compiles each lithium instruction in order to native code. Instructions
// following a replaced label are skipped until the next label.
218 bool LCodeGen::GenerateBody() {
219 ASSERT(is_generating());
220 bool emit_instructions = true;
221 for (current_instruction_ = 0;
222 !is_aborted() && current_instruction_ < instructions_->length();
223 current_instruction_++) {
224 LInstruction* instr = instructions_->at(current_instruction_);
225 if (instr->IsLabel()) {
226 LLabel* label = LLabel::cast(instr);
// A replaced label marks dead code: suppress emission until the next label.
227 emit_instructions = !label->HasReplacement();
230 if (emit_instructions) {
231 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
232 instr->CompileToNative(this);
235 return !is_aborted();
// Emits the out-of-line (deferred) code stubs collected during the main
// pass; each deferred block jumps back to its exit label when done.
239 bool LCodeGen::GenerateDeferredCode() {
240 ASSERT(is_generating());
241 if (deferred_.length() > 0) {
242 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
243 LDeferredCode* code = deferred_[i];
244 __ bind(code->entry());
245 Comment(";;; Deferred code @%d: %s.",
246 code->instruction_index(),
247 code->instr()->Mnemonic());
// NOTE(review): code->Generate() is presumably invoked on the missing
// line 248 before jumping back - confirm.
249 __ jmp(code->exit());
252 // Deferred code is the last part of the instruction sequence. Mark
253 // the generated code as done unless we bailed out.
254 if (!is_aborted()) status_ = DONE;
255 return !is_aborted();
// Deopt jump tables are not implemented on MIPS; aborts if reached.
259 bool LCodeGen::GenerateDeoptJumpTable() {
260 // TODO(plind): not clear that this will have advantage for MIPS.
261 // Skipping it for now. Raised issue #100 for this.
262 Abort("Unimplemented: %s", "GenerateDeoptJumpTable");
// Emits the accumulated safepoint table at the end of the code object.
267 bool LCodeGen::GenerateSafepointTable() {
269 safepoints_.Emit(masm(), GetStackSlotCount());
270 return !is_aborted();
// Maps a register-allocator index to the corresponding CPU register.
274 Register LCodeGen::ToRegister(int index) const {
275 return Register::FromAllocationIndex(index);
// Maps a register-allocator index to the corresponding FPU double register.
279 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
280 return DoubleRegister::FromAllocationIndex(index);
// Returns the CPU register assigned to a register-allocated LOperand.
284 Register LCodeGen::ToRegister(LOperand* op) const {
285 ASSERT(op->IsRegister());
286 return ToRegister(op->index());
// Materializes an LOperand into a register. Register operands are returned
// directly; constants and stack slots are loaded into 'scratch' and
// 'scratch' is returned. Double immediates are unsupported and abort.
290 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
291 if (op->IsRegister()) {
292 return ToRegister(op->index());
293 } else if (op->IsConstantOperand()) {
294 LConstantOperand* const_op = LConstantOperand::cast(op);
295 Handle<Object> literal = chunk_->LookupLiteral(const_op);
296 Representation r = chunk_->LookupLiteralRepresentation(const_op);
297 if (r.IsInteger32()) {
298 ASSERT(literal->IsNumber());
299 __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
300 } else if (r.IsDouble()) {
301 Abort("EmitLoadRegister: Unsupported double immediate.");
303 ASSERT(r.IsTagged());
304 if (literal->IsSmi()) {
305 __ li(scratch, Operand(literal));
// Heap objects go through LoadHeapObject so they can be handled by GC.
307 __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
311 } else if (op->IsStackSlot() || op->IsArgument()) {
312 __ lw(scratch, ToMemOperand(op));
// Returns the FPU register assigned to a double-register LOperand.
320 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
321 ASSERT(op->IsDoubleRegister());
322 return ToDoubleRegister(op->index());
// Materializes an LOperand into a double register. Integer constants are
// converted via mtc1/cvt_d_w through the float scratch; stack slots are
// loaded with ldc1. Double/tagged immediates are unsupported and abort.
326 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
327 FloatRegister flt_scratch,
328 DoubleRegister dbl_scratch) {
329 if (op->IsDoubleRegister()) {
330 return ToDoubleRegister(op->index());
331 } else if (op->IsConstantOperand()) {
332 LConstantOperand* const_op = LConstantOperand::cast(op);
333 Handle<Object> literal = chunk_->LookupLiteral(const_op);
334 Representation r = chunk_->LookupLiteralRepresentation(const_op);
335 if (r.IsInteger32()) {
336 ASSERT(literal->IsNumber());
// Move int32 to FPU and convert word -> double.
337 __ li(at, Operand(static_cast<int32_t>(literal->Number())));
338 __ mtc1(at, flt_scratch);
339 __ cvt_d_w(dbl_scratch, flt_scratch);
341 } else if (r.IsDouble()) {
342 Abort("unsupported double immediate");
343 } else if (r.IsTagged()) {
344 Abort("unsupported tagged immediate");
346 } else if (op->IsStackSlot() || op->IsArgument()) {
347 MemOperand mem_op = ToMemOperand(op);
348 __ ldc1(dbl_scratch, mem_op);
// Looks up the tagged literal for a constant operand.
356 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
357 Handle<Object> literal = chunk_->LookupLiteral(op);
358 ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
// True if the constant operand has an int32 representation.
363 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
364 return chunk_->LookupLiteralRepresentation(op).IsInteger32();
// Returns a constant operand's value as int32; asserts that the value is
// exactly representable (round-trips through double without loss).
368 int LCodeGen::ToInteger32(LConstantOperand* op) const {
369 Handle<Object> value = chunk_->LookupLiteral(op);
370 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
371 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
373 return static_cast<int32_t>(value->Number());
// Returns a constant operand's numeric value as a double.
377 double LCodeGen::ToDouble(LConstantOperand* op) const {
378 Handle<Object> value = chunk_->LookupLiteral(op);
379 return value->Number();
// Converts an LOperand to an assembler Operand. Handles int32/tagged
// constants and registers; double immediates/registers and stack slots
// are unsupported here (use ToMemOperand for slots).
383 Operand LCodeGen::ToOperand(LOperand* op) {
384 if (op->IsConstantOperand()) {
385 LConstantOperand* const_op = LConstantOperand::cast(op);
386 Handle<Object> literal = chunk_->LookupLiteral(const_op);
387 Representation r = chunk_->LookupLiteralRepresentation(const_op);
388 if (r.IsInteger32()) {
389 ASSERT(literal->IsNumber());
390 return Operand(static_cast<int32_t>(literal->Number()));
391 } else if (r.IsDouble()) {
392 Abort("ToOperand Unsupported double immediate.");
394 ASSERT(r.IsTagged());
395 return Operand(literal);
396 } else if (op->IsRegister()) {
397 return Operand(ToRegister(op));
398 } else if (op->IsDoubleRegister()) {
399 Abort("ToOperand IsDoubleRegister unimplemented");
402 // Stack slots not implemented, use ToMemOperand instead.
// Computes the fp-relative memory operand for a stack-slot LOperand.
// Negative/positive index distinguishes locals from incoming parameters
// (the selecting branch appears to be on lines missing from this excerpt).
408 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
409 ASSERT(!op->IsRegister());
410 ASSERT(!op->IsDoubleRegister());
411 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
412 int index = op->index();
414 // Local or spill slot. Skip the frame pointer, function, and
415 // context in the fixed part of the frame.
416 return MemOperand(fp, -(index + 3) * kPointerSize);
418 // Incoming parameter. Skip the return address.
419 return MemOperand(fp, -(index - 1) * kPointerSize);
// Like ToMemOperand but addresses the second (high) word of a double
// stack slot, offset one pointer past the low word.
424 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
425 ASSERT(op->IsDoubleStackSlot());
426 int index = op->index();
428 // Local or spill slot. Skip the frame pointer, function, context,
429 // and the first word of the double in the fixed part of the frame.
430 return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
432 // Incoming parameter. Skip the return address and the first word of
434 return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
// Recursively writes a deoptimization translation for this environment and
// its outer environments: frame header (JS / construct stub / arguments
// adaptor) followed by one entry per environment value, de-duplicating
// values that were spilled by the allocator.
439 void LCodeGen::WriteTranslation(LEnvironment* environment,
440 Translation* translation) {
441 if (environment == NULL) return;
443 // The translation includes one command per value in the environment.
444 int translation_size = environment->values()->length();
445 // The output frame height does not include the parameters.
446 int height = translation_size - environment->parameter_count();
// Outer frames are written first so the translation nests outermost-first.
448 WriteTranslation(environment->outer(), translation);
449 int closure_id = DefineDeoptimizationLiteral(environment->closure());
450 switch (environment->frame_type()) {
452 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
455 translation->BeginConstructStubFrame(closure_id, translation_size);
457 case ARGUMENTS_ADAPTOR:
458 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
463 for (int i = 0; i < translation_size; ++i) {
464 LOperand* value = environment->values()->at(i);
465 // spilled_registers_ and spilled_double_registers_ are either
466 // both NULL or both set.
467 if (environment->spilled_registers() != NULL && value != NULL) {
468 if (value->IsRegister() &&
469 environment->spilled_registers()[value->index()] != NULL) {
// Value lives both in a register and a spill slot: record the spill
// slot and mark the register entry as a duplicate.
470 translation->MarkDuplicate();
471 AddToTranslation(translation,
472 environment->spilled_registers()[value->index()],
473 environment->HasTaggedValueAt(i));
475 value->IsDoubleRegister() &&
476 environment->spilled_double_registers()[value->index()] != NULL) {
477 translation->MarkDuplicate();
480 environment->spilled_double_registers()[value->index()],
485 AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
// Appends one environment value to a translation, dispatching on the
// operand kind (stack slot, argument, register, double, constant).
// NOTE(review): the is_tagged parameter and the tagged/int32 selecting
// conditions appear to be on lines missing from this excerpt.
490 void LCodeGen::AddToTranslation(Translation* translation,
494 // TODO(twuerthinger): Introduce marker operands to indicate that this value
495 // is not present and must be reconstructed from the deoptimizer. Currently
496 // this is only used for the arguments object.
497 translation->StoreArgumentsObject();
498 } else if (op->IsStackSlot()) {
500 translation->StoreStackSlot(op->index());
502 translation->StoreInt32StackSlot(op->index());
504 } else if (op->IsDoubleStackSlot()) {
505 translation->StoreDoubleStackSlot(op->index());
506 } else if (op->IsArgument()) {
// Arguments live past the spill slots in the frame layout.
508 int src_index = GetStackSlotCount() + op->index();
509 translation->StoreStackSlot(src_index);
510 } else if (op->IsRegister()) {
511 Register reg = ToRegister(op);
513 translation->StoreRegister(reg);
515 translation->StoreInt32Register(reg);
517 } else if (op->IsDoubleRegister()) {
518 DoubleRegister reg = ToDoubleRegister(op);
519 translation->StoreDoubleRegister(reg);
520 } else if (op->IsConstantOperand()) {
521 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
522 int src_index = DefineDeoptimizationLiteral(literal);
523 translation->StoreLiteral(src_index);
// Convenience wrapper: calls a code object recording a simple safepoint.
530 void LCodeGen::CallCode(Handle<Code> code,
531 RelocInfo::Mode mode,
532 LInstruction* instr) {
533 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
// Calls a code object, recording the source position beforehand and a
// safepoint (with lazy-deopt support) afterwards.
537 void LCodeGen::CallCodeGeneric(Handle<Code> code,
538 RelocInfo::Mode mode,
540 SafepointMode safepoint_mode) {
541 ASSERT(instr != NULL);
542 LPointerMap* pointers = instr->pointer_map();
543 RecordPosition(pointers->position());
545 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
// Calls a runtime function and records a simple lazy-deopt safepoint.
549 void LCodeGen::CallRuntime(const Runtime::Function* function,
551 LInstruction* instr) {
552 ASSERT(instr != NULL);
553 LPointerMap* pointers = instr->pointer_map();
554 ASSERT(pointers != NULL);
555 RecordPosition(pointers->position());
557 __ CallRuntime(function, num_arguments);
558 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Runtime call variant for deferred code: doubles are saved around the
// call and a registers-safepoint (no lazy deopt) is recorded.
562 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
564 LInstruction* instr) {
565 __ CallRuntimeSaveDoubles(id);
566 RecordSafepointWithRegisters(
567 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
// Registers an environment for deoptimization exactly once: builds its
// translation, assigns it a deoptimization index and records it.
571 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
572 Safepoint::DeoptMode mode) {
573 if (!environment->HasBeenRegistered()) {
574 // Physical stack frame layout:
575 // -x ............. -4 0 ..................................... y
576 // [incoming arguments] [spill slots] [pushed outgoing arguments]
578 // Layout of the environment:
579 // 0 ..................................................... size-1
580 // [parameters] [locals] [expression stack including arguments]
582 // Layout of the translation:
583 // 0 ........................................................ size - 1 + 4
584 // [expression stack including arguments] [locals] [4 words] [parameters]
585 // |>------------ translation_size ------------<|
// Count frames (frame_count presumably initialized on a missing line) and
// JS frames separately; adaptor/construct frames are not JS frames.
588 int jsframe_count = 0;
589 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
591 if (e->frame_type() == JS_FUNCTION) {
595 Translation translation(&translations_, frame_count, jsframe_count);
596 WriteTranslation(environment, &translation);
597 int deoptimization_index = deoptimizations_.length();
598 int pc_offset = masm()->pc_offset();
// pc_offset is only meaningful for lazy deopts; -1 marks eager ones.
599 environment->Register(deoptimization_index,
601 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
602 deoptimizations_.Add(environment);
// Emits a conditional jump to the eager deoptimization entry for the given
// environment when 'cc' holds for src1/src2. Supports --trap-on-deopt and
// --deopt-every-n-times (only 0/1 on MIPS) debugging flags.
607 void LCodeGen::DeoptimizeIf(Condition cc,
608 LEnvironment* environment,
610 const Operand& src2) {
611 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
612 ASSERT(environment->HasBeenRegistered());
613 int id = environment->deoptimization_index();
614 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
616 Abort("bailout was not prepared");
620 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
// Forced deopt for testing: unconditional jump on the matching opt count.
622 if (FLAG_deopt_every_n_times == 1 &&
623 info_->shared_info()->opt_count() == id) {
624 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
628 if (FLAG_trap_on_deopt) {
// Break into the debugger instead of silently deopting.
631 __ Branch(&skip, NegateCondition(cc), src1, src2);
633 __ stop("trap_on_deopt");
637 // TODO(plind): The Arm port is a little different here, due to their
638 // DeOpt jump table, which is not used for Mips yet.
639 __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
// Builds the DeoptimizationInputData for the code object from the
// environments and literals collected during code generation.
643 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
644 int length = deoptimizations_.length();
645 if (length == 0) return;
646 Handle<DeoptimizationInputData> data =
647 factory()->NewDeoptimizationInputData(length, TENURED);
649 Handle<ByteArray> translations = translations_.CreateByteArray();
650 data->SetTranslationByteArray(*translations);
651 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
653 Handle<FixedArray> literals =
654 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
655 for (int i = 0; i < deoptimization_literals_.length(); i++) {
656 literals->set(i, *deoptimization_literals_[i]);
658 data->SetLiteralArray(*literals);
660 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
661 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
663 // Populate the deoptimization entries.
664 for (int i = 0; i < length; i++) {
665 LEnvironment* env = deoptimizations_[i];
666 data->SetAstId(i, Smi::FromInt(env->ast_id()));
667 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
668 data->SetArgumentsStackHeight(i,
669 Smi::FromInt(env->arguments_stack_height()));
670 data->SetPc(i, Smi::FromInt(env->pc_offset()));
672 code->set_deoptimization_data(*data);
// Interns a literal in the deoptimization literal list, returning its
// index; existing identical handles are reused (linear search).
676 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
677 int result = deoptimization_literals_.length();
678 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
679 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
681 deoptimization_literals_.Add(literal);
// Seeds the deoptimization literal list with all inlined closures so
// inlined_function_count_ counts exactly those leading entries.
686 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
687 ASSERT(deoptimization_literals_.length() == 0);
689 const ZoneList<Handle<JSFunction> >* inlined_closures =
690 chunk()->inlined_closures();
692 for (int i = 0, length = inlined_closures->length();
695 DefineDeoptimizationLiteral(inlined_closures->at(i));
698 inlined_function_count_ = deoptimization_literals_.length();
// Records a lazy-deopt safepoint for an instruction, either a simple one
// or one that also tracks registers, per 'safepoint_mode'.
702 void LCodeGen::RecordSafepointWithLazyDeopt(
703 LInstruction* instr, SafepointMode safepoint_mode) {
704 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
705 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
707 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
708 RecordSafepointWithRegisters(
709 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
// Core safepoint recording: defines a safepoint of the given kind and
// marks every live stack-slot pointer (and, for register safepoints,
// every live pointer register plus cp) in it.
714 void LCodeGen::RecordSafepoint(
715 LPointerMap* pointers,
716 Safepoint::Kind kind,
718 Safepoint::DeoptMode deopt_mode) {
719 ASSERT(expected_safepoint_kind_ == kind);
721 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
722 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
723 kind, arguments, deopt_mode);
724 for (int i = 0; i < operands->length(); i++) {
725 LOperand* pointer = operands->at(i);
726 if (pointer->IsStackSlot()) {
727 safepoint.DefinePointerSlot(pointer->index());
728 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
729 safepoint.DefinePointerRegister(ToRegister(pointer));
732 if (kind & Safepoint::kWithRegisters) {
733 // Register cp always contains a pointer to the context.
734 safepoint.DefinePointerRegister(cp);
// Convenience overload: simple safepoint with no arguments on the stack.
739 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
740 Safepoint::DeoptMode deopt_mode) {
741 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
// Convenience overload: safepoint with an empty pointer map.
745 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
746 LPointerMap empty_pointers(RelocInfo::kNoPosition);
747 RecordSafepoint(&empty_pointers, deopt_mode);
// Safepoint that additionally tracks live pointer registers.
751 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
753 Safepoint::DeoptMode deopt_mode) {
755 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
// Safepoint that tracks pointer registers and saved double registers.
759 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
760 LPointerMap* pointers,
762 Safepoint::DeoptMode deopt_mode) {
764 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
// Records a source position in the assembler, ignoring kNoPosition.
768 void LCodeGen::RecordPosition(int position) {
769 if (position == RelocInfo::kNoPosition) return;
770 masm()->positions_recorder()->RecordPosition(position);
// Binds a block label, emitting a code comment (loop headers are marked)
// and updating the current block id.
774 void LCodeGen::DoLabel(LLabel* label) {
775 if (label->is_loop_header()) {
776 Comment(";;; B%d - LOOP entry", label->block_id());
778 Comment(";;; B%d", label->block_id());
780 __ bind(label->label());
781 current_block_ = label->block_id();
// Delegates a parallel move to the gap resolver.
786 void LCodeGen::DoParallelMove(LParallelMove* move) {
787 resolver_.Resolve(move);
// Resolves the parallel moves attached to each inner position of a gap.
791 void LCodeGen::DoGap(LGap* gap) {
792 for (int i = LGap::FIRST_INNER_POSITION;
793 i <= LGap::LAST_INNER_POSITION;
795 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
796 LParallelMove* move = gap->GetParallelMove(inner_pos);
797 if (move != NULL) DoParallelMove(move);
// Instruction gap: body (presumably DoGap(instr)) is on lines missing
// from this excerpt.
802 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
// Parameters need no code; body (if any) is on lines missing from this
// excerpt.
807 void LCodeGen::DoParameter(LParameter* instr) {
// Dispatches a generic stub call on the hydrogen major key; all variants
// return their result in v0.
812 void LCodeGen::DoCallStub(LCallStub* instr) {
813 ASSERT(ToRegister(instr->result()).is(v0));
814 switch (instr->hydrogen()->major_key()) {
815 case CodeStub::RegExpConstructResult: {
816 RegExpConstructResultStub stub;
817 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
820 case CodeStub::RegExpExec: {
822 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
825 case CodeStub::SubString: {
827 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
830 case CodeStub::NumberToString: {
831 NumberToStringStub stub;
832 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
835 case CodeStub::StringAdd: {
836 StringAddStub stub(NO_STRING_ADD_FLAGS);
837 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
840 case CodeStub::StringCompare: {
841 StringCompareStub stub;
842 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
845 case CodeStub::TranscendentalCache: {
// The tagged argument is expected on top of the stack.
846 __ lw(a0, MemOperand(sp, 0));
847 TranscendentalCacheStub stub(instr->transcendental_type(),
848 TranscendentalCacheStub::TAGGED);
849 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// OSR values are materialized elsewhere; body (if any) is on lines missing
// from this excerpt.
858 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
// Integer modulus. Power-of-two divisors use a mask, giving the result
// the sign of the dividend; the general case uses the div unit. Deopts on
// minus-zero (when required) and on division by zero.
863 void LCodeGen::DoModI(LModI* instr) {
864 Register scratch = scratch0();
865 const Register left = ToRegister(instr->InputAt(0));
866 const Register result = ToRegister(instr->result());
870 if (instr->hydrogen()->HasPowerOf2Divisor()) {
871 Register scratch = scratch0();
872 ASSERT(!left.is(scratch));
873 __ mov(scratch, left);
874 int32_t p2constant = HConstant::cast(
875 instr->hydrogen()->right())->Integer32Value();
876 ASSERT(p2constant != 0);
877 // Result always takes the sign of the dividend (left).
878 p2constant = abs(p2constant);
880 Label positive_dividend;
881 __ Branch(USE_DELAY_SLOT, &positive_dividend, ge, left, Operand(zero_reg));
// Negative dividend: mod of the negation, then negate the result back.
882 __ subu(result, zero_reg, left);
883 __ And(result, result, p2constant - 1);
884 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
885 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
887 __ Branch(USE_DELAY_SLOT, &done);
888 __ subu(result, zero_reg, result);
889 __ bind(&positive_dividend);
890 __ And(result, scratch, p2constant - 1);
892 // div runs in the background while we check for special cases.
893 Register right = EmitLoadRegister(instr->InputAt(1), scratch);
897 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
898 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
901 __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg));
904 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
905 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
// Integer division with deopt checks for division by zero, (0 / -x)
// minus-zero, kMinInt / -1 overflow, and a non-exact (remainder != 0)
// result.
912 void LCodeGen::DoDivI(LDivI* instr) {
913 const Register left = ToRegister(instr->InputAt(0));
914 const Register right = ToRegister(instr->InputAt(1));
915 const Register result = ToRegister(instr->result());
917 // On MIPS div is asynchronous - it will run in the background while we
918 // check for special cases.
922 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
923 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
926 // Check for (0 / -x) that will produce negative zero.
927 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
929 __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
930 DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
931 __ bind(&left_not_zero);
934 // Check for (-kMinInt / -1).
935 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
936 Label left_not_min_int;
937 __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
938 DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
939 __ bind(&left_not_min_int);
// Deopt if the division left a remainder (result would not be an int32).
943 DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
// Integer multiply. Constant right operands without overflow checking get
// strength-reduced code (0, 1, -1, powers of two +/- 1); otherwise a full
// mult with optional overflow and minus-zero deopt checks is emitted.
948 void LCodeGen::DoMulI(LMulI* instr) {
949 Register scratch = scratch0();
950 Register result = ToRegister(instr->result());
951 // Note that result may alias left.
952 Register left = ToRegister(instr->InputAt(0));
953 LOperand* right_op = instr->InputAt(1);
955 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
956 bool bailout_on_minus_zero =
957 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
959 if (right_op->IsConstantOperand() && !can_overflow) {
960 // Use optimized code for specific constants.
961 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
963 if (bailout_on_minus_zero && (constant < 0)) {
964 // The case of a null constant will be handled separately.
965 // If constant is negative and left is null, the result should be -0.
966 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
// Constant -1: negate. (Enclosing switch-on-constant cases appear to be
// on lines missing from this excerpt.)
971 __ Subu(result, zero_reg, left);
974 if (bailout_on_minus_zero) {
975 // If left is strictly negative and the constant is null, the
976 // result is -0. Deoptimize if required, otherwise return 0.
977 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
979 __ mov(result, zero_reg);
983 __ Move(result, left);
986 // Multiplying by powers of two and powers of two plus or minus
987 // one can be done faster with shifted operands.
988 // For other constants we emit standard code.
// Branch-free absolute value: mask is 0 or -1 depending on sign.
989 int32_t mask = constant >> 31;
990 uint32_t constant_abs = (constant + mask) ^ mask;
992 if (IsPowerOf2(constant_abs) ||
993 IsPowerOf2(constant_abs - 1) ||
994 IsPowerOf2(constant_abs + 1)) {
995 if (IsPowerOf2(constant_abs)) {
996 int32_t shift = WhichPowerOf2(constant_abs);
997 __ sll(result, left, shift);
998 } else if (IsPowerOf2(constant_abs - 1)) {
999 int32_t shift = WhichPowerOf2(constant_abs - 1);
1000 __ sll(result, left, shift);
1001 __ Addu(result, result, left);
1002 } else if (IsPowerOf2(constant_abs + 1)) {
1003 int32_t shift = WhichPowerOf2(constant_abs + 1);
1004 __ sll(result, left, shift);
1005 __ Subu(result, result, left);
1008 // Correct the sign of the result is the constant is negative.
1010 __ Subu(result, zero_reg, result);
1014 // Generate standard code.
1015 __ li(at, constant);
1016 __ Mul(result, left, at);
1021 Register right = EmitLoadRegister(right_op, scratch);
1022 if (bailout_on_minus_zero) {
// Save left | right so the sign of the operands survives the multiply.
1023 __ Or(ToRegister(instr->TempAt(0)), left, right);
1027 // hi:lo = left * right.
1028 __ mult(left, right);
// Overflow iff the high word differs from the sign-extended low word.
1031 __ sra(at, result, 31);
1032 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1034 __ Mul(result, left, right);
1037 if (bailout_on_minus_zero) {
1038 // Bail out if the result is supposed to be negative zero.
1040 __ Branch(&done, ne, result, Operand(zero_reg));
1042 instr->environment(),
1043 ToRegister(instr->TempAt(0)),
// Bitwise AND/OR/XOR on int32 operands; right may be a register, constant
// or a stack slot (loaded through 'at').
1051 void LCodeGen::DoBitI(LBitI* instr) {
1052 LOperand* left_op = instr->InputAt(0);
1053 LOperand* right_op = instr->InputAt(1);
1054 ASSERT(left_op->IsRegister());
1055 Register left = ToRegister(left_op);
1056 Register result = ToRegister(instr->result());
1057 Operand right(no_reg);
1059 if (right_op->IsStackSlot() || right_op->IsArgument()) {
1060 right = Operand(EmitLoadRegister(right_op, at));
1062 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1063 right = ToOperand(right_op);
1066 switch (instr->op()) {
1067 case Token::BIT_AND:
1068 __ And(result, left, right);
1071 __ Or(result, left, right);
1073 case Token::BIT_XOR:
1074 __ Xor(result, left, right);
// Shift operations (SAR/SHR/SHL) with variable or constant shift counts.
// SHR of a value with the sign bit set deopts when the result must be a
// non-negative int32.
1083 void LCodeGen::DoShiftI(LShiftI* instr) {
1084 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1085 // result may alias either of them.
1086 LOperand* right_op = instr->InputAt(1);
1087 Register left = ToRegister(instr->InputAt(0));
1088 Register result = ToRegister(instr->result());
1090 if (right_op->IsRegister()) {
1091 // No need to mask the right operand on MIPS, it is built into the variable
1092 // shift instructions.
1093 switch (instr->op()) {
1095 __ srav(result, left, ToRegister(right_op));
1098 __ srlv(result, left, ToRegister(right_op));
1099 if (instr->can_deopt()) {
1100 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1104 __ sllv(result, left, ToRegister(right_op));
1111 // Mask the right_op operand.
1112 int value = ToInteger32(LConstantOperand::cast(right_op));
1113 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1114 switch (instr->op()) {
1116 if (shift_count != 0) {
1117 __ sra(result, left, shift_count);
1119 __ Move(result, left);
1123 if (shift_count != 0) {
1124 __ srl(result, left, shift_count);
// Shift by 0: a set sign bit would make the uint32 result unrepresentable.
1126 if (instr->can_deopt()) {
1127 __ And(at, left, Operand(0x80000000));
1128 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1130 __ Move(result, left);
1134 if (shift_count != 0) {
1135 __ sll(result, left, shift_count);
1137 __ Move(result, left);
// Integer subtraction; uses SubuAndCheckForOverflow plus a deopt when the
// hydrogen instruction can overflow, plain Subu otherwise.
1148 void LCodeGen::DoSubI(LSubI* instr) {
1149 LOperand* left = instr->InputAt(0);
1150 LOperand* right = instr->InputAt(1);
1151 LOperand* result = instr->result();
1152 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1154 if (!can_overflow) {
1155 if (right->IsStackSlot() || right->IsArgument()) {
1156 Register right_reg = EmitLoadRegister(right, at);
1157 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1159 ASSERT(right->IsRegister() || right->IsConstantOperand());
1160 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1162 } else { // can_overflow.
1163 Register overflow = scratch0();
1164 Register scratch = scratch1();
1165 if (right->IsStackSlot() ||
1166 right->IsArgument() ||
1167 right->IsConstantOperand()) {
1168 Register right_reg = EmitLoadRegister(right, scratch);
1169 __ SubuAndCheckForOverflow(ToRegister(result),
1172 overflow); // Reg at also used as scratch.
1174 ASSERT(right->IsRegister());
1175 // Due to overflow check macros not supporting constant operands,
1176 // handling the IsConstantOperand case was moved to prev if clause.
1177 __ SubuAndCheckForOverflow(ToRegister(result),
1180 overflow); // Reg at also used as scratch.
// A negative overflow register signals that the subtraction overflowed.
1182 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
// Loads an integer constant into the result register.
1187 void LCodeGen::DoConstantI(LConstantI* instr) {
1188 ASSERT(instr->result()->IsRegister());
1189 __ li(ToRegister(instr->result()), Operand(instr->value()));
// Loads a double constant into the result FPU register.
1193 void LCodeGen::DoConstantD(LConstantD* instr) {
1194 ASSERT(instr->result()->IsDoubleRegister());
1195 DoubleRegister result = ToDoubleRegister(instr->result());
1196 double v = instr->value();
// Loads a tagged constant: Smis load as immediates, heap objects go
// through LoadHeapObject (which handles embedding/relocation).
1201 void LCodeGen::DoConstantT(LConstantT* instr) {
1202 Handle<Object> value = instr->value();
1203 if (value->IsSmi()) {
1204 __ li(ToRegister(instr->result()), Operand(value));
1206 __ LoadHeapObject(ToRegister(instr->result()),
1207 Handle<HeapObject>::cast(value));
// Loads the JSArray length field into the result register.
1212 void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
1213 Register result = ToRegister(instr->result());
1214 Register array = ToRegister(instr->InputAt(0));
1215 __ lw(result, FieldMemOperand(array, JSArray::kLengthOffset));
// Loads the FixedArrayBase length field into the result register.
1219 void LCodeGen::DoFixedArrayBaseLength(LFixedArrayBaseLength* instr) {
1220 Register result = ToRegister(instr->result());
1221 Register array = ToRegister(instr->InputAt(0));
1222 __ lw(result, FieldMemOperand(array, FixedArrayBase::kLengthOffset));
// Extracts the elements kind of the input object from its map's
// bit field 2 into the result register.
1226 void LCodeGen::DoElementsKind(LElementsKind* instr) {
1227 Register result = ToRegister(instr->result());
1228 Register input = ToRegister(instr->InputAt(0));
1230 // Load map into |result|.
1231 __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
1232 // Load the map's "bit field 2" into |result|. We only need the first byte,
1233 // but the following bit field extraction takes care of that anyway.
1234 __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
1235 // Retrieve elements_kind from bit field 2.
1236 __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
// Implements Object.prototype.valueOf-style unwrapping: for a JSValue
// wrapper return its boxed value; for Smis and non-JSValue objects
// return the input unchanged.
1240 void LCodeGen::DoValueOf(LValueOf* instr) {
1241 Register input = ToRegister(instr->InputAt(0));
1242 Register result = ToRegister(instr->result());
1243 Register map = ToRegister(instr->TempAt(0));
1246 // If the object is a smi return the object.
1247 __ Move(result, input);
1248 __ JumpIfSmi(input, &done);
1250 // If the object is not a value type, return the object.
1251 __ GetObjectType(input, map, map);
1252 __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
1253 __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
// Loads a field of a JSDate. Field 0 (the time value) is always read
// directly; other cached fields are read inline only when the date cache
// stamp still matches, otherwise we fall back to the C runtime helper.
// Fixed registers (a0 in, v0 out) match the C-call convention used below.
1259 void LCodeGen::DoDateField(LDateField* instr) {
1260 Register object = ToRegister(instr->InputAt(0));
1261 Register result = ToRegister(instr->result());
1262 Register scratch = ToRegister(instr->TempAt(0));
1263 Smi* index = instr->index();
1264 Label runtime, done;
1265 ASSERT(object.is(a0));
1266 ASSERT(result.is(v0));
1267 ASSERT(!scratch.is(scratch0()));
1268 ASSERT(!scratch.is(object));
1271 __ AbortIfSmi(object);
1272 __ GetObjectType(object, scratch, scratch);
1273 __ Assert(eq, "Trying to get date field from non-date.",
1274 scratch, Operand(JS_DATE_TYPE));
1277 if (index->value() == 0) {
1278 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1280 if (index->value() < JSDate::kFirstUncachedField) {
// Compare the global date-cache stamp against the stamp recorded in
// the object; on mismatch the cached field is stale -> runtime.
1281 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1282 __ li(scratch, Operand(stamp));
1283 __ lw(scratch, MemOperand(scratch));
1284 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1285 __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1286 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1287 kPointerSize * index->value()));
// Slow path: call the C date-field helper with (object, index).
1291 __ PrepareCallCFunction(2, scratch);
1292 __ li(a1, Operand(index));
1293 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
// Bitwise NOT: nor(result, zero, input) computes ~input on MIPS.
1299 void LCodeGen::DoBitNotI(LBitNotI* instr) {
1300 Register input = ToRegister(instr->InputAt(0));
1301 Register result = ToRegister(instr->result());
1302 __ Nor(result, zero_reg, Operand(input));
// Throws the input value via Runtime::kThrow; control never returns.
1306 void LCodeGen::DoThrow(LThrow* instr) {
1307 Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1309 CallRuntime(Runtime::kThrow, 1, instr);
1311 if (FLAG_debug_code) {
1312 __ stop("Unreachable code.");
// Emits an integer addition; mirrors DoSubI. Fast path uses Addu; when
// overflow is possible, AdduAndCheckForOverflow sets an indicator that
// triggers a deopt when negative.
1317 void LCodeGen::DoAddI(LAddI* instr) {
1318 LOperand* left = instr->InputAt(0);
1319 LOperand* right = instr->InputAt(1);
1320 LOperand* result = instr->result();
1321 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1323 if (!can_overflow) {
1324 if (right->IsStackSlot() || right->IsArgument()) {
1325 Register right_reg = EmitLoadRegister(right, at);
1326 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1328 ASSERT(right->IsRegister() || right->IsConstantOperand());
1329 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1331 } else { // can_overflow.
1332 Register overflow = scratch0();
1333 Register scratch = scratch1();
// The overflow-checking macro needs register inputs, so constants and
// stack slots are loaded into a register first.
1334 if (right->IsStackSlot() ||
1335 right->IsArgument() ||
1336 right->IsConstantOperand()) {
1337 Register right_reg = EmitLoadRegister(right, scratch);
1338 __ AdduAndCheckForOverflow(ToRegister(result),
1341 overflow); // Reg at also used as scratch.
1343 ASSERT(right->IsRegister());
1344 // Due to overflow check macros not supporting constant operands,
1345 // handling the IsConstantOperand case was moved to prev if clause.
1346 __ AdduAndCheckForOverflow(ToRegister(result),
1349 overflow); // Reg at also used as scratch.
1351 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
// Double-precision arithmetic. ADD/SUB/MUL/DIV map to single FPU
// instructions; MOD calls the C double_fp_operation helper, preserving
// a0-a3 across the call.
1356 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1357 DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
1358 DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
1359 DoubleRegister result = ToDoubleRegister(instr->result());
1360 switch (instr->op()) {
1362 __ add_d(result, left, right);
1365 __ sub_d(result, left, right);
1368 __ mul_d(result, left, right);
1371 __ div_d(result, left, right);
1374 // Save a0-a3 on the stack.
1375 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1376 __ MultiPush(saved_regs);
1378 __ PrepareCallCFunction(0, 2, scratch0());
1379 __ SetCallCDoubleArguments(left, right);
1381 ExternalReference::double_fp_operation(Token::MOD, isolate()),
1383 // Move the result in the double result register.
1384 __ GetCFunctionDoubleResult(result);
1386 // Restore saved register.
1387 __ MultiPop(saved_regs);
// Generic (tagged) binary arithmetic: delegates to the BinaryOpStub with
// the fixed register calling convention a1 (left), a0 (right) -> v0.
1397 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1398 ASSERT(ToRegister(instr->InputAt(0)).is(a1));
1399 ASSERT(ToRegister(instr->InputAt(1)).is(a0));
1400 ASSERT(ToRegister(instr->result()).is(v0));
1402 BinaryOpStub stub(instr->op(), NO_OVERWRITE);
1403 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1404 // Other arch use a nop here, to signal that there is no inlined
1405 // patchable code. Mips does not need the nop, since our marker
1406 // instruction (andi zero_reg) will never be used in normal code.
// Returns the index of the next block after |block| that actually emits
// code (i.e. whose label has not been replaced).
1410 int LCodeGen::GetNextEmittedBlock(int block) {
1411 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1412 LLabel* label = chunk_->GetLabel(i);
1413 if (!label->HasReplacement()) return i;
// Emits a two-way GPR branch on (src1 cc src2), eliding the jump to
// whichever target is the fall-through block: if the taken/false target
// is next we branch on the negated condition instead, and only when
// neither target falls through do we emit both branches.
1419 void LCodeGen::EmitBranch(int left_block, int right_block,
1420 Condition cc, Register src1, const Operand& src2) {
1421 int next_block = GetNextEmittedBlock(current_block_);
1422 right_block = chunk_->LookupDestination(right_block);
1423 left_block = chunk_->LookupDestination(left_block);
1424 if (right_block == left_block) {
1425 EmitGoto(left_block);
1426 } else if (left_block == next_block) {
1427 __ Branch(chunk_->GetAssemblyLabel(right_block),
1428 NegateCondition(cc), src1, src2);
1429 } else if (right_block == next_block) {
1430 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1432 __ Branch(chunk_->GetAssemblyLabel(left_block), cc, src1, src2);
1433 __ Branch(chunk_->GetAssemblyLabel(right_block));
// FPU counterpart of EmitBranch: same fall-through elision logic, but
// compares two FPU registers via BranchF.
1438 void LCodeGen::EmitBranchF(int left_block, int right_block,
1439 Condition cc, FPURegister src1, FPURegister src2) {
1440 int next_block = GetNextEmittedBlock(current_block_);
1441 right_block = chunk_->LookupDestination(right_block);
1442 left_block = chunk_->LookupDestination(left_block);
1443 if (right_block == left_block) {
1444 EmitGoto(left_block);
1445 } else if (left_block == next_block) {
1446 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1447 NegateCondition(cc), src1, src2);
1448 } else if (right_block == next_block) {
1449 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1451 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL, cc, src1, src2);
1452 __ Branch(chunk_->GetAssemblyLabel(right_block));
// Emits the ToBoolean branch for LBranch. Integer32 and Double inputs
// get a direct compare against zero; tagged inputs with a known type
// (Boolean/Smi) get a single compare; otherwise we test exactly the set
// of types the type-feedback oracle expects and deopt on anything else.
// BUGFIX: the label references below had been corrupted by HTML-entity
// mangling ("&not" -> U+00AC), e.g. `¬_string` instead of
// `&not_string`; restored the original `&not_string`/`&not_heap_number`.
1457 void LCodeGen::DoBranch(LBranch* instr) {
1458 int true_block = chunk_->LookupDestination(instr->true_block_id());
1459 int false_block = chunk_->LookupDestination(instr->false_block_id());
1461 Representation r = instr->hydrogen()->value()->representation();
1462 if (r.IsInteger32()) {
1463 Register reg = ToRegister(instr->InputAt(0));
1464 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1465 } else if (r.IsDouble()) {
1466 DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
1467 // Test the double value. Zero and NaN are false.
1468 EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero);
1470 ASSERT(r.IsTagged());
1471 Register reg = ToRegister(instr->InputAt(0));
1472 HType type = instr->hydrogen()->value()->type();
1473 if (type.IsBoolean()) {
1474 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1475 EmitBranch(true_block, false_block, eq, reg, Operand(at));
1476 } else if (type.IsSmi()) {
1477 EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
1479 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1480 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1482 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
1483 // Avoid deopts in the case where we've never executed this path before.
1484 if (expected.IsEmpty()) expected = ToBooleanStub::all_types();
1486 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
1487 // undefined -> false.
1488 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
1489 __ Branch(false_label, eq, reg, Operand(at));
1491 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
1492 // Boolean -> its value.
1493 __ LoadRoot(at, Heap::kTrueValueRootIndex);
1494 __ Branch(true_label, eq, reg, Operand(at));
1495 __ LoadRoot(at, Heap::kFalseValueRootIndex);
1496 __ Branch(false_label, eq, reg, Operand(at));
1498 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
1500 __ LoadRoot(at, Heap::kNullValueRootIndex);
1501 __ Branch(false_label, eq, reg, Operand(at));
1504 if (expected.Contains(ToBooleanStub::SMI)) {
1505 // Smis: 0 -> false, all other -> true.
1506 __ Branch(false_label, eq, reg, Operand(zero_reg));
1507 __ JumpIfSmi(reg, true_label);
1508 } else if (expected.NeedsMap()) {
1509 // If we need a map later and have a Smi -> deopt.
1510 __ And(at, reg, Operand(kSmiTagMask));
1511 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1514 const Register map = scratch0();
1515 if (expected.NeedsMap()) {
1516 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
1517 if (expected.CanBeUndetectable()) {
1518 // Undetectable -> false.
1519 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
1520 __ And(at, at, Operand(1 << Map::kIsUndetectable));
1521 __ Branch(false_label, ne, at, Operand(zero_reg));
1525 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
1526 // spec object -> true.
1527 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1528 __ Branch(true_label, ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
1531 if (expected.Contains(ToBooleanStub::STRING)) {
1532 // String value -> false iff empty.
1534 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
1535 __ Branch(&not_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
1536 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
1537 __ Branch(true_label, ne, at, Operand(zero_reg));
1538 __ Branch(false_label);
1539 __ bind(&not_string);
1542 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
1543 // heap number -> false iff +0, -0, or NaN.
1544 DoubleRegister dbl_scratch = double_scratch0();
1545 Label not_heap_number;
1546 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
1547 __ Branch(&not_heap_number, ne, map, Operand(at));
1548 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
1549 __ BranchF(true_label, false_label, ne, dbl_scratch, kDoubleRegZero);
1550 // Falls through if dbl_scratch == 0.
1551 __ Branch(false_label);
1552 __ bind(&not_heap_number);
1555 // We've seen something for the first time -> deopt.
1556 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
// Unconditional jump to |block|, elided when |block| is the next block
// to be emitted (fall-through).
1562 void LCodeGen::EmitGoto(int block) {
1563 block = chunk_->LookupDestination(block);
1564 int next_block = GetNextEmittedBlock(current_block_);
1565 if (block != next_block) {
1566 __ jmp(chunk_->GetAssemblyLabel(block));
// LGoto simply delegates to EmitGoto.
1571 void LCodeGen::DoGoto(LGoto* instr) {
1572 EmitGoto(instr->block_id());
// Maps a comparison token to the MIPS branch condition, choosing the
// unsigned variant (lo/hi/ls/hs) when is_unsigned is set. Returns
// kNoCondition for tokens with no direct mapping.
// NOTE(review): case labels between the visible lines are elided.
1576 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1577 Condition cond = kNoCondition;
1580 case Token::EQ_STRICT:
1584 cond = is_unsigned ? lo : lt;
1587 cond = is_unsigned ? hi : gt;
1590 cond = is_unsigned ? ls : le;
1593 cond = is_unsigned ? hs : ge;
1596 case Token::INSTANCEOF:
// Compare-and-branch for int32/double inputs. Constant-vs-constant
// comparisons are folded at compile time into a direct goto; double
// comparisons first branch to the false block on unordered (NaN);
// integer comparisons normalize a constant on the left by transposing
// operands and reversing the condition.
1604 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1605 LOperand* left = instr->InputAt(0);
1606 LOperand* right = instr->InputAt(1);
1607 int false_block = chunk_->LookupDestination(instr->false_block_id());
1608 int true_block = chunk_->LookupDestination(instr->true_block_id());
1610 Condition cond = TokenToCondition(instr->op(), false);
1612 if (left->IsConstantOperand() && right->IsConstantOperand()) {
1613 // We can statically evaluate the comparison.
1614 double left_val = ToDouble(LConstantOperand::cast(left));
1615 double right_val = ToDouble(LConstantOperand::cast(right));
1617 EvalComparison(instr->op(), left_val, right_val) ? true_block
1619 EmitGoto(next_block);
1621 if (instr->is_double()) {
1622 // Compare left and right as doubles and load the
1623 // resulting flags into the normal status register.
1624 FPURegister left_reg = ToDoubleRegister(left);
1625 FPURegister right_reg = ToDoubleRegister(right);
1627 // If a NaN is involved, i.e. the result is unordered,
1628 // jump to false block label.
1629 __ BranchF(NULL, chunk_->GetAssemblyLabel(false_block), eq,
1630 left_reg, right_reg);
1632 EmitBranchF(true_block, false_block, cond, left_reg, right_reg);
1635 Operand cmp_right = Operand(0);
1637 if (right->IsConstantOperand()) {
1638 cmp_left = ToRegister(left);
1639 cmp_right = Operand(ToInteger32(LConstantOperand::cast(right)));
1640 } else if (left->IsConstantOperand()) {
1641 cmp_left = ToRegister(right);
1642 cmp_right = Operand(ToInteger32(LConstantOperand::cast(left)));
1643 // We transposed the operands. Reverse the condition.
1644 cond = ReverseCondition(cond);
1646 cmp_left = ToRegister(left);
1647 cmp_right = Operand(ToRegister(right));
1650 EmitBranch(true_block, false_block, cond, cmp_left, cmp_right);
// Branches on pointer identity of two tagged values.
1656 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
1657 Register left = ToRegister(instr->InputAt(0));
1658 Register right = ToRegister(instr->InputAt(1));
1659 int false_block = chunk_->LookupDestination(instr->false_block_id());
1660 int true_block = chunk_->LookupDestination(instr->true_block_id());
1662 EmitBranch(true_block, false_block, eq, left, Operand(right));
// Branches on equality between a register and the instruction's
// compile-time constant right operand.
1666 void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
1667 Register left = ToRegister(instr->InputAt(0));
1668 int true_block = chunk_->LookupDestination(instr->true_block_id());
1669 int false_block = chunk_->LookupDestination(instr->false_block_id());
1671 EmitBranch(true_block, false_block, eq, left,
1672 Operand(instr->hydrogen()->right()));
// Branches on comparison against null/undefined. Strict equality checks
// only the requested nil value; non-strict also accepts the other nil
// value and undetectable objects (which compare equal to undefined).
1677 void LCodeGen::DoIsNilAndBranch(LIsNilAndBranch* instr) {
1678 Register scratch = scratch0();
1679 Register reg = ToRegister(instr->InputAt(0));
1680 int false_block = chunk_->LookupDestination(instr->false_block_id());
1682 // If the expression is known to be untagged or a smi, then it's definitely
1683 // not null, and it can't be a an undetectable object.
1684 if (instr->hydrogen()->representation().IsSpecialization() ||
1685 instr->hydrogen()->type().IsSmi()) {
1686 EmitGoto(false_block);
1690 int true_block = chunk_->LookupDestination(instr->true_block_id());
1692 Heap::RootListIndex nil_value = instr->nil() == kNullValue ?
1693 Heap::kNullValueRootIndex :
1694 Heap::kUndefinedValueRootIndex;
1695 __ LoadRoot(at, nil_value);
1696 if (instr->kind() == kStrictEquality) {
1697 EmitBranch(true_block, false_block, eq, reg, Operand(at));
1699 Heap::RootListIndex other_nil_value = instr->nil() == kNullValue ?
1700 Heap::kUndefinedValueRootIndex :
1701 Heap::kNullValueRootIndex;
1702 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1703 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1704 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1705 __ LoadRoot(at, other_nil_value); // In the delay slot.
1706 __ Branch(USE_DELAY_SLOT, true_label, eq, reg, Operand(at));
1707 __ JumpIfSmi(reg, false_label); // In the delay slot.
1708 // Check for undetectable objects by looking in the bit field in
1709 // the map. The object has already been smi checked.
1710 __ lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
1711 __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
1712 __ And(scratch, scratch, 1 << Map::kIsUndetectable);
1713 EmitBranch(true_block, false_block, ne, scratch, Operand(zero_reg));
// Emits the "is object" test: Smis and undetectable objects are not
// objects, null is, and otherwise the instance type must lie in the
// non-callable spec-object range. Branches to is_not_object/is_object;
// falls through with temp2 holding the instance type for the caller's
// final range compare.
1718 Condition LCodeGen::EmitIsObject(Register input,
1721 Label* is_not_object,
1723 __ JumpIfSmi(input, is_not_object);
1725 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
1726 __ Branch(is_object, eq, input, Operand(temp2));
1729 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1730 // Undetectable objects behave like undefined.
1731 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1732 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
1733 __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
1735 // Load instance type and check that it is in object type range.
1736 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
1737 __ Branch(is_not_object,
1738 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
// Branches on whether the input is a JS object, using EmitIsObject for
// the early-out checks and then comparing the instance type (left in
// temp2) against the upper bound of the non-callable object range.
1744 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1745 Register reg = ToRegister(instr->InputAt(0));
1746 Register temp1 = ToRegister(instr->TempAt(0));
1747 Register temp2 = scratch0();
1749 int true_block = chunk_->LookupDestination(instr->true_block_id());
1750 int false_block = chunk_->LookupDestination(instr->false_block_id());
1751 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1752 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1754 Condition true_cond =
1755 EmitIsObject(reg, temp1, temp2, false_label, true_label);
1757 EmitBranch(true_block, false_block, true_cond, temp2,
1758 Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
// Emits the "is string" test: Smis are not strings; otherwise loads the
// instance type into temp1 for the caller's compare against
// FIRST_NONSTRING_TYPE.
1762 Condition LCodeGen::EmitIsString(Register input,
1764 Label* is_not_string) {
1765 __ JumpIfSmi(input, is_not_string);
1766 __ GetObjectType(input, temp1, temp1);
// Branches on whether the input is a string, comparing the instance
// type produced by EmitIsString against FIRST_NONSTRING_TYPE.
1772 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
1773 Register reg = ToRegister(instr->InputAt(0));
1774 Register temp1 = ToRegister(instr->TempAt(0));
1776 int true_block = chunk_->LookupDestination(instr->true_block_id());
1777 int false_block = chunk_->LookupDestination(instr->false_block_id());
1778 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1780 Condition true_cond =
1781 EmitIsString(reg, temp1, false_label);
1783 EmitBranch(true_block, false_block, true_cond, temp1,
1784 Operand(FIRST_NONSTRING_TYPE));
// Branches on the Smi tag bit of the input (tag bits zero -> Smi).
1788 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1789 int true_block = chunk_->LookupDestination(instr->true_block_id());
1790 int false_block = chunk_->LookupDestination(instr->false_block_id());
1792 Register input_reg = EmitLoadRegister(instr->InputAt(0), at);
1793 __ And(at, input_reg, kSmiTagMask);
1794 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
// Branches on the map's kIsUndetectable bit; Smis go straight to the
// false block.
1798 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
1799 Register input = ToRegister(instr->InputAt(0));
1800 Register temp = ToRegister(instr->TempAt(0));
1802 int true_block = chunk_->LookupDestination(instr->true_block_id());
1803 int false_block = chunk_->LookupDestination(instr->false_block_id());
1805 __ JumpIfSmi(input, chunk_->GetAssemblyLabel(false_block));
1806 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
1807 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
1808 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
1809 EmitBranch(true_block, false_block, ne, at, Operand(zero_reg));
// Maps a comparison token to a condition for testing a CompareIC result
// against zero; kNoCondition for unsupported tokens.
// NOTE(review): the switch's case bodies are elided in this excerpt.
1813 static Condition ComputeCompareCondition(Token::Value op) {
1815 case Token::EQ_STRICT:
1828 return kNoCondition;
// String comparison via the CompareIC; the IC leaves its result in v0,
// which is then compared against zero with the token's condition.
1833 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
1834 Token::Value op = instr->op();
1835 int true_block = chunk_->LookupDestination(instr->true_block_id());
1836 int false_block = chunk_->LookupDestination(instr->false_block_id());
1838 Handle<Code> ic = CompareIC::GetUninitialized(op);
1839 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1841 Condition condition = ComputeCompareCondition(op);
1843 EmitBranch(true_block, false_block, condition, v0, Operand(zero_reg));
// Returns the single instance type to compare against for a
// [from, to] instance-type range check.
1847 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
1848 InstanceType from = instr->from();
1849 InstanceType to = instr->to();
1850 if (from == FIRST_TYPE) return to;
1851 ASSERT(from == to || to == LAST_TYPE);
// Returns the branch condition matching TestType's compare value:
// eq for an exact type, hs/ls for open-ended ranges.
1856 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
1857 InstanceType from = instr->from();
1858 InstanceType to = instr->to();
1859 if (from == to) return eq;
1860 if (to == LAST_TYPE) return hs;
1861 if (from == FIRST_TYPE) return ls;
// Branches on whether the input's instance type falls in the
// instruction's [from, to] range; Smis go to the false block.
1867 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1868 Register scratch = scratch0();
1869 Register input = ToRegister(instr->InputAt(0));
1871 int true_block = chunk_->LookupDestination(instr->true_block_id());
1872 int false_block = chunk_->LookupDestination(instr->false_block_id());
1874 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1876 __ JumpIfSmi(input, false_label);
1878 __ GetObjectType(input, scratch, scratch);
1879 EmitBranch(true_block,
1881 BranchCondition(instr->hydrogen()),
1883 Operand(TestType(instr->hydrogen())));
// Extracts the cached array index from a string's hash field.
1887 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
1888 Register input = ToRegister(instr->InputAt(0));
1889 Register result = ToRegister(instr->result());
1891 if (FLAG_debug_code) {
1892 __ AbortIfNotString(input);
1895 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
1896 __ IndexFromHash(result, result);
// Branches on whether a string's hash field indicates a cached array
// index (mask bits all clear -> index is present).
1900 void LCodeGen::DoHasCachedArrayIndexAndBranch(
1901 LHasCachedArrayIndexAndBranch* instr) {
1902 Register input = ToRegister(instr->InputAt(0));
1903 Register scratch = scratch0();
1905 int true_block = chunk_->LookupDestination(instr->true_block_id());
1906 int false_block = chunk_->LookupDestination(instr->false_block_id());
1909 FieldMemOperand(input, String::kHashFieldOffset));
1910 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
1911 EmitBranch(true_block, false_block, eq, at, Operand(zero_reg));
1915 // Branches to a label or falls through with the answer in flags. Trashes
1916 // the temp registers, but not the input.
// Tests whether |input|'s class (via its map's constructor's shared-info
// instance class name) equals |class_name|. "Function" and "Object" get
// type-range fast paths; otherwise the class name symbol is left in
// |temp| for the caller's identity compare.
1917 void LCodeGen::EmitClassOfTest(Label* is_true,
1919 Handle<String>class_name,
1923 ASSERT(!input.is(temp));
1924 ASSERT(!input.is(temp2));
1925 ASSERT(!temp.is(temp2));
1927 __ JumpIfSmi(input, is_false);
1929 if (class_name->IsEqualTo(CStrVector("Function"))) {
1930 // Assuming the following assertions, we can use the same compares to test
1931 // for both being a function type and being in the object type range.
1932 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
1933 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1934 FIRST_SPEC_OBJECT_TYPE + 1);
1935 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
1936 LAST_SPEC_OBJECT_TYPE - 1);
1937 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
1939 __ GetObjectType(input, temp, temp2);
1940 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1941 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
1942 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
1944 // Faster code path to avoid two compares: subtract lower bound from the
1945 // actual type and do a signed compare with the width of the type range.
1946 __ GetObjectType(input, temp, temp2);
1947 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1948 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
1949 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1952 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
1953 // Check if the constructor in the map is a function.
1954 __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
1956 // Objects with a non-function constructor have class 'Object'.
1957 __ GetObjectType(temp, temp2, temp2);
1958 if (class_name->IsEqualTo(CStrVector("Object"))) {
1959 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
1961 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
1964 // temp now contains the constructor function. Grab the
1965 // instance class name from there.
1966 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1967 __ lw(temp, FieldMemOperand(temp,
1968 SharedFunctionInfo::kInstanceClassNameOffset));
1969 // The class name we are testing against is a symbol because it's a literal.
1970 // The name in the constructor is a symbol because of the way the context is
1971 // booted. This routine isn't expected to work for random API-created
1972 // classes and it doesn't have to because you can't access it with natives
1973 // syntax. Since both sides are symbols it is sufficient to use an identity
1976 // End with the address of this class_name instance in temp register.
1977 // On MIPS, the caller must do the comparison with Handle<String>class_name.
// Branches on EmitClassOfTest's result: after the fall-through, |temp|
// holds the object's class-name symbol, compared by identity against
// the literal class name.
1981 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1982 Register input = ToRegister(instr->InputAt(0));
1983 Register temp = scratch0();
1984 Register temp2 = ToRegister(instr->TempAt(0));
1985 Handle<String> class_name = instr->hydrogen()->class_name();
1987 int true_block = chunk_->LookupDestination(instr->true_block_id());
1988 int false_block = chunk_->LookupDestination(instr->false_block_id());
1990 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1991 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1993 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1995 EmitBranch(true_block, false_block, eq, temp, Operand(class_name));
// Branches on whether the input object's map equals the instruction's
// expected map.
1999 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2000 Register reg = ToRegister(instr->InputAt(0));
2001 Register temp = ToRegister(instr->TempAt(0));
2002 int true_block = instr->true_block_id();
2003 int false_block = instr->false_block_id();
2005 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2006 EmitBranch(true_block, false_block, eq, temp, Operand(instr->map()));
// Generic instanceof via InstanceofStub (object in a0, function in a1).
// The stub returns zero for "is instance"; materialize true/false
// accordingly in v0.
2010 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2011 Label true_label, done;
2012 ASSERT(ToRegister(instr->InputAt(0)).is(a0)); // Object is in a0.
2013 ASSERT(ToRegister(instr->InputAt(1)).is(a1)); // Function is in a1.
2014 Register result = ToRegister(instr->result());
2015 ASSERT(result.is(v0));
2017 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2018 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2020 __ Branch(&true_label, eq, result, Operand(zero_reg));
2021 __ li(result, Operand(factory()->false_value()));
2023 __ bind(&true_label);
2024 __ li(result, Operand(factory()->true_value()));
// instanceof against a known global function, with an inlined
// map/result cache: the hole values emitted below are later patched to
// the last map/result pair, so a map match yields the cached answer
// without calling the stub. Cache misses filter out null and strings,
// then fall through to the deferred stub call.
2029 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2030 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
2032 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2033 LInstanceOfKnownGlobal* instr)
2034 : LDeferredCode(codegen), instr_(instr) { }
2035 virtual void Generate() {
2036 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2038 virtual LInstruction* instr() { return instr_; }
2039 Label* map_check() { return &map_check_; }
2042 LInstanceOfKnownGlobal* instr_;
2046 DeferredInstanceOfKnownGlobal* deferred;
2047 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
2049 Label done, false_result;
2050 Register object = ToRegister(instr->InputAt(0));
2051 Register temp = ToRegister(instr->TempAt(0));
2052 Register result = ToRegister(instr->result());
2054 ASSERT(object.is(a0));
2055 ASSERT(result.is(v0));
2057 // A Smi is not instance of anything.
2058 __ JumpIfSmi(object, &false_result);
2060 // This is the inlined call site instanceof cache. The two occurences of the
2061 // hole value will be patched to the last map/result pair generated by the
2064 Register map = temp;
2065 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
// Trampoline pools must not split this patchable sequence.
2067 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2068 __ bind(deferred->map_check()); // Label for calculating code patching.
2069 // We use Factory::the_hole_value() on purpose instead of loading from the
2070 // root array to force relocation to be able to later patch with
2072 Handle<JSGlobalPropertyCell> cell =
2073 factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
2074 __ li(at, Operand(Handle<Object>(cell)));
2075 __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2076 __ Branch(&cache_miss, ne, map, Operand(at));
2077 // We use Factory::the_hole_value() on purpose instead of loading from the
2078 // root array to force relocation to be able to later patch
2079 // with true or false.
2080 __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2083 // The inlined call site cache did not match. Check null and string before
2084 // calling the deferred code.
2085 __ bind(&cache_miss);
2086 // Null is not instance of anything.
2087 __ LoadRoot(temp, Heap::kNullValueRootIndex);
2088 __ Branch(&false_result, eq, object, Operand(temp));
2090 // String values is not instance of anything.
2091 Condition cc = __ IsObjectStringType(object, temp, temp);
2092 __ Branch(&false_result, cc, temp, Operand(zero_reg));
2094 // Go to the deferred code.
2095 __ Branch(deferred->entry());
2097 __ bind(&false_result);
2098 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2100 // Here result has either true or false. Deferred code also produces true or
2102 __ bind(deferred->exit());
// Deferred slow path for DoInstanceOfKnownGlobal: calls InstanceofStub
// with the inline-check flags. The offset from |map_check| to the
// current position (plus a fixed delta for the instructions emitted
// after this point) is stored into t0's safepoint slot so the stub can
// find and patch the inlined cache site.
2107 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2109 Register result = ToRegister(instr->result());
2110 ASSERT(result.is(v0));
2112 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2113 flags = static_cast<InstanceofStub::Flags>(
2114 flags | InstanceofStub::kArgsInRegisters);
2115 flags = static_cast<InstanceofStub::Flags>(
2116 flags | InstanceofStub::kCallSiteInlineCheck);
2117 flags = static_cast<InstanceofStub::Flags>(
2118 flags | InstanceofStub::kReturnTrueFalseObject);
2119 InstanceofStub stub(flags);
2121 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2123 // Get the temp register reserved by the instruction. This needs to be t0 as
2124 // its slot of the pushing of safepoint registers is used to communicate the
2125 // offset to the location of the map check.
2126 Register temp = ToRegister(instr->TempAt(0));
2127 ASSERT(temp.is(t0));
2128 __ LoadHeapObject(InstanceofStub::right(), instr->function());
// kAdditionalDelta accounts for the instructions between
// before_push_delta and the stub call; presumably it must be kept in
// sync with the emitted sequence below — TODO confirm count.
2129 static const int kAdditionalDelta = 7;
2130 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2131 Label before_push_delta;
2132 __ bind(&before_push_delta);
2134 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2135 __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2136 __ StoreToSafepointRegisterSlot(temp, temp);
2138 CallCodeGeneric(stub.GetCode(),
2139 RelocInfo::CODE_TARGET,
2141 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2142 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2143 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2144 // Put the result value into the result register slot and
2145 // restore all registers.
2146 __ StoreToSafepointRegisterSlot(result, result);
// Generic (tagged) comparison: dispatch through the CompareIC, then turn
// the condition flag (IC result in v0) into the true/false heap objects.
2150 void LCodeGen::DoCmpT(LCmpT* instr) {
2151   Token::Value op = instr->op();
2153   Handle<Code> ic = CompareIC::GetUninitialized(op);
2154   CallCode(ic, RelocInfo::CODE_TARGET, instr);
2155   // On MIPS there is no need for a "no inlined smi code" marker (nop).
2157   Condition condition = ComputeCompareCondition(op);
2158   // A minor optimization that relies on LoadRoot always emitting one
2160   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
     // The true-value load executes in the branch delay slot; the false-value
     // load only runs when the branch falls through. The ASSERT_EQ below pins
     // the exact instruction count this sequencing relies on.
2162   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2163   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2164   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2165   ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done));
// Function epilogue: optionally trace the exit, then pop the parameters
// (plus the receiver, hence the +1) off the stack.
2170 void LCodeGen::DoReturn(LReturn* instr) {
2172     // Push the return value on the stack as the parameter.
2173     // Runtime::TraceExit returns its parameter in v0.
2175     __ CallRuntime(Runtime::kTraceExit, 1);
2177   int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2180   __ Addu(sp, sp, Operand(sp_delta));
// Load a global variable directly from its property cell; deoptimize if the
// cell holds the hole (variable was deleted) and a hole check is required.
2185 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2186   Register result = ToRegister(instr->result());
2187   __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
2188   __ lw(result, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
2189   if (instr->hydrogen()->RequiresHoleCheck()) {
2190     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2191     DeoptimizeIf(eq, instr->environment(), result, Operand(at));
// Load a global through the generic LoadIC. Fixed register contract:
// receiver in a0, name in a2, result in v0.
2196 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2197   ASSERT(ToRegister(instr->global_object()).is(a0));
2198   ASSERT(ToRegister(instr->result()).is(v0));
2200   __ li(a2, Operand(instr->name()));
     // typeof loads must not use the contextual-load reloc mode, so they won't
     // throw on undeclared variables.
2201   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
2202                                              : RelocInfo::CODE_TARGET_CONTEXT;
2203   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2204   CallCode(ic, mode, instr);
// Store a global variable directly into its property cell, deoptimizing
// first if the cell currently holds the hole (the property was deleted).
2208 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2209   Register value = ToRegister(instr->value());
2210   Register cell = scratch0();
2213   __ li(cell, Operand(instr->hydrogen()->cell()));
2215   // If the cell we are storing to contains the hole it could have
2216   // been deleted from the property dictionary. In that case, we need
2217   // to update the property details in the property dictionary to mark
2218   // it as no longer deleted.
2219   if (instr->hydrogen()->RequiresHoleCheck()) {
2220     // We use a temp to check the payload.
2221     Register payload = ToRegister(instr->TempAt(0));
2222     __ lw(payload, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2223     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2224     DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2228   __ sw(value, FieldMemOperand(cell, JSGlobalPropertyCell::kValueOffset));
2229   // Cells are always rescanned, so no write barrier here.
// Store a global through the generic StoreIC (strict or non-strict variant).
// Fixed register contract: receiver in a1, value in a0, name in a2.
2233 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
2234   ASSERT(ToRegister(instr->global_object()).is(a1));
2235   ASSERT(ToRegister(instr->value()).is(a0));
2237   __ li(a2, Operand(instr->name()));
2238   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
2239       ? isolate()->builtins()->StoreIC_Initialize_Strict()
2240       : isolate()->builtins()->StoreIC_Initialize();
2241   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
// Load a slot from a context object. If a hole check is required, either
// deoptimize on the hole or substitute undefined, depending on the
// hydrogen instruction's DeoptimizesOnHole flag.
2245 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2246   Register context = ToRegister(instr->context());
2247   Register result = ToRegister(instr->result());
2249   __ lw(result, ContextOperand(context, instr->slot_index()));
2250   if (instr->hydrogen()->RequiresHoleCheck()) {
2251     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2253     if (instr->hydrogen()->DeoptimizesOnHole()) {
2254       DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2257       __ Branch(&is_not_hole, ne, result, Operand(at));
2258       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2259       __ bind(&is_not_hole);
// Store a value into a context slot, with an optional hole check on the
// current slot contents (deopt or skip the store), and a write barrier
// when the stored value may be a heap object.
2265 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2266   Register context = ToRegister(instr->context());
2267   Register value = ToRegister(instr->value());
2268   Register scratch = scratch0();
2269   MemOperand target = ContextOperand(context, instr->slot_index());
2271   Label skip_assignment;
2273   if (instr->hydrogen()->RequiresHoleCheck()) {
2274     __ lw(scratch, target);
2275     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2277     if (instr->hydrogen()->DeoptimizesOnHole()) {
2278       DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2280       __ Branch(&skip_assignment, ne, scratch, Operand(at));
2284   __ sw(value, target);
2285   if (instr->hydrogen()->NeedsWriteBarrier()) {
     // The smi check can be omitted when the type analysis proved the value
     // is a heap object.
2286     HType type = instr->hydrogen()->value()->type();
2287     SmiCheck check_needed =
2288         type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2289     __ RecordWriteContextSlot(context,
2295                               EMIT_REMEMBERED_SET,
2299   __ bind(&skip_assignment);
// Load a named field at a known offset: directly from the object when the
// property is in-object, otherwise indirectly via the properties array.
2303 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2304   Register object = ToRegister(instr->InputAt(0));
2305   Register result = ToRegister(instr->result());
2306   if (instr->hydrogen()->is_in_object()) {
2307     __ lw(result, FieldMemOperand(object, instr->hydrogen()->offset()));
2309     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2310     __ lw(result, FieldMemOperand(result, instr->hydrogen()->offset()));
// Emit the load for a monomorphic named property: either a field load
// (in-object for negative indices, properties array otherwise) or
// materialization of a constant function found in the map's descriptors.
2315 void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
2318                                                Handle<String> name) {
2319   LookupResult lookup(isolate());
2320   type->LookupInDescriptors(NULL, *name, &lookup);
2321   ASSERT(lookup.IsFound() &&
2322          (lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
2323   if (lookup.type() == FIELD) {
2324     int index = lookup.GetLocalFieldIndexFromMap(*type);
2325     int offset = index * kPointerSize;
2327       // Negative property indices are in-object properties, indexed
2328       // from the end of the fixed part of the object.
2329       __ lw(result, FieldMemOperand(object, offset + type->instance_size()));
2331       // Non-negative property indices are in the properties array.
2332       __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2333       __ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
2336     Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
2337     __ LoadHeapObject(result, function);
// Polymorphic named load: compare the receiver's map against each expected
// map, loading the field on a match. The last map either deoptimizes on
// mismatch or falls through to the generic LoadIC, depending on
// need_generic.
2342 void LCodeGen::DoLoadNamedFieldPolymorphic(LLoadNamedFieldPolymorphic* instr) {
2343   Register object = ToRegister(instr->object());
2344   Register result = ToRegister(instr->result());
2345   Register scratch = scratch0();
2347   int map_count = instr->hydrogen()->types()->length();
2348   bool need_generic = instr->hydrogen()->need_generic();
     // No candidate maps and no generic fallback: this site always deopts.
2350   if (map_count == 0 && !need_generic) {
2351     DeoptimizeIf(al, instr->environment());
2354   Handle<String> name = instr->hydrogen()->name();
2356   __ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2357   for (int i = 0; i < map_count; ++i) {
2358     bool last = (i == map_count - 1);
2359     Handle<Map> map = instr->hydrogen()->types()->at(i);
2360     if (last && !need_generic) {
2361       DeoptimizeIf(ne, instr->environment(), scratch, Operand(map));
2362       EmitLoadFieldOrConstantFunction(result, object, map, name);
2365       __ Branch(&next, ne, scratch, Operand(map));
2366       EmitLoadFieldOrConstantFunction(result, object, map, name);
2372     __ li(a2, Operand(name));
2373     Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2374     CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Generic named load through the LoadIC. Fixed register contract:
// receiver in a0, name in a2, result in v0.
2380 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2381   ASSERT(ToRegister(instr->object()).is(a0));
2382   ASSERT(ToRegister(instr->result()).is(v0));
2384   // Name is always in a2.
2385   __ li(a2, Operand(instr->name()));
2386   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
2387   CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Load a function's prototype, deoptimizing if the input is not a function
// or has no instance prototype (the hole). Handles both the direct
// prototype, the initial-map indirection, and the non-instance-prototype
// case (prototype fetched from the constructor).
2391 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2392   Register scratch = scratch0();
2393   Register function = ToRegister(instr->function());
2394   Register result = ToRegister(instr->result());
2396   // Check that the function really is a function. Load map into the
2398   __ GetObjectType(function, result, scratch);
2399   DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2401   // Make sure that the function has an instance prototype.
2403   __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2404   __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2405   __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2407   // Get the prototype or initial map from the function.
2409          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2411   // Check that the function has a prototype or an initial map.
2412   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2413   DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2415   // If the function does not have an initial map, we're done.
2417   __ GetObjectType(result, scratch, scratch);
2418   __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2420   // Get the prototype from the initial map.
2421   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2424   // Non-instance prototype: Fetch prototype from constructor field
2426   __ bind(&non_instance);
2427   __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
// Load the elements backing store of a JSObject. In debug builds, verify
// that the elements are a FixedArray, a FixedCOWArray, or of a fast /
// external elements kind, aborting otherwise.
2434 void LCodeGen::DoLoadElements(LLoadElements* instr) {
2435   Register result = ToRegister(instr->result());
2436   Register input = ToRegister(instr->InputAt(0));
2437   Register scratch = scratch0();
2439   __ lw(result, FieldMemOperand(input, JSObject::kElementsOffset));
2440   if (FLAG_debug_code) {
2442     __ lw(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
2443     __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
2444     __ Branch(USE_DELAY_SLOT, &done, eq, scratch, Operand(at));
2445     __ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);  // In the delay slot.
2446     __ Branch(&done, eq, scratch, Operand(at));
2447     // |scratch| still contains |input|'s map.
2448     __ lbu(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
2449     __ Ext(scratch, scratch, Map::kElementsKindShift,
2450            Map::kElementsKindBitCount);
2451     __ Branch(&done, eq, scratch,
2452               Operand(FAST_ELEMENTS));
2453     __ Branch(&fail, lt, scratch,
2454               Operand(FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2455     __ Branch(&done, le, scratch,
2456               Operand(LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
2458     __ Abort("Check for fast or external elements failed.");
// Load the raw backing-store pointer out of an ExternalArray object.
2464 void LCodeGen::DoLoadExternalArrayPointer(
2465     LLoadExternalArrayPointer* instr) {
2466   Register to_reg = ToRegister(instr->result());
2467   Register from_reg  = ToRegister(instr->InputAt(0));
2468   __ lw(to_reg, FieldMemOperand(from_reg,
2469                                 ExternalArray::kExternalPointerOffset));
// Load arguments[index] from the arguments area on the stack, after
// deoptimizing on an out-of-range (or negative, via the unsigned compare)
// index.
2473 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2474   Register arguments = ToRegister(instr->arguments());
2475   Register length = ToRegister(instr->length());
2476   Register index = ToRegister(instr->index());
2477   Register result = ToRegister(instr->result());
2479   // Bailout index is not a valid argument index. Use unsigned check to get
2480   // negative check for free.
2482   // TODO(plind): Shoud be optimized to do the sub before the DeoptimizeIf(),
2483   // as they do in Arm. It will save us an instruction.
2484   DeoptimizeIf(ls, instr->environment(), length, Operand(index));
2486   // There are two words between the frame pointer and the last argument.
2487   // Subtracting from length accounts for one of them, add one more.
2488   __ subu(length, length, index);
2489   __ Addu(length, length, Operand(1));
2490   __ sll(length, length, kPointerSizeLog2);
2491   __ Addu(at, arguments, Operand(length));
2492   __ lw(result, MemOperand(at, 0));
// Load an element from a fast (FixedArray) elements backing store,
// deoptimizing if the loaded value is the hole and a hole check is
// required.
2496 void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2497   Register elements = ToRegister(instr->elements());
2498   Register key = EmitLoadRegister(instr->key(), scratch0());
2499   Register result = ToRegister(instr->result());
2500   Register scratch = scratch0();
2503   __ sll(scratch, key, kPointerSizeLog2);  // Key indexes words.
2504   __ addu(scratch, elements, scratch);
2505   __ lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2507   // Check for the hole value.
2508   if (instr->hydrogen()->RequiresHoleCheck()) {
2509     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
2510     DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
// Load a double element from a FixedDoubleArray. Handles both constant and
// register keys; the hole is detected by comparing the upper 32 bits of the
// element against the hole-NaN pattern, deoptimizing on a match.
2515 void LCodeGen::DoLoadKeyedFastDoubleElement(
2516     LLoadKeyedFastDoubleElement* instr) {
2517   Register elements = ToRegister(instr->elements());
2518   bool key_is_constant = instr->key()->IsConstantOperand();
2519   Register key = no_reg;
2520   DoubleRegister result = ToDoubleRegister(instr->result());
2521   Register scratch = scratch0();
2524       ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
2525   int constant_key = 0;
2526   if (key_is_constant) {
2527     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     // Reject constant keys whose scaled offset could overflow the
     // addressing computation.
2528     if (constant_key & 0xF0000000) {
2529       Abort("array index constant value too big.");
2532     key = ToRegister(instr->key());
2535   if (key_is_constant) {
2536     __ Addu(elements, elements, Operand(constant_key * (1 << shift_size) +
2537             FixedDoubleArray::kHeaderSize - kHeapObjectTag));
2539     __ sll(scratch, key, shift_size);
2540     __ Addu(elements, elements, Operand(scratch));
2541     __ Addu(elements, elements,
2542             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
     // The word at offset sizeof(kHoleNanLower32) is the upper half of the
     // double; matching kHoleNanUpper32 means the element is the hole.
2545   __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
2546   DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
2548   __ ldc1(result, MemOperand(elements));
// Load an element from an external (typed) array. Float/double kinds go
// through FPU registers (floats widened to double); integer kinds use the
// width- and signedness-appropriate load. Unsigned int32 loads deoptimize
// when the value does not fit in a signed 32-bit integer result.
2552 void LCodeGen::DoLoadKeyedSpecializedArrayElement(
2553     LLoadKeyedSpecializedArrayElement* instr) {
2554   Register external_pointer = ToRegister(instr->external_pointer());
2555   Register key = no_reg;
2556   ElementsKind elements_kind = instr->elements_kind();
2557   bool key_is_constant = instr->key()->IsConstantOperand();
2558   int constant_key = 0;
2559   if (key_is_constant) {
2560     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
2561     if (constant_key & 0xF0000000) {
2562       Abort("array index constant value too big.");
2565     key = ToRegister(instr->key());
2567   int shift_size = ElementsKindToShiftSize(elements_kind);
2569   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
2570       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
2571     FPURegister result = ToDoubleRegister(instr->result());
2572     if (key_is_constant) {
2573       __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
2575       __ sll(scratch0(), key, shift_size);
2576       __ Addu(scratch0(), scratch0(), external_pointer);
2579     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     // Single-precision load widened to double for the result register.
2580       __ lwc1(result, MemOperand(scratch0()));
2581       __ cvt_d_s(result, result);
2582     } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
2583       __ ldc1(result, MemOperand(scratch0()));
2586     Register result = ToRegister(instr->result());
2587     Register scratch = scratch0();
2588     MemOperand mem_operand(zero_reg);
2589     if (key_is_constant) {
2590       mem_operand = MemOperand(external_pointer,
2591                                constant_key * (1 << shift_size));
2593       __ sll(scratch, key, shift_size);
2594       __ Addu(scratch, scratch, external_pointer);
2595       mem_operand = MemOperand(scratch);
2597     switch (elements_kind) {
2598       case EXTERNAL_BYTE_ELEMENTS:
2599         __ lb(result, mem_operand);
2601       case EXTERNAL_PIXEL_ELEMENTS:
2602       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
2603         __ lbu(result, mem_operand);
2605       case EXTERNAL_SHORT_ELEMENTS:
2606         __ lh(result, mem_operand);
2608       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
2609         __ lhu(result, mem_operand);
2611       case EXTERNAL_INT_ELEMENTS:
2612         __ lw(result, mem_operand);
2614       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
2615         __ lw(result, mem_operand);
2616         // TODO(danno): we could be more clever here, perhaps having a special
2617         // version of the stub that detects if the overflow case actually
2618         // happens, and generate code that returns a double rather than int.
2619         DeoptimizeIf(Ugreater_equal, instr->environment(),
2620                      result, Operand(0x80000000));
2622       case EXTERNAL_FLOAT_ELEMENTS:
2623       case EXTERNAL_DOUBLE_ELEMENTS:
2624       case FAST_DOUBLE_ELEMENTS:
2626       case FAST_SMI_ONLY_ELEMENTS:
2627       case DICTIONARY_ELEMENTS:
2628       case NON_STRICT_ARGUMENTS_ELEMENTS:
// Generic keyed load through the KeyedLoadIC. Fixed register contract:
// receiver in a1, key in a0.
2636 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2637   ASSERT(ToRegister(instr->object()).is(a1));
2638   ASSERT(ToRegister(instr->key()).is(a0));
2640   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
2641   CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Compute the base pointer of the arguments area: for inlined frames it is
// a fixed offset from sp; otherwise it is the current frame pointer, or the
// caller's frame pointer when that caller is an arguments adaptor frame.
2645 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2646   Register scratch = scratch0();
2647   Register temp = scratch1();
2648   Register result = ToRegister(instr->result());
2650   if (instr->hydrogen()->from_inlined()) {
2651     __ Subu(result, sp, 2 * kPointerSize);
2653     // Check if the calling frame is an arguments adaptor frame.
2654     Label done, adapted;
2655     __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2656     __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
     // temp == 0 iff the caller frame's context slot holds the
     // ARGUMENTS_ADAPTOR marker.
2657     __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2659     // Result is the frame pointer for the frame if not adapted and for the real
2660     // frame below the adaptor frame if adapted.
2661     __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
2662     __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
// Compute the number of arguments: the static parameter count when no
// adaptor frame is present, otherwise the untagged length stored in the
// arguments adaptor frame.
2667 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2668   Register elem = ToRegister(instr->InputAt(0));
2669   Register result = ToRegister(instr->result());
2673   // If no arguments adaptor frame the number of arguments is fixed.
2674   __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
2675   __ Branch(&done, eq, fp, Operand(elem));
2677   // Arguments adaptor frame present. Get argument length from there.
2678   __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2680         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
2681   __ SmiUntag(result);
2683   // Argument length is in result register.
// Wrap the receiver for a non-strict, non-native function call: null or
// undefined is replaced with the global receiver, and a non-object receiver
// causes a deopt. Strict-mode and native functions keep their receiver
// unchanged.
2688 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
2689   Register receiver = ToRegister(instr->receiver());
2690   Register function = ToRegister(instr->function());
2691   Register scratch = scratch0();
2693   // If the receiver is null or undefined, we have to pass the global
2694   // object as a receiver to normal functions. Values have to be
2695   // passed unchanged to builtins and strict-mode functions.
2696   Label global_object, receiver_ok;
2698   // Do not transform the receiver to object for strict mode
2701          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2703          FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2705   // Do not transform the receiver to object for builtins.
     // Hints are stored as a smi, hence the kSmiTagSize shift on the bit
     // positions below.
2706   int32_t strict_mode_function_mask =
2707       1 <<  (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
2708   int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
2709   __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
2710   __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
2712   // Normal function. Replace undefined or null with global receiver.
2713   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
2714   __ Branch(&global_object, eq, receiver, Operand(scratch));
2715   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
2716   __ Branch(&global_object, eq, receiver, Operand(scratch));
2718   // Deoptimize if the receiver is not a JS object.
2719   __ And(scratch, receiver, Operand(kSmiTagMask));
2720   DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
2722   __ GetObjectType(receiver, scratch, scratch);
2723   DeoptimizeIf(lt, instr->environment(),
2724                scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
2725   __ Branch(&receiver_ok);
2727   __ bind(&global_object);
2728   __ lw(receiver, GlobalObjectOperand());
2730          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
2731   __ bind(&receiver_ok);
// Implement Function.prototype.apply-style invocation: copy up to
// kArgumentsLimit arguments from the arguments area onto the stack
// (deopting above the limit), then invoke the function with a safepoint
// for lazy deoptimization.
2734 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2735   Register receiver = ToRegister(instr->receiver());
2736   Register function = ToRegister(instr->function());
2737   Register length = ToRegister(instr->length());
2738   Register elements = ToRegister(instr->elements());
2739   Register scratch = scratch0();
2740   ASSERT(receiver.is(a0));  // Used for parameter count.
2741   ASSERT(function.is(a1));  // Required by InvokeFunction.
2742   ASSERT(ToRegister(instr->result()).is(v0));
2744   // Copy the arguments to this function possibly from the
2745   // adaptor frame below it.
2746   const uint32_t kArgumentsLimit = 1 * KB;
2747   DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
2749   // Push the receiver and use the register to keep the original
2750   // number of arguments.
2752   __ Move(receiver, length);
2753   // The arguments are at a one pointer size offset from elements.
2754   __ Addu(elements, elements, Operand(1 * kPointerSize));
2756   // Loop through the arguments pushing them onto the execution
2759   // length is a small non-negative integer, due to the test above.
     // The shift that computes the next element offset executes in the
     // branch delay slot on both branches below.
2760   __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
2761   __ sll(scratch, length, 2);
2763   __ Addu(scratch, elements, scratch);
2764   __ lw(scratch, MemOperand(scratch));
2766   __ Subu(length, length, Operand(1));
2767   __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
2768   __ sll(scratch, length, 2);
2771   ASSERT(instr->HasPointerMap());
2772   LPointerMap* pointers = instr->pointer_map();
2773   RecordPosition(pointers->position());
2774   SafepointGenerator safepoint_generator(
2775       this, pointers, Safepoint::kLazyDeopt);
2776   // The number of arguments is stored in receiver which is a0, as expected
2777   // by InvokeFunction.
2778   ParameterCount actual(receiver);
2779   __ InvokeFunction(function, actual, CALL_FUNCTION,
2780                     safepoint_generator, CALL_AS_METHOD);
2781   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Push a single (tagged) argument onto the stack; double arguments are not
// supported by this instruction.
2785 void LCodeGen::DoPushArgument(LPushArgument* instr) {
2786   LOperand* argument = instr->InputAt(0);
2787   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
2788     Abort("DoPushArgument not implemented for double type.");
2790     Register argument_reg = EmitLoadRegister(argument, at);
2791     __ push(argument_reg);
// Drop |count| values from the top of the stack.
2796 void LCodeGen::DoDrop(LDrop* instr) {
2797   __ Drop(instr->count());
// Materialize the current function's closure into the result register.
2801 void LCodeGen::DoThisFunction(LThisFunction* instr) {
2802   Register result = ToRegister(instr->result());
2803   __ LoadHeapObject(result, instr->hydrogen()->closure());
// Provide the current context in the result register (body elided here).
2807 void LCodeGen::DoContext(LContext* instr) {
2808   Register result = ToRegister(instr->result());
// Load the enclosing (previous) context from the given context's
// PREVIOUS_INDEX slot.
2813 void LCodeGen::DoOuterContext(LOuterContext* instr) {
2814   Register context = ToRegister(instr->context());
2815   Register result = ToRegister(instr->result());
2817       MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
// Declare global variables/functions by calling the kDeclareGlobals
// runtime function with (context, pairs, flags).
2821 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
2822   __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
2823   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
2824   // The context is the first argument.
2825   __ Push(cp, scratch0(), scratch1());
2826   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
// Load the global object out of the current context.
2830 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2831   Register result = ToRegister(instr->result());
2832   __ lw(result, ContextOperand(cp, Context::GLOBAL_INDEX));
// Load the global receiver (the object used as |this| for global calls)
// from the given global object.
2836 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2837   Register global = ToRegister(instr->global());
2838   Register result = ToRegister(instr->result());
2839   __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
// Call a statically-known JSFunction. When the formal parameter count
// matches the arity (or no adaption is needed), call the code entry
// directly, switching context if the callee's context differs; otherwise
// go through the full InvokeFunction path with a lazy-deopt safepoint.
2843 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2845                                  LInstruction* instr,
2848   bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
2849       function->shared()->formal_parameter_count() == arity;
2851   LPointerMap* pointers = instr->pointer_map();
2852   RecordPosition(pointers->position());
2854   if (can_invoke_directly) {
     // a1 must hold the function object for the call; load it only if the
     // caller has not already done so.
2855     if (a1_state == A1_UNINITIALIZED) {
2856       __ LoadHeapObject(a1, function);
2859     // Change context if needed.
2860     bool change_context =
2861         (info()->closure()->context() != function->context()) ||
2862         scope()->contains_with() ||
2863         (scope()->num_heap_slots() > 0);
2864     if (change_context) {
2865       __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
2868     // Set r0 to arguments count if adaption is not needed. Assumes that r0
2869     // is available to write to at this point.
2870     if (!function->NeedsArgumentsAdaption()) {
2871       __ li(a0, Operand(arity));
2875     __ SetCallKind(t1, call_kind);
2876     __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
2879     // Set up deoptimization.
2880     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
2882     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
2883     ParameterCount count(arity);
2884     __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
2888   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Call a known (constant) function; result is returned in v0.
2892 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2893   ASSERT(ToRegister(instr->result()).is(v0));
2895   CallKnownFunction(instr->function(),
// Deferred Math.abs for a tagged input that is not a smi: deopt unless the
// input is a heap number; if it is negative, allocate a fresh heap number
// with the sign bit cleared (calling the runtime if inline allocation
// fails) while preserving all registers via a safepoint scope.
2903 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2904   Register input = ToRegister(instr->InputAt(0));
2905   Register result = ToRegister(instr->result());
2906   Register scratch = scratch0();
2908   // Deoptimize if not a heap number.
2909   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
2910   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2911   DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
2914   Register exponent = scratch0();
2916   __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2917   // Check the sign of the argument. If the argument is positive, just
2919   __ Move(result, input);
2920   __ And(at, exponent, Operand(HeapNumber::kSignMask));
2921   __ Branch(&done, eq, at, Operand(zero_reg));
2923   // Input is negative. Reverse its sign.
2924   // Preserve the value of all registers.
2926     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2928     // Registers were saved at the safepoint, so we can use
2929     // many scratch registers.
     // Pick four distinct temps, avoiding whichever one aliases |input|.
2930     Register tmp1 = input.is(a1) ? a0 : a1;
2931     Register tmp2 = input.is(a2) ? a0 : a2;
2932     Register tmp3 = input.is(a3) ? a0 : a3;
2933     Register tmp4 = input.is(t0) ? a0 : t0;
2935     // exponent: floating point exponent value.
2937     Label allocated, slow;
2938     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
2939     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
2940     __ Branch(&allocated);
2942     // Slow case: Call the runtime system to do the number allocation.
2945     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
2946     // Set the pointer to the new heap number in tmp.
2949     // Restore input_reg after call to runtime.
2950     __ LoadFromSafepointRegisterSlot(input, input);
2951     __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
2953     __ bind(&allocated);
2954     // exponent: floating point exponent value.
2955     // tmp1: allocated heap number.
     // Clear the sign bit in the exponent word and copy both words into the
     // freshly allocated heap number.
2956     __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
2957     __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
2958     __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
2959     __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
2961     __ StoreToSafepointRegisterSlot(tmp1, result);
// Integer Math.abs: copy through when non-negative (move in the branch
// delay slot), otherwise negate; deoptimize on INT_MIN, whose negation
// overflows back to a negative value.
2968 void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
2969   Register input = ToRegister(instr->InputAt(0));
2970   Register result = ToRegister(instr->result());
2971   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2973   __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
2974   __ mov(result, input);
2975   ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done));
2976   __ subu(result, zero_reg, input);
2977   // Overflow if result is still negative, i.e. 0x80000000.
2978   DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
// Math.abs dispatch on representation: FPU abs for doubles, inline integer
// abs for int32, and for tagged values an inline smi path with a deferred
// heap-number fallback.
2983 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2984   // Class for deferred case.
2985   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2987     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2988                                     LUnaryMathOperation* instr)
2989         : LDeferredCode(codegen), instr_(instr) { }
2990     virtual void Generate() {
2991       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2993     virtual LInstruction* instr() { return instr_; }
2995     LUnaryMathOperation* instr_;
2998   Representation r = instr->hydrogen()->value()->representation();
3000     FPURegister input = ToDoubleRegister(instr->InputAt(0));
3001     FPURegister result = ToDoubleRegister(instr->result());
3002     __ abs_d(result, input);
3003   } else if (r.IsInteger32()) {
3004     EmitIntegerMathAbs(instr);
3006     // Representation is tagged.
3007     DeferredMathAbsTaggedHeapNumber* deferred =
3008         new DeferredMathAbsTaggedHeapNumber(this, instr);
3009     Register input = ToRegister(instr->InputAt(0));
3011     __ JumpIfNotSmi(input, deferred->entry());
3012     // If smi, handle it directly.
3013     EmitIntegerMathAbs(instr);
3014     __ bind(deferred->exit());
// Math.floor via FPU truncation toward minus infinity; deoptimize when the
// truncation is inexact/out of range, and when the result is -0 and
// minus-zero must be distinguished (sign bit checked in the input's high
// word).
3019 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3020   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3021   Register result = ToRegister(instr->result());
3022   FPURegister single_scratch = double_scratch0().low();
3023   Register scratch1 = scratch0();
3024   Register except_flag = ToRegister(instr->TempAt(0));
3026   __ EmitFPUTruncate(kRoundToMinusInf,
3032   // Deopt if the operation did not succeed.
3033   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3036   __ mfc1(result, single_scratch);
3038   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3041     __ Branch(&done, ne, result, Operand(zero_reg));
3042     __ mfc1(scratch1, input.high());
3043     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3044     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Math.round: inputs in ]-0.5, 0.5[ round to +/-0 (with a minus-zero
// bailout if required); otherwise add 0.5 and truncate toward minus
// infinity, deoptimizing when the magnitude is too large for int32, when
// the truncation fails, or when a -0 result must be reported.
3050 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3051   DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3052   Register result = ToRegister(instr->result());
3053   Register scratch = scratch0();
3054   Label done, check_sign_on_zero;
3056   // Extract exponent bits.
3057   __ mfc1(result, input.high());
3060           HeapNumber::kExponentShift,
3061           HeapNumber::kExponentBits);
3063   // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3065   __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3066   __ mov(result, zero_reg);
3067   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3068     __ Branch(&check_sign_on_zero);
3074   // The following conversion will not work with numbers
3075   // outside of ]-2^32, 2^32[.
3076   DeoptimizeIf(ge, instr->environment(), scratch,
3077                Operand(HeapNumber::kExponentBias + 32));
3079   // Save the original sign for later comparison.
3080   __ And(scratch, result, Operand(HeapNumber::kSignMask));
3082   __ Move(double_scratch0(), 0.5);
3083   __ add_d(double_scratch0(), input, double_scratch0());
3085   // Check sign of the result: if the sign changed, the input
3086   // value was in ]0.5, 0[ and the result should be -0.
3087   __ mfc1(result, double_scratch0().high());
3088   __ Xor(result, result, Operand(scratch));
3089   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3090     // ARM uses 'mi' here, which is 'lt'
3091     DeoptimizeIf(lt, instr->environment(), result,
3095     // ARM uses 'mi' here, which is 'lt'
3096     // Negating it results in 'ge'
3097     __ Branch(&skip2, ge, result, Operand(zero_reg));
3098     __ mov(result, zero_reg);
3103   Register except_flag = scratch;
3105   __ EmitFPUTruncate(kRoundToMinusInf,
3106                      double_scratch0().low(),
3111   DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3113   __ mfc1(result, double_scratch0().low());
3115   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3117     __ Branch(&done, ne, result, Operand(zero_reg));
3118     __ bind(&check_sign_on_zero);
3119     __ mfc1(scratch, input.high());
3120     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3121     DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
// Math.sqrt: a single hardware square-root instruction on the double
// input register.
3127 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3128 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3129 DoubleRegister result = ToDoubleRegister(instr->result());
3130 __ sqrt_d(result, input);
// Math.pow(x, 0.5). Unlike Math.sqrt, ECMA-262 15.8.2.13 requires
// Math.pow(-Infinity, 0.5) == +Infinity, so -Infinity is special-cased
// before taking the square root; +0 is added first so that sqrt(-0)
// yields +0 rather than -0.
3134 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3135 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
3136 DoubleRegister result = ToDoubleRegister(instr->result());
3137 DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
3139 ASSERT(!input.is(result));
3141 // Note that according to ECMA-262 15.8.2.13:
3142 // Math.pow(-Infinity, 0.5) == Infinity
3143 // Math.sqrt(-Infinity) == NaN
3145 __ Move(temp, -V8_INFINITY);
3146 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3147 // Set up Infinity in the delay slot.
3148 // result is overwritten if the branch is not taken.
3149 __ neg_d(result, temp);
3151 // Add +0 to convert -0 to +0.
3152 __ add_d(result, input, kDoubleRegZero);
3153 __ sqrt_d(result, result);
// Math.pow via MathPowStub, dispatching on the representation of the
// exponent (tagged / int32 / double). The instruction is marked as a call,
// so fixed registers are expected: base in f2, double exponent in f4,
// tagged exponent in a2, result in f0. For a tagged non-smi exponent the
// map is checked and we deopt unless it is a heap number.
// NOTE(review): this listing omits some original lines (e.g. the label
// declaration for no_deopt, the LoadRoot of the heap-number map into 'at',
// and the CallStub calls), so not every statement is visible here.
3158 void LCodeGen::DoPower(LPower* instr) {
3159 Representation exponent_type = instr->hydrogen()->right()->representation();
3160 // Having marked this as a call, we can use any registers.
3161 // Just make sure that the input/output registers are the expected ones.
3162 ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
3163 ToDoubleRegister(instr->InputAt(1)).is(f4));
3164 ASSERT(!instr->InputAt(1)->IsRegister() ||
3165 ToRegister(instr->InputAt(1)).is(a2));
3166 ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
3167 ASSERT(ToDoubleRegister(instr->result()).is(f0));
3169 if (exponent_type.IsTagged()) {
3171 __ JumpIfSmi(a2, &no_deopt);
3172 __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
3173 DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3175 MathPowStub stub(MathPowStub::TAGGED);
3177 } else if (exponent_type.IsInteger32()) {
3178 MathPowStub stub(MathPowStub::INTEGER);
3181 ASSERT(exponent_type.IsDouble());
3182 MathPowStub stub(MathPowStub::DOUBLE);
// Math.random: inline implementation of the per-context 32-bit PRNG
// (two coupled 16-bit multiply-with-carry generators stored in the global
// context's random-seed ByteArray). Falls back to the deferred runtime
// path if the seed is uninitialized (state[0] == 0). The random 32-bit
// pattern in v0 is turned into a double in [0, 1) by composing it with
// exponent 0x41300000 and subtracting 1.0 x 2^20.
// NOTE(review): this listing omits some original lines (e.g. the sll/srl
// shift instructions between the visible arithmetic steps), so not every
// statement is visible here.
3188 void LCodeGen::DoRandom(LRandom* instr) {
3189 class DeferredDoRandom: public LDeferredCode {
3191 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3192 : LDeferredCode(codegen), instr_(instr) { }
3193 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3194 virtual LInstruction* instr() { return instr_; }
3199 DeferredDoRandom* deferred = new DeferredDoRandom(this, instr);
3200 // Having marked this instruction as a call we can use any
3202 ASSERT(ToDoubleRegister(instr->result()).is(f0));
3203 ASSERT(ToRegister(instr->InputAt(0)).is(a0));
3205 static const int kSeedSize = sizeof(uint32_t);
3206 STATIC_ASSERT(kPointerSize == kSeedSize);
3208 __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalContextOffset));
3209 static const int kRandomSeedOffset =
3210 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3211 __ lw(a2, FieldMemOperand(a0, kRandomSeedOffset));
3212 // a2: FixedArray of the global context's random seeds
// Load state[0]; a zero value means the seed was never initialized, so
// take the deferred (runtime) path.
3215 __ lw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3216 __ Branch(deferred->entry(), eq, a1, Operand(zero_reg));
3218 __ lw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3222 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3223 __ And(a3, a1, Operand(0xFFFF));
3224 __ li(t0, Operand(18273));
3227 __ Addu(a1, a3, a1);
3229 __ sw(a1, FieldMemOperand(a2, ByteArray::kHeaderSize));
3231 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3232 __ And(a3, a0, Operand(0xFFFF));
3233 __ li(t0, Operand(36969));
3236 __ Addu(a0, a3, a0);
3238 __ sw(a0, FieldMemOperand(a2, ByteArray::kHeaderSize + kSeedSize));
3240 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3241 __ And(a0, a0, Operand(0x3FFFF));
3243 __ Addu(v0, a0, a1);
3245 __ bind(deferred->exit());
3247 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3248 __ li(a2, Operand(0x41300000));
3249 // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
3250 __ Move(f12, v0, a2);
3251 // Move 0x4130000000000000 to FPU.
3252 __ Move(f14, zero_reg, a2);
3253 // Subtract to get the result.
3254 __ sub_d(f0, f12, f14);
// Deferred slow path for DoRandom: calls the C random_uint32 function to
// (re)initialize/advance the seed; the 32-bit result is returned in v0.
3257 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3258 __ PrepareCallCFunction(1, scratch0());
3259 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3260 // Return value is in v0.
// Math.log via the transcendental cache stub; untagged double input,
// result expected in f4.
3264 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
3265 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3266 TranscendentalCacheStub stub(TranscendentalCache::LOG,
3267 TranscendentalCacheStub::UNTAGGED);
3268 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Math.tan via the transcendental cache stub; untagged double input,
// result expected in f4.
3272 void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
3273 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3274 TranscendentalCacheStub stub(TranscendentalCache::TAN,
3275 TranscendentalCacheStub::UNTAGGED);
3276 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Math.cos via the transcendental cache stub; untagged double input,
// result expected in f4.
3280 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
3281 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3282 TranscendentalCacheStub stub(TranscendentalCache::COS,
3283 TranscendentalCacheStub::UNTAGGED);
3284 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Math.sin via the transcendental cache stub; untagged double input,
// result expected in f4.
3288 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
3289 ASSERT(ToDoubleRegister(instr->result()).is(f4));
3290 TranscendentalCacheStub stub(TranscendentalCache::SIN,
3291 TranscendentalCacheStub::UNTAGGED);
3292 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Dispatcher for unary math operations: switches on instr->op() and
// forwards to the specific DoMath* emitter; aborts on an unknown op.
// NOTE(review): most case labels are omitted from this listing; only the
// kMathPowHalf arm and the default Abort are visible.
3296 void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
3297 switch (instr->op()) {
3311 DoMathPowHalf(instr);
3326 Abort("Unimplemented type of LUnaryMathOperation.");
// Invokes the function in a1. If the target is unknown at compile time, a
// generic InvokeFunction with a lazy-deopt safepoint is emitted and the
// context register is restored afterwards; if the target is a known
// function, CallKnownFunction is used with the target already in a1.
3332 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3333 ASSERT(ToRegister(instr->function()).is(a1));
3334 ASSERT(instr->HasPointerMap());
3336 if (instr->known_function().is_null()) {
3337 LPointerMap* pointers = instr->pointer_map();
3338 RecordPosition(pointers->position());
3339 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3340 ParameterCount count(instr->arity());
3341 __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
3342 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3344 CallKnownFunction(instr->known_function(),
3348 A1_CONTAINS_TARGET);
// Keyed call (obj[key](...)): calls the KeyedCallInitialize IC for the
// given arity, then restores the context register; result in v0.
3353 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
3354 ASSERT(ToRegister(instr->result()).is(v0));
3356 int arity = instr->arity();
3358 isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
3359 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3360 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Named call (obj.name(...)): loads the property name into a2, calls the
// CallInitialize IC for the given arity, and restores the context
// register; result in v0.
3364 void LCodeGen::DoCallNamed(LCallNamed* instr) {
3365 ASSERT(ToRegister(instr->result()).is(v0));
3367 int arity = instr->arity();
3368 RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
3370 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3371 __ li(a2, Operand(instr->name()));
3372 CallCode(ic, mode, instr);
3373 // Restore context register.
3374 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Generic function call: the callee is in a1; invokes CallFunctionStub
// for the given arity and restores the context register; result in v0.
3378 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3379 ASSERT(ToRegister(instr->function()).is(a1));
3380 ASSERT(ToRegister(instr->result()).is(v0));
3382 int arity = instr->arity();
3383 CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
3384 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3385 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Call of a global function by name: same shape as DoCallNamed but uses
// CODE_TARGET_CONTEXT relocation so the IC receives the global receiver.
3389 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
3390 ASSERT(ToRegister(instr->result()).is(v0));
3392 int arity = instr->arity();
3393 RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
3395 isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
3396 __ li(a2, Operand(instr->name()));
3397 CallCode(ic, mode, instr);
3398 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Call of a global function whose target is known at compile time:
// delegates to CallKnownFunction (remaining arguments omitted from this
// listing); result in v0.
3402 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
3403 ASSERT(ToRegister(instr->result()).is(v0));
3404 CallKnownFunction(instr->target(),
// 'new' expression: constructor in a1, arity loaded into a0, then the
// CallConstructStub is invoked with CONSTRUCT_CALL relocation; result
// in v0.
3412 void LCodeGen::DoCallNew(LCallNew* instr) {
3413 ASSERT(ToRegister(instr->InputAt(0)).is(a1));
3414 ASSERT(ToRegister(instr->result()).is(v0));
3416 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3417 __ li(a0, Operand(instr->arity()));
3418 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
// Direct runtime call: forwards the runtime function and arity.
3422 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3423 CallRuntime(instr->function(), instr->arity(), instr);
// Store to a named field. Optionally writes a new map first (field
// transition), then stores the value either in-object or into the
// properties backing array, emitting a write barrier when the hydrogen
// instruction requires one. The smi check in the barrier is omitted when
// the stored value is statically known to be a heap object.
// NOTE(review): this listing omits several original lines (the remaining
// RecordWriteField arguments and closing braces), so not every statement
// is visible here.
3427 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3428 Register object = ToRegister(instr->object());
3429 Register value = ToRegister(instr->value());
3430 Register scratch = scratch0();
3431 int offset = instr->offset();
3433 ASSERT(!object.is(value));
3435 if (!instr->transition().is_null()) {
3436 __ li(scratch, Operand(instr->transition()));
3437 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3441 HType type = instr->hydrogen()->value()->type();
3442 SmiCheck check_needed =
3443 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3444 if (instr->is_in_object()) {
3445 __ sw(value, FieldMemOperand(object, offset));
3446 if (instr->hydrogen()->NeedsWriteBarrier()) {
3447 // Update the write barrier for the object for in-object properties.
3448 __ RecordWriteField(object,
3454 EMIT_REMEMBERED_SET,
3458 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
3459 __ sw(value, FieldMemOperand(scratch, offset));
3460 if (instr->hydrogen()->NeedsWriteBarrier()) {
3461 // Update the write barrier for the properties array.
3462 // object is used as a scratch register.
3463 __ RecordWriteField(scratch,
3469 EMIT_REMEMBERED_SET,
// Generic named store: receiver in a1, value in a0, name loaded into a2,
// then the (strict- or sloppy-mode) StoreIC is invoked.
3476 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
3477 ASSERT(ToRegister(instr->object()).is(a1));
3478 ASSERT(ToRegister(instr->value()).is(a0));
3480 // Name is always in a2.
3481 __ li(a2, Operand(instr->name()));
3482 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3483 ? isolate()->builtins()->StoreIC_Initialize_Strict()
3484 : isolate()->builtins()->StoreIC_Initialize();
3485 CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Array bounds check: deoptimizes when index is not below length.
// NOTE(review): the DeoptimizeIf call opener (with its condition code) is
// omitted from this listing; only its trailing arguments are visible.
3489 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
3491 instr->environment(),
3492 ToRegister(instr->index()),
3493 Operand(ToRegister(instr->length())));
// Store into a fast-elements FixedArray. A constant key is folded into
// the immediate offset (and must not need a write barrier); a register
// key is scaled by the pointer size. When a write barrier is needed, the
// key register is reused to hold the address of the stored slot.
// NOTE(review): this listing omits several original lines (the int
// 'offset' declaration, remaining RecordWrite arguments, closing braces),
// so not every statement is visible here.
3497 void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
3498 Register value = ToRegister(instr->value());
3499 Register elements = ToRegister(instr->object());
3500 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
3501 Register scratch = scratch0();
3504 if (instr->key()->IsConstantOperand()) {
3505 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3506 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3508 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
3509 __ sw(value, FieldMemOperand(elements, offset));
3511 __ sll(scratch, key, kPointerSizeLog2);
3512 __ addu(scratch, elements, scratch);
3513 __ sw(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
3516 if (instr->hydrogen()->NeedsWriteBarrier()) {
3517 HType type = instr->hydrogen()->value()->type();
3518 SmiCheck check_needed =
3519 type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
3520 // Compute address of modified element and store it into key register.
3521 __ Addu(key, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
3522 __ RecordWrite(elements,
3527 EMIT_REMEMBERED_SET,
// Store a double into a FixedDoubleArray. Computes the slot address from
// a constant or register key (constant keys above 28 bits abort), then,
// when canonicalization is required, replaces any NaN value with the
// canonical non-hole NaN before the sdc1 store.
// NOTE(review): this listing omits several original lines (label
// declarations for is_nan/not_nan, the bind calls, closing braces), so
// not every statement is visible here.
3533 void LCodeGen::DoStoreKeyedFastDoubleElement(
3534 LStoreKeyedFastDoubleElement* instr) {
3535 DoubleRegister value = ToDoubleRegister(instr->value());
3536 Register elements = ToRegister(instr->elements());
3537 Register key = no_reg;
3538 Register scratch = scratch0();
3539 bool key_is_constant = instr->key()->IsConstantOperand();
3540 int constant_key = 0;
3543 // Calculate the effective address of the slot in the array to store the
3545 if (key_is_constant) {
3546 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3547 if (constant_key & 0xF0000000) {
3548 Abort("array index constant value too big.");
3551 key = ToRegister(instr->key());
3553 int shift_size = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3554 if (key_is_constant) {
3555 __ Addu(scratch, elements, Operand(constant_key * (1 << shift_size) +
3556 FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3558 __ sll(scratch, key, shift_size);
3559 __ Addu(scratch, elements, Operand(scratch));
3560 __ Addu(scratch, scratch,
3561 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
3564 if (instr->NeedsCanonicalization()) {
3566 // Check for NaN. All NaNs must be canonicalized.
3567 __ BranchF(NULL, &is_nan, eq, value, value);
3568 __ Branch(&not_nan);
3570 // Only load canonical NaN if the comparison above set the overflow.
3572 __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3576 __ sdc1(value, MemOperand(scratch));
// Store into an external (typed) array. The element address is formed
// from the external pointer plus a constant or shifted register key.
// Float stores convert double->single first; double stores use sdc1;
// integer kinds select sb/sh/sw by element width. Non-external kinds in
// the switch are unreachable here (the original's UNREACHABLE arm is not
// visible in this listing).
// NOTE(review): this listing omits several original lines (break
// statements, the UNREACHABLE() default, closing braces), so not every
// statement is visible here.
3580 void LCodeGen::DoStoreKeyedSpecializedArrayElement(
3581 LStoreKeyedSpecializedArrayElement* instr) {
3583 Register external_pointer = ToRegister(instr->external_pointer());
3584 Register key = no_reg;
3585 ElementsKind elements_kind = instr->elements_kind();
3586 bool key_is_constant = instr->key()->IsConstantOperand();
3587 int constant_key = 0;
3588 if (key_is_constant) {
3589 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3590 if (constant_key & 0xF0000000) {
3591 Abort("array index constant value too big.");
3594 key = ToRegister(instr->key());
3596 int shift_size = ElementsKindToShiftSize(elements_kind);
3598 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3599 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3600 FPURegister value(ToDoubleRegister(instr->value()));
3601 if (key_is_constant) {
3602 __ Addu(scratch0(), external_pointer, constant_key * (1 << shift_size));
3604 __ sll(scratch0(), key, shift_size);
3605 __ Addu(scratch0(), scratch0(), external_pointer);
3608 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// Narrow the double to single precision before the 32-bit FPU store.
3609 __ cvt_s_d(double_scratch0(), value);
3610 __ swc1(double_scratch0(), MemOperand(scratch0()));
3611 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3612 __ sdc1(value, MemOperand(scratch0()));
3615 Register value(ToRegister(instr->value()));
3616 MemOperand mem_operand(zero_reg);
3617 Register scratch = scratch0();
3618 if (key_is_constant) {
3619 mem_operand = MemOperand(external_pointer,
3620 constant_key * (1 << shift_size));
3622 __ sll(scratch, key, shift_size);
3623 __ Addu(scratch, scratch, external_pointer);
3624 mem_operand = MemOperand(scratch);
3626 switch (elements_kind) {
3627 case EXTERNAL_PIXEL_ELEMENTS:
3628 case EXTERNAL_BYTE_ELEMENTS:
3629 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3630 __ sb(value, mem_operand);
3632 case EXTERNAL_SHORT_ELEMENTS:
3633 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3634 __ sh(value, mem_operand);
3636 case EXTERNAL_INT_ELEMENTS:
3637 case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3638 __ sw(value, mem_operand);
3640 case EXTERNAL_FLOAT_ELEMENTS:
3641 case EXTERNAL_DOUBLE_ELEMENTS:
3642 case FAST_DOUBLE_ELEMENTS:
3644 case FAST_SMI_ONLY_ELEMENTS:
3645 case DICTIONARY_ELEMENTS:
3646 case NON_STRICT_ARGUMENTS_ELEMENTS:
// Generic keyed store: receiver in a2, key in a1, value in a0; invokes
// the (strict- or sloppy-mode) KeyedStoreIC.
3653 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
3654 ASSERT(ToRegister(instr->object()).is(a2));
3655 ASSERT(ToRegister(instr->key()).is(a1));
3656 ASSERT(ToRegister(instr->value()).is(a0));
3658 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
3659 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
3660 : isolate()->builtins()->KeyedStoreIC_Initialize();
3661 CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Elements-kind transition. If the object's current map matches the
// expected source map: a smi-only -> object transition is done in place
// (store the new map plus a map write barrier); smi-only -> double and
// double -> object transitions call the respective builtin with the
// object copied into a2 and the new map in a3. The result register
// aliases the unchanged object pointer.
// NOTE(review): this listing omits some original lines (e.g. an Abort for
// unexpected kind pairs and closing braces), so not every statement is
// visible here.
3665 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
3666 Register object_reg = ToRegister(instr->object());
3667 Register new_map_reg = ToRegister(instr->new_map_reg());
3668 Register scratch = scratch0();
3670 Handle<Map> from_map = instr->original_map();
3671 Handle<Map> to_map = instr->transitioned_map();
3672 ElementsKind from_kind = from_map->elements_kind();
3673 ElementsKind to_kind = to_map->elements_kind();
3675 __ mov(ToRegister(instr->result()), object_reg);
3677 Label not_applicable;
3678 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
3679 __ Branch(&not_applicable, ne, scratch, Operand(from_map));
3681 __ li(new_map_reg, Operand(to_map));
3682 if (from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) {
3683 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier for the map store; ra has already been saved by the
// frame setup (kRAHasBeenSaved).
3685 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
3686 scratch, kRAHasBeenSaved, kDontSaveFPRegs);
3687 } else if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
3688 to_kind == FAST_DOUBLE_ELEMENTS) {
3689 Register fixed_object_reg = ToRegister(instr->temp_reg());
3690 ASSERT(fixed_object_reg.is(a2));
3691 ASSERT(new_map_reg.is(a3));
3692 __ mov(fixed_object_reg, object_reg);
3693 CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
3694 RelocInfo::CODE_TARGET, instr);
3695 } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
3696 Register fixed_object_reg = ToRegister(instr->temp_reg());
3697 ASSERT(fixed_object_reg.is(a2));
3698 ASSERT(new_map_reg.is(a3));
3699 __ mov(fixed_object_reg, object_reg);
3700 CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
3701 RelocInfo::CODE_TARGET, instr);
3705 __ bind(&not_applicable);
// String concatenation: pushes both operands and calls StringAddStub
// (no receiver checks in the stub).
3709 void LCodeGen::DoStringAdd(LStringAdd* instr) {
3710 __ push(ToRegister(instr->left()));
3711 __ push(ToRegister(instr->right()));
3712 StringAddStub stub(NO_STRING_CHECK_IN_STUB);
3713 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// String.charCodeAt: fast path via StringCharLoadGenerator, with a
// deferred runtime fallback (DoDeferredStringCharCodeAt) for the cases
// the generator cannot handle.
// NOTE(review): the last StringCharLoadGenerator::Generate argument
// (the deferred entry label) is omitted from this listing.
3717 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
3718 class DeferredStringCharCodeAt: public LDeferredCode {
3720 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
3721 : LDeferredCode(codegen), instr_(instr) { }
3722 virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
3723 virtual LInstruction* instr() { return instr_; }
3725 LStringCharCodeAt* instr_;
3728 DeferredStringCharCodeAt* deferred =
3729 new DeferredStringCharCodeAt(this, instr);
3730 StringCharLoadGenerator::Generate(masm(),
3731 ToRegister(instr->string()),
3732 ToRegister(instr->index()),
3733 ToRegister(instr->result()),
3735 __ bind(deferred->exit());
3739 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
3740 Register string = ToRegister(instr->string());
3741 Register result = ToRegister(instr->result());
3742 Register scratch = scratch0();
3744 // TODO(3095996): Get rid of this. For now, we need to make the
3745 // result register contain a valid pointer because it is already
3746 // contained in the register pointer map.
3747 __ mov(result, zero_reg);
3749 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3751 // Push the index as a smi. This is safe because of the checks in
3752 // DoStringCharCodeAt above.
3753 if (instr->index()->IsConstantOperand()) {
3754 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3755 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
3758 Register index = ToRegister(instr->index());
3762 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
3763 if (FLAG_debug_code) {
3764 __ AbortIfNotSmi(v0);
3767 __ StoreToSafepointRegisterSlot(v0, result);
// String.fromCharCode for a single code unit: fast path looks up the
// single-character string cache, taking the deferred runtime path when
// the code is above the one-byte maximum or the cache entry is the
// undefined sentinel (cache miss).
3771 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
3772 class DeferredStringCharFromCode: public LDeferredCode {
3774 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
3775 : LDeferredCode(codegen), instr_(instr) { }
3776 virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
3777 virtual LInstruction* instr() { return instr_; }
3779 LStringCharFromCode* instr_;
3782 DeferredStringCharFromCode* deferred =
3783 new DeferredStringCharFromCode(this, instr);
3785 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
3786 Register char_code = ToRegister(instr->char_code());
3787 Register result = ToRegister(instr->result());
3788 Register scratch = scratch0();
3789 ASSERT(!char_code.is(result));
3791 __ Branch(deferred->entry(), hi,
3792 char_code, Operand(String::kMaxAsciiCharCode));
3793 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
3794 __ sll(scratch, char_code, kPointerSizeLog2);
3795 __ Addu(result, result, scratch)
3796 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
3797 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3798 __ Branch(deferred->entry(), eq, result, Operand(scratch));
3799 __ bind(deferred->exit());
3803 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
3804 Register char_code = ToRegister(instr->char_code());
3805 Register result = ToRegister(instr->result());
3807 // TODO(3095996): Get rid of this. For now, we need to make the
3808 // result register contain a valid pointer because it is already
3809 // contained in the register pointer map.
3810 __ mov(result, zero_reg);
3812 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3813 __ SmiTag(char_code);
3815 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
3816 __ StoreToSafepointRegisterSlot(v0, result);
// String length: a single load of the (smi) length field.
3820 void LCodeGen::DoStringLength(LStringLength* instr) {
3821 Register string = ToRegister(instr->InputAt(0));
3822 Register result = ToRegister(instr->result());
3823 __ lw(result, FieldMemOperand(string, String::kLengthOffset));
// int32 -> double conversion: moves the value (from a register or a
// stack slot) into an FPU register via mtc1, then cvt.d.w.
3827 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
3828 LOperand* input = instr->InputAt(0);
3829 ASSERT(input->IsRegister() || input->IsStackSlot());
3830 LOperand* output = instr->result();
3831 ASSERT(output->IsDoubleRegister());
3832 FPURegister single_scratch = double_scratch0().low();
3833 if (input->IsStackSlot()) {
3834 Register scratch = scratch0();
3835 __ lw(scratch, ToMemOperand(input));
3836 __ mtc1(scratch, single_scratch);
3838 __ mtc1(ToRegister(input), single_scratch);
3840 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
// Tag an int32 as a smi; on overflow (value does not fit in 31 bits) the
// deferred path allocates a heap number instead.
3844 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
3845 class DeferredNumberTagI: public LDeferredCode {
3847 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
3848 : LDeferredCode(codegen), instr_(instr) { }
3849 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
3850 virtual LInstruction* instr() { return instr_; }
3852 LNumberTagI* instr_;
3855 Register src = ToRegister(instr->InputAt(0));
3856 Register dst = ToRegister(instr->result());
3857 Register overflow = scratch0();
3859 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
3860 __ SmiTagCheckOverflow(dst, src, overflow);
3861 __ BranchOnOverflow(deferred->entry(), overflow);
3862 __ bind(deferred->exit());
// Deferred path for DoNumberTagI: the smi tag overflowed, so recover the
// original int32 (untag then flip the top bit, since overflow means bits
// 30/31 disagreed), convert it to a double, allocate a heap number
// (inline when FLAG_inline_new, otherwise via the runtime), and store
// the double into it.
// NOTE(review): this listing omits several original lines (label
// declarations/binds for the slow/done paths and some register moves),
// so not every statement is visible here.
3866 void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
3868 Register src = ToRegister(instr->InputAt(0));
3869 Register dst = ToRegister(instr->result());
3870 FPURegister dbl_scratch = double_scratch0();
3872 // Preserve the value of all registers.
3873 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3875 // There was overflow, so bits 30 and 31 of the original integer
3876 // disagree. Try to allocate a heap number in new space and store
3877 // the value in there. If that fails, call the runtime system.
3880 __ SmiUntag(src, dst);
3881 __ Xor(src, src, Operand(0x80000000));
3883 __ mtc1(src, dbl_scratch);
3884 __ cvt_d_w(dbl_scratch, dbl_scratch);
3885 if (FLAG_inline_new) {
3886 __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
3887 __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
3892 // Slow case: Call the runtime system to do the number allocation.
3895 // TODO(3095996): Put a valid pointer value in the stack slot where the result
3896 // register is stored, as this register is in the pointer map, but contains an
3898 __ StoreToSafepointRegisterSlot(zero_reg, dst);
3899 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3902 // Done. Put the value in dbl_scratch into the value of the allocated heap
3905 __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
3906 __ StoreToSafepointRegisterSlot(dst, dst);
// Box a double into a fresh heap number: inline allocation when
// FLAG_inline_new (falling back to the deferred runtime allocation on
// failure), then store the double payload into the new object.
3910 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
3911 class DeferredNumberTagD: public LDeferredCode {
3913 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
3914 : LDeferredCode(codegen), instr_(instr) { }
3915 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
3916 virtual LInstruction* instr() { return instr_; }
3918 LNumberTagD* instr_;
3921 DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
3922 Register scratch = scratch0();
3923 Register reg = ToRegister(instr->result());
3924 Register temp1 = ToRegister(instr->TempAt(0));
3925 Register temp2 = ToRegister(instr->TempAt(1));
3927 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
3928 if (FLAG_inline_new) {
3929 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
3930 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
3932 __ Branch(deferred->entry());
3934 __ bind(deferred->exit());
3935 __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Deferred path for DoNumberTagD: allocates the heap number via the
// runtime and stores the new pointer (from v0) into the result's
// safepoint slot. The result register is zeroed first so the pointer
// map never sees a stale value.
3939 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
3940 // TODO(3095996): Get rid of this. For now, we need to make the
3941 // result register contain a valid pointer because it is already
3942 // contained in the register pointer map.
3943 Register reg = ToRegister(instr->result());
3944 __ mov(reg, zero_reg);
3946 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3947 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
3948 __ StoreToSafepointRegisterSlot(v0, reg);
// Smi-tag an int32 that is statically known not to overflow.
3952 void LCodeGen::DoSmiTag(LSmiTag* instr) {
3953 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
3954 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
// Untag a smi to int32. When a check is needed, the heap-object tag bit
// is tested and we deoptimize if the input was not a smi.
3958 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
3959 Register scratch = scratch0();
3960 Register input = ToRegister(instr->InputAt(0));
3961 Register result = ToRegister(instr->result());
3962 if (instr->needs_check()) {
3963 STATIC_ASSERT(kHeapObjectTag == 1);
3964 // If the input is a HeapObject, value of scratch won't be zero.
3965 __ And(scratch, input, Operand(kHeapObjectTag));
3966 __ SmiUntag(result, input);
3967 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3969 __ SmiUntag(result, input);
// Converts a tagged value in input_reg into a double in result_reg.
// Smis are untagged and converted via mtc1/cvt.d.w; heap numbers have
// their payload loaded with ldc1. Non-heap-number objects either deopt
// (deoptimize_on_undefined) or, if equal to undefined, load the canonical
// NaN; anything else deopts. With deoptimize_on_minus_zero, a loaded
// -0.0 (low word zero, sign bit set) also deopts.
// NOTE(review): this listing omits some original lines (binds of the
// load_smi/done labels and closing braces), so not every statement is
// visible here.
3974 void LCodeGen::EmitNumberUntagD(Register input_reg,
3975 DoubleRegister result_reg,
3976 bool deoptimize_on_undefined,
3977 bool deoptimize_on_minus_zero,
3978 LEnvironment* env) {
3979 Register scratch = scratch0();
3981 Label load_smi, heap_number, done;
3984 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
3986 // Heap number map check.
3987 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
3988 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3989 if (deoptimize_on_undefined) {
3990 DeoptimizeIf(ne, env, scratch, Operand(at));
3993 __ Branch(&heap_number, eq, scratch, Operand(at));
3995 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
3996 DeoptimizeIf(ne, env, input_reg, Operand(at));
3998 // Convert undefined to NaN.
3999 __ LoadRoot(at, Heap::kNanValueRootIndex);
4000 __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
4003 __ bind(&heap_number);
4005 // Heap number to double register conversion.
4006 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4007 if (deoptimize_on_minus_zero) {
4008 __ mfc1(at, result_reg.low());
4009 __ Branch(&done, ne, at, Operand(zero_reg));
4010 __ mfc1(scratch, result_reg.high());
4011 DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
4015 // Smi to double register conversion
4017 // scratch: untagged value of input_reg
4018 __ mtc1(scratch, result_reg)
4019 __ cvt_d_w(result_reg, result_reg);
// Deferred path of tagged -> int32 conversion for a non-smi input.
// Truncating mode (JS bitwise ops): heap numbers go through
// EmitECMATruncate; undefined becomes 0; anything else deopts.
// Non-truncating mode: deopt unless the input is a heap number, then
// round-to-zero via EmitFPUTruncate, deopting on an inexact conversion
// and, when required, on a -0 result (detected via the double's sign
// bit). The result is written back into input_reg in place.
// NOTE(review): this listing omits several original lines (EmitECMATruncate
// and EmitFPUTruncate argument lists, label binds, closing braces), so
// not every statement is visible here.
4024 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4025 Register input_reg = ToRegister(instr->InputAt(0));
4026 Register scratch1 = scratch0();
4027 Register scratch2 = ToRegister(instr->TempAt(0));
4028 DoubleRegister double_scratch = double_scratch0();
4029 FPURegister single_scratch = double_scratch.low();
4031 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4032 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4036 // The input is a tagged HeapObject.
4037 // Heap number map check.
4038 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4039 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4040 // This 'at' value and scratch1 map value are used for tests in both clauses
4043 if (instr->truncating()) {
4044 Register scratch3 = ToRegister(instr->TempAt(1));
4045 DoubleRegister double_scratch2 = ToDoubleRegister(instr->TempAt(2));
4046 ASSERT(!scratch3.is(input_reg) &&
4047 !scratch3.is(scratch1) &&
4048 !scratch3.is(scratch2));
4049 // Performs a truncating conversion of a floating point number as used by
4050 // the JS bitwise operations.
4052 __ Branch(&heap_number, eq, scratch1, Operand(at)); // HeapNumber map?
4053 // Check for undefined. Undefined is converted to zero for truncating
4055 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4056 DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
4057 ASSERT(ToRegister(instr->result()).is(input_reg));
4058 __ mov(input_reg, zero_reg);
4061 __ bind(&heap_number);
4062 __ ldc1(double_scratch2,
4063 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4064 __ EmitECMATruncate(input_reg,
4071 // Deoptimize if we don't have a heap number.
4072 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4074 // Load the double value.
4075 __ ldc1(double_scratch,
4076 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4078 Register except_flag = scratch2;
4079 __ EmitFPUTruncate(kRoundToZero,
4084 kCheckForInexactConversion);
4086 // Deopt if the operation did not succeed.
4087 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4090 __ mfc1(input_reg, single_scratch);
4092 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4093 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4095 __ mfc1(scratch1, double_scratch.high());
4096 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4097 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Tagged -> int32: smis are untagged inline (in place — input aliases
// the result); heap objects take the deferred path
// (DoDeferredTaggedToI).
4104 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4105 class DeferredTaggedToI: public LDeferredCode {
4107 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4108 : LDeferredCode(codegen), instr_(instr) { }
4109 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
4110 virtual LInstruction* instr() { return instr_; }
4115 LOperand* input = instr->InputAt(0);
4116 ASSERT(input->IsRegister());
4117 ASSERT(input->Equals(instr->result()));
4119 Register input_reg = ToRegister(input);
4121 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
4123 // Let the deferred code handle the HeapObject case.
4124 __ JumpIfNotSmi(input_reg, deferred->entry());
4126 // Smi to int32 conversion.
4127 __ SmiUntag(input_reg);
4128 __ bind(deferred->exit());
// Unboxes a tagged number into a double register. All the real work
// (smi vs. heap-number dispatch, deopt-on-undefined / deopt-on-minus-zero
// policies taken from the hydrogen instruction) is done by
// EmitNumberUntagD.
4132 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4133 LOperand* input = instr->InputAt(0);
4134 ASSERT(input->IsRegister());
4135 LOperand* result = instr->result();
4136 ASSERT(result->IsDoubleRegister());
4138 Register input_reg = ToRegister(input);
4139 DoubleRegister result_reg = ToDoubleRegister(result);
4141 EmitNumberUntagD(input_reg, result_reg,
4142 instr->hydrogen()->deoptimize_on_undefined(),
4143 instr->hydrogen()->deoptimize_on_minus_zero(),
4144 instr->environment());
// Converts a double to an int32. Truncating mode uses the ECMA
// ToInt32-style truncation (EmitECMATruncate); otherwise the value must
// convert exactly — EmitFPUTruncate sets except_flag on an inexact
// conversion and we deoptimize.
// NOTE(review): excerpt gaps — the argument lists of EmitECMATruncate /
// EmitFPUTruncate and the else/closing braces were dropped by extraction.
4148 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4149 Register result_reg = ToRegister(instr->result());
4150 Register scratch1 = scratch0();
4151 Register scratch2 = ToRegister(instr->TempAt(0));
4152 DoubleRegister double_input = ToDoubleRegister(instr->InputAt(0));
4153 FPURegister single_scratch = double_scratch0().low();
4155 if (instr->truncating()) {
4156 Register scratch3 = ToRegister(instr->TempAt(1));
4157 __ EmitECMATruncate(result_reg,
4164 Register except_flag = scratch2;
4166 __ EmitFPUTruncate(kRoundToMinusInf,
4171 kCheckForInexactConversion);
4173 // Deopt if the operation did not succeed (except_flag != 0).
4174 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
// Move the converted 32-bit result out of the FPU scratch register.
4177 __ mfc1(result_reg, single_scratch);
// Deoptimizes unless the input is a smi (its tag bits are zero).
4182 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4183 LOperand* input = instr->InputAt(0);
4184 __ And(at, ToRegister(input), Operand(kSmiTagMask));
4185 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
// Deoptimizes if the input IS a smi — the mirror image of DoCheckSmi
// (same mask, inverted condition).
4189 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4190 LOperand* input = instr->InputAt(0);
4191 __ And(at, ToRegister(input), Operand(kSmiTagMask));
4192 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
// Deoptimizes unless the object's instance type satisfies the hydrogen
// check: either an [first, last] interval check, or a mask-and-tag test
// on the type byte.
// NOTE(review): excerpt gaps — declarations of first/last/mask/tag, the
// else branches and closing braces were dropped by extraction.
4196 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4197 Register input = ToRegister(instr->InputAt(0));
4198 Register scratch = scratch0();
// Loads the instance type of `input` into `scratch`.
4200 __ GetObjectType(input, scratch, scratch);
4202 if (instr->hydrogen()->is_interval_check()) {
4205 instr->hydrogen()->GetCheckInterval(&first, &last);
4207 // If there is only one type in the interval check for equality.
4208 if (first == last) {
4209 DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
// Interval check: below `first` (unsigned) deopts ...
4211 DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
4212 // Omit check for the last type.
4213 if (last != LAST_TYPE) {
4214 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
4220 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
// Power-of-two mask with tag 0 or == mask reduces to a single AND plus
// a zero/non-zero test.
4222 if (IsPowerOf2(mask)) {
4223 ASSERT(tag == 0 || IsPowerOf2(tag));
4224 __ And(at, scratch, mask);
4225 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
4226 at, Operand(zero_reg));
// General case: masked type byte must equal the tag exactly.
4228 __ And(scratch, scratch, Operand(mask));
4229 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
// Deoptimizes unless the register holds the expected JSFunction. If the
// target lives in new-space it cannot be embedded directly in the code
// (the GC may move it), so it is held in a JSGlobalPropertyCell and the
// comparison goes through the cell's value.
// NOTE(review): `reg` at 4239 redundantly shadows the identical
// declaration at 4236 — harmless (same value) but worth cleaning up.
// Excerpt gaps: the Operand arguments of the two DeoptimizeIf calls and
// the else/closing braces were dropped by extraction.
4235 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
4236 Register reg = ToRegister(instr->value());
4237 Handle<JSFunction> target = instr->hydrogen()->target();
4238 if (isolate()->heap()->InNewSpace(*target)) {
4239 Register reg = ToRegister(instr->value());
4240 Handle<JSGlobalPropertyCell> cell =
4241 isolate()->factory()->NewJSGlobalPropertyCell(target);
4242 __ li(at, Operand(Handle<Object>(cell)));
4243 __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
4244 DeoptimizeIf(ne, instr->environment(), reg,
4247 DeoptimizeIf(ne, instr->environment(), reg,
// Shared helper: branches to `success` when the object's map matches
// `map` (per CompareMapMode), otherwise falls through to an
// unconditional deopt. Used by DoCheckMaps and DoCheckPrototypeMaps.
// NOTE(review): excerpt gaps — the scratch/map parameter lines and the
// `success` label declaration/bind were dropped by extraction.
4253 void LCodeGen::DoCheckMapCommon(Register reg,
4256 CompareMapMode mode,
4257 LEnvironment* env) {
4259 __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode);
// Reached only on a map mismatch: always deoptimize.
4260 DeoptimizeIf(al, env);
// Deoptimizes unless the object's map is one of the maps in the
// hydrogen map set. The first N-1 maps branch to `success` on a match;
// the last one goes through DoCheckMapCommon, which deopts on mismatch.
// NOTE(review): excerpt gaps — the `success` label declaration and its
// bind after the final check were dropped by extraction.
4265 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
4266 Register scratch = scratch0();
4267 LOperand* input = instr->InputAt(0);
4268 ASSERT(input->IsRegister());
4269 Register reg = ToRegister(input);
4271 SmallMapList* map_set = instr->hydrogen()->map_set();
4272 for (int i = 0; i < map_set->length() - 1; i++) {
4273 Handle<Map> map = map_set->at(i);
4274 __ CompareMapAndBranch(
4275 reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP);
// Last map: match falls through, mismatch deoptimizes.
4277 Handle<Map> map = map_set->last();
4278 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
// Clamps a double to the uint8 range [0, 255] via the macro-assembler
// helper (used e.g. for pixel-array stores).
4283 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4284 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
4285 Register result_reg = ToRegister(instr->result());
4286 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4287 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
// Clamps an int32 to the uint8 range [0, 255].
4291 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
4292 Register unclamped_reg = ToRegister(instr->unclamped());
4293 Register result_reg = ToRegister(instr->result());
4294 __ ClampUint8(result_reg, unclamped_reg);
// Clamps a tagged value to uint8: smis go straight to ClampUint8,
// heap numbers through ClampDoubleToUint8, undefined becomes 0, and
// anything else deoptimizes.
// NOTE(review): excerpt gaps — the branches to `done`, the `is_smi`
// bind and the final `done` bind were dropped by extraction.
4298 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
4299 Register scratch = scratch0();
4300 Register input_reg = ToRegister(instr->unclamped());
4301 Register result_reg = ToRegister(instr->result());
4302 DoubleRegister temp_reg = ToDoubleRegister(instr->TempAt(0));
4303 Label is_smi, done, heap_number;
4305 // Both smi and heap number cases are handled.
4306 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
4308 // Check for heap number
4309 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4310 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
4312 // Check for undefined. Undefined is converted to zero for clamping
// Not a smi, heap number, or undefined => deoptimize.
4314 DeoptimizeIf(ne, instr->environment(), input_reg,
4315 Operand(factory()->undefined_value()));
4316 __ mov(result_reg, zero_reg);
4320 __ bind(&heap_number);
4321 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
4322 HeapNumber::kValueOffset));
4323 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
// Smi path: `scratch` already holds the untagged value.
4327 __ ClampUint8(result_reg, scratch);
// Walks the prototype chain from instr->prototype() to instr->holder(),
// deoptimizing (via DoCheckMapCommon) if any map along the chain — or
// the holder's own map — no longer matches. ALLOW_ELEMENT_TRANSITION_MAPS
// tolerates elements-kind transitions of the same map.
// NOTE(review): excerpt gap — the advance of `current_prototype` to the
// next prototype (line 4348/4349 assignment) is partially dropped.
4333 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
4334 Register temp1 = ToRegister(instr->TempAt(0));
4335 Register temp2 = ToRegister(instr->TempAt(1));
4337 Handle<JSObject> holder = instr->holder();
4338 Handle<JSObject> current_prototype = instr->prototype();
4340 // Load prototype object.
4341 __ LoadHeapObject(temp1, current_prototype);
4343 // Check prototype maps up to the holder.
4344 while (!current_prototype.is_identical_to(holder)) {
4345 DoCheckMapCommon(temp1, temp2,
4346 Handle<Map>(current_prototype->map()),
4347 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
4349 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
4350 // Load next prototype object.
4351 __ LoadHeapObject(temp1, current_prototype);
4354 // Check the holder map.
4355 DoCheckMapCommon(temp1, temp2,
4356 Handle<Map>(current_prototype->map()),
4357 ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
// Inline-allocates a fresh JSObject for `new Constructor(...)`:
// allocates instance_size bytes in new-space (deferred code falls back
// to the runtime on failure), then installs the initial map, empty
// elements/properties arrays, and undefined in all in-object property
// slots.
// NOTE(review): excerpt gaps — the deferred class's private section,
// the tail arguments of AllocateInNewSpace (result/scratch registers
// and the deferred entry label) and several closing braces were dropped
// by extraction.
4361 void LCodeGen::DoAllocateObject(LAllocateObject* instr) {
4362 class DeferredAllocateObject: public LDeferredCode {
4364 DeferredAllocateObject(LCodeGen* codegen, LAllocateObject* instr)
4365 : LDeferredCode(codegen), instr_(instr) { }
4366 virtual void Generate() { codegen()->DoDeferredAllocateObject(instr_); }
4367 virtual LInstruction* instr() { return instr_; }
4369 LAllocateObject* instr_;
4372 DeferredAllocateObject* deferred = new DeferredAllocateObject(this, instr);
4374 Register result = ToRegister(instr->result());
4375 Register scratch = ToRegister(instr->TempAt(0));
4376 Register scratch2 = ToRegister(instr->TempAt(1));
4377 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4378 Handle<Map> initial_map(constructor->initial_map());
4379 int instance_size = initial_map->instance_size();
// All pre-allocated slots must be accounted for in-object; otherwise
// this fast path would miss properties.
4380 ASSERT(initial_map->pre_allocated_property_fields() +
4381 initial_map->unused_property_fields() -
4382 initial_map->inobject_properties() == 0);
4384 // Allocate memory for the object. The initial map might change when
4385 // the constructor's prototype changes, but instance size and property
4386 // counts remain unchanged (if slack tracking finished).
4387 ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress());
4388 __ AllocateInNewSpace(instance_size,
4395 __ bind(deferred->exit());
4396 if (FLAG_debug_code) {
4397 Label is_in_new_space;
4398 __ JumpIfInNewSpace(result, scratch, &is_in_new_space);
4399 __ Abort("Allocated object is not in new-space");
4400 __ bind(&is_in_new_space);
4403 // Load the initial map.
4404 Register map = scratch;
4405 __ LoadHeapObject(map, constructor);
4406 __ lw(map, FieldMemOperand(map, JSFunction::kPrototypeOrInitialMapOffset));
4408 // Initialize map and fields of the newly allocated object.
4409 ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
4410 __ sw(map, FieldMemOperand(result, JSObject::kMapOffset));
4411 __ LoadRoot(scratch, Heap::kEmptyFixedArrayRootIndex);
4412 __ sw(scratch, FieldMemOperand(result, JSObject::kElementsOffset));
4413 __ sw(scratch, FieldMemOperand(result, JSObject::kPropertiesOffset));
// Pre-fill every in-object property slot with undefined.
4414 if (initial_map->inobject_properties() != 0) {
4415 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4416 for (int i = 0; i < initial_map->inobject_properties(); i++) {
4417 int property_offset = JSObject::kHeaderSize + i * kPointerSize;
4418 __ sw(scratch, FieldMemOperand(result, property_offset));
// Slow path for DoAllocateObject: calls Runtime::kAllocateInNewSpace
// with the instance size (as a smi) and stores the returned object (v0)
// into the safepoint slot of the result register.
4424 void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
4425 Register result = ToRegister(instr->result());
4426 Handle<JSFunction> constructor = instr->hydrogen()->constructor();
4427 Handle<Map> initial_map(constructor->initial_map());
4428 int instance_size = initial_map->instance_size();
4430 // TODO(3095996): Get rid of this. For now, we need to make the
4431 // result register contain a valid pointer because it is already
4432 // contained in the register pointer map.
4433 __ mov(result, zero_reg);
4435 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4436 __ li(a0, Operand(Smi::FromInt(instance_size)));
4438 CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
4439 __ StoreToSafepointRegisterSlot(v0, result);
// Materializes an array literal. First (unless the boilerplate is
// already FAST_ELEMENTS) verifies the boilerplate's elements kind and
// deopts on a mismatch; then pushes literals array, literal index and
// an empty fixed array, and dispatches to the cheapest clone strategy:
// COW stub, deep runtime clone, shallow runtime clone, or shallow stub.
// NOTE(review): excerpt gaps — the DeoptimizeIf condition/register for
// the elements-kind check (around 4459-4461) and closing braces were
// dropped by extraction.
4443 void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
4444 Heap* heap = isolate()->heap();
4445 ElementsKind boilerplate_elements_kind =
4446 instr->hydrogen()->boilerplate_elements_kind();
4448 // Deopt if the array literal boilerplate ElementsKind is of a type different
4449 // than the expected one. The check isn't necessary if the boilerplate has
4450 // already been converted to FAST_ELEMENTS.
4451 if (boilerplate_elements_kind != FAST_ELEMENTS) {
4452 __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
4453 // Load map into a2.
4454 __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4455 // Load the map's "bit field 2".
4456 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4457 // Retrieve elements_kind from bit field 2.
4458 __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4460 instr->environment(),
4462 Operand(boilerplate_elements_kind));
4464 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4465 __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4466 __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4467 // Boilerplate already exists, constant elements are never accessed.
4468 // Pass an empty fixed array.
4469 __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
4470 __ Push(a3, a2, a1);
4472 // Pick the right runtime function or stub to call.
4473 int length = instr->hydrogen()->length();
4474 if (instr->hydrogen()->IsCopyOnWrite()) {
4475 ASSERT(instr->hydrogen()->depth() == 1);
4476 FastCloneShallowArrayStub::Mode mode =
4477 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
4478 FastCloneShallowArrayStub stub(mode, length);
4479 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4480 } else if (instr->hydrogen()->depth() > 1) {
// Nested literals need a full (deep) runtime clone.
4481 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
4482 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
4483 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
4485 FastCloneShallowArrayStub::Mode mode =
4486 boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
4487 ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
4488 : FastCloneShallowArrayStub::CLONE_ELEMENTS;
4489 FastCloneShallowArrayStub stub(mode, length);
4490 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Emits code that deep-copies `object` (header, in-object properties and
// non-COW elements backing store) from `source` into the flat allocation
// at result+*offset, recursing into nested JSObjects. `*offset` is
// advanced past this object and its backing store so siblings land right
// after. Uses a2 as the only scratch register (asserted below).
// NOTE(review): excerpt gaps — the result/source/offset parameter lines,
// the `has_elements` early-return, and some loop braces were dropped by
// extraction.
4495 void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
4499 ASSERT(!source.is(a2));
4500 ASSERT(!result.is(a2));
4502 // Only elements backing stores for non-COW arrays need to be copied.
4503 Handle<FixedArrayBase> elements(object->elements());
4504 bool has_elements = elements->length() > 0 &&
4505 elements->map() != isolate()->heap()->fixed_cow_array_map();
4507 // Increase the offset so that subsequent objects end up right after
4508 // this object and its backing store.
4509 int object_offset = *offset;
4510 int object_size = object->map()->instance_size();
4511 int elements_offset = *offset + object_size;
4512 int elements_size = has_elements ? elements->Size() : 0;
4513 *offset += object_size + elements_size;
4515 // Copy object header.
4516 ASSERT(object->properties()->length() == 0);
4517 int inobject_properties = object->map()->inobject_properties();
4518 int header_size = object_size - inobject_properties * kPointerSize;
4519 for (int i = 0; i < header_size; i += kPointerSize) {
// The elements pointer must point at the copied backing store, not the
// boilerplate's.
4520 if (has_elements && i == JSObject::kElementsOffset) {
4521 __ Addu(a2, result, Operand(elements_offset));
4523 __ lw(a2, FieldMemOperand(source, i));
4525 __ sw(a2, FieldMemOperand(result, object_offset + i));
4528 // Copy in-object properties.
4529 for (int i = 0; i < inobject_properties; i++) {
4530 int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
4531 Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
4532 if (value->IsJSObject()) {
// Nested object: store a pointer to where it WILL be copied, then
// recurse to copy it there.
4533 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4534 __ Addu(a2, result, Operand(*offset));
4535 __ sw(a2, FieldMemOperand(result, total_offset));
4536 __ LoadHeapObject(source, value_object);
4537 EmitDeepCopy(value_object, result, source, offset);
4538 } else if (value->IsHeapObject()) {
4539 __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4540 __ sw(a2, FieldMemOperand(result, total_offset));
// Smi case: the value can be embedded directly.
4542 __ li(a2, Operand(value));
4543 __ sw(a2, FieldMemOperand(result, total_offset));
4549 // Copy elements backing store header.
4550 __ LoadHeapObject(source, elements);
4551 for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
4552 __ lw(a2, FieldMemOperand(source, i));
4553 __ sw(a2, FieldMemOperand(result, elements_offset + i));
4556 // Copy elements backing store content.
4557 int elements_length = has_elements ? elements->length() : 0;
4558 if (elements->IsFixedDoubleArray()) {
4559 Handle<FixedDoubleArray> double_array =
4560 Handle<FixedDoubleArray>::cast(elements);
4561 for (int i = 0; i < elements_length; i++) {
4562 int64_t value = double_array->get_representation(i);
4563 // We only support little endian mode...
4564 int32_t value_low = value & 0xFFFFFFFF;
4565 int32_t value_high = value >> 32;
4567 elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
4568 __ li(a2, Operand(value_low));
4569 __ sw(a2, FieldMemOperand(result, total_offset));
4570 __ li(a2, Operand(value_high));
4571 __ sw(a2, FieldMemOperand(result, total_offset + 4));
4573 } else if (elements->IsFixedArray()) {
4574 Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
4575 for (int i = 0; i < elements_length; i++) {
4576 int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
4577 Handle<Object> value(fast_elements->get(i));
4578 if (value->IsJSObject()) {
// Same forward-pointer-then-recurse scheme as for in-object properties.
4579 Handle<JSObject> value_object = Handle<JSObject>::cast(value);
4580 __ Addu(a2, result, Operand(*offset));
4581 __ sw(a2, FieldMemOperand(result, total_offset));
4582 __ LoadHeapObject(source, value_object);
4583 EmitDeepCopy(value_object, result, source, offset);
4584 } else if (value->IsHeapObject()) {
4585 __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
4586 __ sw(a2, FieldMemOperand(result, total_offset));
4588 __ li(a2, Operand(value));
4589 __ sw(a2, FieldMemOperand(result, total_offset));
// Materializes a nested object literal in a single new-space allocation
// sized to the whole boilerplate graph, then deep-copies the boilerplate
// into it via EmitDeepCopy. Deopts first if the boilerplate's elements
// kind changed (unless it is already FAST_ELEMENTS).
// NOTE(review): excerpt gaps — the jump to `allocated` after the inline
// allocation, the `int offset = 0` declaration before EmitDeepCopy, and
// closing braces were dropped by extraction.
4599 void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
4600 int size = instr->hydrogen()->total_size();
4601 ElementsKind boilerplate_elements_kind =
4602 instr->hydrogen()->boilerplate()->GetElementsKind();
4604 // Deopt if the literal boilerplate ElementsKind is of a type different than
4605 // the expected one. The check isn't necessary if the boilerplate has already
4606 // been converted to FAST_ELEMENTS.
4607 if (boilerplate_elements_kind != FAST_ELEMENTS) {
4608 __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4609 // Load map into a2.
4610 __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
4611 // Load the map's "bit field 2".
4612 __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
4613 // Retrieve elements_kind from bit field 2.
4614 __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
4615 DeoptimizeIf(ne, instr->environment(), a2,
4616 Operand(boilerplate_elements_kind));
4619 // Allocate all objects that are part of the literal in one big
4620 // allocation. This avoids multiple limit checks.
4621 Label allocated, runtime_allocate;
4622 __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
// Fallback when inline allocation fails: let the runtime allocate.
4625 __ bind(&runtime_allocate);
4626 __ li(a0, Operand(Smi::FromInt(size)));
4628 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4630 __ bind(&allocated);
4632 __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
4633 EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
// Sanity check: the deep copy consumed exactly the allocation.
4634 ASSERT_EQ(size, offset);
// Materializes an object literal: pushes literals array, literal index,
// constant properties and flags, then calls the deep-clone runtime,
// shallow-clone runtime, or the FastCloneShallowObjectStub depending on
// depth, flags and property count. Result lands in v0 (asserted).
4638 void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
4639 ASSERT(ToRegister(instr->result()).is(v0));
4640 Handle<FixedArray> literals(instr->environment()->closure()->literals());
4641 Handle<FixedArray> constant_properties =
4642 instr->hydrogen()->constant_properties();
4644 // Set up the parameters to the stub/runtime call.
4645 __ LoadHeapObject(t0, literals);
4646 __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4647 __ li(a2, Operand(constant_properties));
4648 int flags = instr->hydrogen()->fast_elements()
4649 ? ObjectLiteral::kFastElements
4650 : ObjectLiteral::kNoFlags;
4651 __ li(a1, Operand(Smi::FromInt(flags)));
4652 __ Push(t0, a3, a2, a1);
4654 // Pick the right runtime function or stub to call.
// constant_properties stores key/value pairs, hence the division by 2.
4655 int properties_count = constant_properties->length() / 2;
4656 if (instr->hydrogen()->depth() > 1) {
4657 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
4658 } else if (flags != ObjectLiteral::kFastElements ||
4659 properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
4660 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
4662 FastCloneShallowObjectStub stub(properties_count);
4663 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
// Converts the object in a0 to fast-properties mode via the runtime;
// the converted object is returned in v0.
4668 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
4669 ASSERT(ToRegister(instr->InputAt(0)).is(a0));
4670 ASSERT(ToRegister(instr->result()).is(v0));
4672 CallRuntime(Runtime::kToFastProperties, 1, instr);
// Materializes a regexp literal. Looks up the cached literal in the
// function's literals array; if it is still undefined, calls the
// runtime to create it. Then clones the JSRegExp object (allocating in
// new-space with a runtime fallback) with an unrolled word-copy loop.
// NOTE(review): excerpt gaps — the `materialized` label declaration,
// the move of v0 into a1 after the runtime calls, and some braces were
// dropped by extraction.
4676 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
4678 // Registers will be used as follows:
4679 // a3 = JS function.
4680 // t3 = literals array.
4681 // a1 = regexp literal.
4682 // a0 = regexp literal clone.
4683 // a2 and t0-t2 are used as temporaries.
4684 __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4685 __ lw(t3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
4686 int literal_offset = FixedArray::kHeaderSize +
4687 instr->hydrogen()->literal_index() * kPointerSize;
4688 __ lw(a1, FieldMemOperand(t3, literal_offset));
// undefined in the literal slot means "not yet materialized".
4689 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4690 __ Branch(&materialized, ne, a1, Operand(at));
4692 // Create regexp literal using runtime function
4693 // Result will be in v0.
4694 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
4695 __ li(t1, Operand(instr->hydrogen()->pattern()));
4696 __ li(t0, Operand(instr->hydrogen()->flags()));
4697 __ Push(t3, t2, t1, t0);
4698 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
4701 __ bind(&materialized);
4702 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
4703 Label allocated, runtime_allocate;
4705 __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
4708 __ bind(&runtime_allocate);
4709 __ li(a0, Operand(Smi::FromInt(size)));
4711 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
4714 __ bind(&allocated);
4715 // Copy the content into the newly allocated memory.
4716 // (Unroll copy loop once for better throughput).
4717 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
4718 __ lw(a3, FieldMemOperand(a1, i));
4719 __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
4720 __ sw(a3, FieldMemOperand(v0, i));
4721 __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
// Odd trailing word when size is not a multiple of two pointers.
4723 if ((size % (2 * kPointerSize)) != 0) {
4724 __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
4725 __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
// Creates a closure for a function literal. Non-pretenured functions
// with no literals use the FastNewClosureStub; otherwise it falls back
// to Runtime::kNewClosure with the context, shared info, and a
// pretenure boolean.
4730 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
4731 // Use the fast case closure allocation code that allocates in new
4732 // space for nested functions that don't need literals cloning.
4733 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
4734 bool pretenure = instr->hydrogen()->pretenure();
4735 if (!pretenure && shared_info->num_literals() == 0) {
4736 FastNewClosureStub stub(shared_info->language_mode());
4737 __ li(a1, Operand(shared_info));
4739 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4741 __ li(a2, Operand(shared_info));
4742 __ li(a1, Operand(pretenure
4743 ? factory()->true_value()
4744 : factory()->false_value()));
4745 __ Push(cp, a2, a1);
4746 CallRuntime(Runtime::kNewClosure, 3, instr);
// Implements the generic `typeof` operator by delegating to the
// runtime; result in v0.
// NOTE(review): excerpt gap — the push of `input` before the runtime
// call was dropped by extraction.
4751 void LCodeGen::DoTypeof(LTypeof* instr) {
4752 ASSERT(ToRegister(instr->result()).is(v0));
4753 Register input = ToRegister(instr->InputAt(0));
4755 CallRuntime(Runtime::kTypeof, 1, instr);
// Branches on `typeof x == "literal"` without materializing the string:
// EmitTypeofIs emits the type test and returns the final condition plus
// the two operands (cmp1/cmp2) to branch on. kNoCondition means
// EmitTypeofIs already jumped to false_label for an unknown literal.
4759 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
4760 Register input = ToRegister(instr->InputAt(0));
4761 int true_block = chunk_->LookupDestination(instr->true_block_id());
4762 int false_block = chunk_->LookupDestination(instr->false_block_id());
4763 Label* true_label = chunk_->GetAssemblyLabel(true_block);
4764 Label* false_label = chunk_->GetAssemblyLabel(false_block);
// Operands are out-parameters filled in by EmitTypeofIs.
4766 Register cmp1 = no_reg;
4767 Operand cmp2 = Operand(no_reg);
4769 Condition final_branch_condition = EmitTypeofIs(true_label,
4772 instr->type_literal(),
4776 ASSERT(cmp1.is_valid());
4777 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
4779 if (final_branch_condition != kNoCondition) {
4780 EmitBranch(true_block, false_block, final_branch_condition, cmp1, cmp2);
// Emits the comparison sequence for `typeof input == type_name` and
// returns the condition the caller should branch on (with the operands
// left in cmp1/cmp2, filled on lines this excerpt is missing).
// Exploits MIPS branch delay slots: the instruction after several
// Branch(USE_DELAY_SLOT, ...) calls executes regardless of the branch.
// NOTE(review): excerpt gaps — the false_label/input/cmp1/cmp2
// parameter lines, the cmp1 assignments, and the final else branch
// header were dropped by extraction.
4785 Condition LCodeGen::EmitTypeofIs(Label* true_label,
4788 Handle<String> type_name,
4791 // This function utilizes the delay slot heavily. This is used to load
4792 // values that are always usable without depending on the type of the input
4794 Condition final_branch_condition = kNoCondition;
4795 Register scratch = scratch0();
// "number": smi, or an object whose map is the heap-number map.
4796 if (type_name->Equals(heap()->number_symbol())) {
4797 __ JumpIfSmi(input, true_label);
4798 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4799 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4802 final_branch_condition = eq;
// "string": non-smi, instance type below FIRST_NONSTRING_TYPE, and not
// undetectable.
4804 } else if (type_name->Equals(heap()->string_symbol())) {
4805 __ JumpIfSmi(input, false_label);
4806 __ GetObjectType(input, input, scratch);
4807 __ Branch(USE_DELAY_SLOT, false_label,
4808 ge, scratch, Operand(FIRST_NONSTRING_TYPE));
4809 // input is an object so we can load the BitFieldOffset even if we take the
4811 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4812 __ And(at, at, 1 << Map::kIsUndetectable);
4814 cmp2 = Operand(zero_reg);
4815 final_branch_condition = eq;
// "boolean": identical to the true or false oddball.
4817 } else if (type_name->Equals(heap()->boolean_symbol())) {
4818 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4819 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4820 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4822 cmp2 = Operand(input);
4823 final_branch_condition = eq;
// "null" (harmony typeof only): identical to the null oddball.
4825 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) {
4826 __ LoadRoot(at, Heap::kNullValueRootIndex);
4828 cmp2 = Operand(input);
4829 final_branch_condition = eq;
// "undefined": the undefined oddball, or an undetectable object.
4831 } else if (type_name->Equals(heap()->undefined_symbol())) {
4832 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4833 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4834 // The first instruction of JumpIfSmi is an And - it is safe in the delay
4836 __ JumpIfSmi(input, false_label);
4837 // Check for undetectable objects => true.
4838 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
4839 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4840 __ And(at, at, 1 << Map::kIsUndetectable);
4842 cmp2 = Operand(zero_reg);
4843 final_branch_condition = ne;
// "function": JS_FUNCTION_TYPE or JS_FUNCTION_PROXY_TYPE — the only
// two callable spec-object types (static assert below).
4845 } else if (type_name->Equals(heap()->function_symbol())) {
4846 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
4847 __ JumpIfSmi(input, false_label);
4848 __ GetObjectType(input, scratch, input);
4849 __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
4851 cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
4852 final_branch_condition = eq;
// "object": null (pre-harmony), or a non-callable spec object that is
// not undetectable.
4854 } else if (type_name->Equals(heap()->object_symbol())) {
4855 __ JumpIfSmi(input, false_label);
4856 if (!FLAG_harmony_typeof) {
4857 __ LoadRoot(at, Heap::kNullValueRootIndex);
4858 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
4860 // input is an object, it is safe to use GetObjectType in the delay slot.
4861 __ GetObjectType(input, input, scratch);
4862 __ Branch(USE_DELAY_SLOT, false_label,
4863 lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
4864 // Still an object, so the InstanceType can be loaded.
4865 __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset));
4866 __ Branch(USE_DELAY_SLOT, false_label,
4867 gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
4868 // Still an object, so the BitField can be loaded.
4869 // Check for undetectable objects => false.
4870 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
4871 __ And(at, at, 1 << Map::kIsUndetectable);
4873 cmp2 = Operand(zero_reg);
4874 final_branch_condition = eq;
// Unknown literal: always false; operands set only to satisfy the
// caller's validity assertions.
4878 cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
4879 __ Branch(false_label);
4882 return final_branch_condition;
// Branches on whether the current function was invoked as a
// constructor: EmitIsConstructCall leaves the caller frame's marker in
// temp1, which is compared against the CONSTRUCT frame marker.
4886 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
4887 Register temp1 = ToRegister(instr->TempAt(0));
4888 int true_block = chunk_->LookupDestination(instr->true_block_id());
4889 int false_block = chunk_->LookupDestination(instr->false_block_id());
4891 EmitIsConstructCall(temp1, scratch0());
4893 EmitBranch(true_block, false_block, eq, temp1,
4894 Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
// Loads the calling frame's stack-frame marker into temp1, skipping an
// intervening arguments-adaptor frame if present. temp2 is clobbered.
4898 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
4899 ASSERT(!temp1.is(temp2));
4900 // Get the frame pointer for the calling frame.
4901 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4903 // Skip the arguments adaptor frame if it exists.
4904 Label check_frame_marker;
4905 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
4906 __ Branch(&check_frame_marker, ne, temp2,
4907 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
4908 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
4910 // Check the marker in the calling frame.
4911 __ bind(&check_frame_marker);
4912 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
// Pads the instruction stream with enough space (in whole MIPS
// instructions) since the last lazy-deopt point so the deoptimizer can
// later patch in a call without overwriting unrelated code.
// NOTE(review): excerpt gap — the nop emitted inside the while loop
// (line 4925) was dropped by extraction.
4916 void LCodeGen::EnsureSpaceForLazyDeopt() {
4917 // Ensure that we have enough space after the previous lazy-bailout
4918 // instruction for patching the code here.
4919 int current_pc = masm()->pc_offset();
4920 int patch_size = Deoptimizer::patch_size();
4921 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
4922 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
// Padding must be a whole number of fixed-size MIPS instructions.
4923 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
4924 while (padding_size > 0) {
4926 padding_size -= Assembler::kInstrSize;
4929 last_lazy_deopt_pc_ = masm()->pc_offset();
// Records a lazy-deoptimization point: reserves patch space, registers
// the environment, and ties the deopt index to the last safepoint.
4933 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
4934 EnsureSpaceForLazyDeopt();
4935 ASSERT(instr->HasEnvironment());
4936 LEnvironment* env = instr->environment();
4937 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
4938 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Unconditional deoptimization (`al` = always); the register/operand
// pair is a dummy required by the DeoptimizeIf signature.
4942 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
4943 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
// Implements `delete obj[key]` by invoking the DELETE builtin with the
// object, key, and the current strict-mode flag; a SafepointGenerator
// records the lazy-deopt safepoint around the call.
4947 void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
4948 Register object = ToRegister(instr->object());
4949 Register key = ToRegister(instr->key());
4950 Register strict = scratch0();
4951 __ li(strict, Operand(Smi::FromInt(strict_mode_flag())));
4952 __ Push(object, key, strict);
4953 ASSERT(instr->HasPointerMap());
4954 LPointerMap* pointers = instr->pointer_map();
4955 RecordPosition(pointers->position());
4956 SafepointGenerator safepoint_generator(
4957 this, pointers, Safepoint::kLazyDeopt);
4958 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, safepoint_generator);
// Implements the `in` operator by invoking the IN builtin, mirroring
// DoDeleteProperty's safepoint handling.
// NOTE(review): excerpt gap — the push of key/obj before the builtin
// call (line 4965) was dropped by extraction.
4962 void LCodeGen::DoIn(LIn* instr) {
4963 Register obj = ToRegister(instr->object());
4964 Register key = ToRegister(instr->key());
4966 ASSERT(instr->HasPointerMap());
4967 LPointerMap* pointers = instr->pointer_map();
4968 RecordPosition(pointers->position());
4969 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
4970 __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION, safepoint_generator);
// Deferred slow path for a stack check: calls Runtime::kStackGuard with
// all registers (including doubles) saved, and records the safepoint
// plus the lazy-deopt index for the instruction's environment.
4974 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
4975 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4976 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
4977 RecordSafepointWithLazyDeopt(
4978 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
4979 ASSERT(instr->HasEnvironment());
4980 LEnvironment* env = instr->environment();
4981 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Emits a stack-overflow check. Function-entry checks call the
// StackCheckStub inline when sp is below the limit; backwards-branch
// checks jump to deferred code (DoDeferredStackCheck) instead. Both
// paths register the environment for lazy deoptimization.
// NOTE(review): excerpt gaps — the deferred class's private access
// specifier, the `done` label declaration/bind in the entry path, the
// else header, and closing braces were dropped by extraction.
4985 void LCodeGen::DoStackCheck(LStackCheck* instr) {
4986 class DeferredStackCheck: public LDeferredCode {
4988 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
4989 : LDeferredCode(codegen), instr_(instr) { }
4990 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
4991 virtual LInstruction* instr() { return instr_; }
4993 LStackCheck* instr_;
4996 ASSERT(instr->HasEnvironment());
4997 LEnvironment* env = instr->environment();
4998 // There is no LLazyBailout instruction for stack-checks. We have to
4999 // prepare for lazy deoptimization explicitly here.
5000 if (instr->hydrogen()->is_function_entry()) {
5001 // Perform stack overflow check.
5003 __ LoadRoot(at, Heap::kStackLimitRootIndex);
// hs = unsigned >=: stack pointer still above the limit, no check
// needed.
5004 __ Branch(&done, hs, sp, Operand(at));
5005 StackCheckStub stub;
5006 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5007 EnsureSpaceForLazyDeopt();
5009 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5010 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5012 ASSERT(instr->hydrogen()->is_backwards_branch());
5013 // Perform stack overflow check if this goto needs it before jumping.
5014 DeferredStackCheck* deferred_stack_check =
5015 new DeferredStackCheck(this, instr);
5016 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5017 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5018 EnsureSpaceForLazyDeopt();
5019 __ bind(instr->done_label());
5020 deferred_stack_check->SetExit(instr->done_label());
5021 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5022 // Don't record a deoptimization index for the safepoint here.
5023 // This will be done explicitly when emitting call and the safepoint in
5024 // the deferred code.
// Pseudo-instruction marking the on-stack-replacement entry point: it
// attaches the spilled-register arrays to the environment, registers it
// for deoptimization, and records the current PC as the OSR offset.
// No machine code is emitted.
5029 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5030 // This is a pseudo-instruction that ensures that the environment here is
5031 // properly registered for deoptimization and records the assembler's PC
5033 LEnvironment* environment = instr->environment();
5034 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
5035 instr->SpilledDoubleRegisterArray());
5037 // If the environment were already registered, we would have no way of
5038 // backpatching it with the spill slot operands.
5039 ASSERT(!environment->HasBeenRegistered());
5040 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
// Only one OSR entry per compiled function is expected.
5041 ASSERT(osr_pc_offset_ == -1);
5042 osr_pc_offset_ = masm()->pc_offset();
5046 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5047 Register result = ToRegister(instr->result());
5048 Register object = ToRegister(instr->object());
5049 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5050 DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5052 Register null_value = t1;
5053 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5054 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5056 __ And(at, object, kSmiTagMask);
5057 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5059 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5060 __ GetObjectType(object, a1, a1);
5061 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5063 Label use_cache, call_runtime;
5064 ASSERT(object.is(a0));
5065 __ CheckEnumCache(null_value, &call_runtime);
5067 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5068 __ Branch(&use_cache);
5070 // Get the set of properties to enumerate.
5071 __ bind(&call_runtime);
5073 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5075 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5076 ASSERT(result.is(v0));
5077 __ LoadRoot(at, Heap::kMetaMapRootIndex);
5078 DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5079 __ bind(&use_cache);
5083 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5084 Register map = ToRegister(instr->map());
5085 Register result = ToRegister(instr->result());
5086 __ LoadInstanceDescriptors(map, result);
5088 FieldMemOperand(result, DescriptorArray::kEnumerationIndexOffset));
5090 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5091 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5095 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5096 Register object = ToRegister(instr->value());
5097 Register map = ToRegister(instr->map());
5098 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5099 DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5103 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5104 Register object = ToRegister(instr->object());
5105 Register index = ToRegister(instr->index());
5106 Register result = ToRegister(instr->result());
5107 Register scratch = scratch0();
5109 Label out_of_object, done;
5110 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5111 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5113 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5114 __ Addu(scratch, object, scratch);
5115 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5119 __ bind(&out_of_object);
5120 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5121 // Index is equal to negated out of object property index plus 1.
5122 __ Subu(scratch, result, scratch);
5123 __ lw(result, FieldMemOperand(scratch,
5124 FixedArray::kHeaderSize - kPointerSize));
5131 } } // namespace v8::internal