// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "mips/lithium-codegen-mips.h"
31 #include "mips/lithium-gap-resolver-mips.h"
32 #include "code-stubs.h"
33 #include "stub-cache.h"
34 #include "hydrogen-osr.h"
40 class SafepointGenerator V8_FINAL : public CallWrapper {
42 SafepointGenerator(LCodeGen* codegen,
43 LPointerMap* pointers,
44 Safepoint::DeoptMode mode)
48 virtual ~SafepointGenerator() {}
50 virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
52 virtual void AfterCall() const V8_OVERRIDE {
53 codegen_->RecordSafepoint(pointers_, deopt_mode_);
58 LPointerMap* pointers_;
59 Safepoint::DeoptMode deopt_mode_;
65 bool LCodeGen::GenerateCode() {
66 LPhase phase("Z_Code generation", chunk());
70 // Open a frame scope to indicate that there is a frame on the stack. The
71 // NONE indicates that the scope shouldn't actually generate code to set up
72 // the frame (that is done in GeneratePrologue).
73 FrameScope frame_scope(masm_, StackFrame::NONE);
75 return GeneratePrologue() &&
77 GenerateDeferredCode() &&
78 GenerateDeoptJumpTable() &&
79 GenerateSafepointTable();
83 void LCodeGen::FinishCode(Handle<Code> code) {
85 code->set_stack_slots(GetStackSlotCount());
86 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
87 RegisterDependentCodeForEmbeddedMaps(code);
88 PopulateDeoptimizationData(code);
89 info()->CommitDependencies(code);
93 void LChunkBuilder::Abort(BailoutReason reason) {
94 info()->set_bailout_reason(reason);
99 void LCodeGen::SaveCallerDoubles() {
100 ASSERT(info()->saves_caller_doubles());
101 ASSERT(NeedsEagerFrame());
102 Comment(";;; Save clobbered callee double registers");
104 BitVector* doubles = chunk()->allocated_double_registers();
105 BitVector::Iterator save_iterator(doubles);
106 while (!save_iterator.Done()) {
107 __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
108 MemOperand(sp, count * kDoubleSize));
109 save_iterator.Advance();
115 void LCodeGen::RestoreCallerDoubles() {
116 ASSERT(info()->saves_caller_doubles());
117 ASSERT(NeedsEagerFrame());
118 Comment(";;; Restore clobbered callee double registers");
119 BitVector* doubles = chunk()->allocated_double_registers();
120 BitVector::Iterator save_iterator(doubles);
122 while (!save_iterator.Done()) {
123 __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
124 MemOperand(sp, count * kDoubleSize));
125 save_iterator.Advance();
131 bool LCodeGen::GeneratePrologue() {
// Emits function/stub entry code: optional profiler entry hook, receiver
// fixup for classic-mode calls, frame construction, stack-slot reservation
// (zap-filled under --debug-code), caller-double saves, heap context
// allocation with write barriers, and an optional Runtime::kTraceEnter call.
// NOTE(review): extraction dropped lines from this listing (e.g. the
// 'ok'/'loop' Label declarations and several closing braces) -- restore
// from upstream before compiling.
132 ASSERT(is_generating());
134 if (info()->IsOptimizing()) {
135 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
// Under --stop-at, trap on entry of the named function for debugging.
138 if (strlen(FLAG_stop_at) > 0 &&
139 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
144 // a1: Callee's JS function.
145 // cp: Callee's context.
146 // fp: Caller's frame pointer.
149 // Classic mode functions and builtins need to replace the receiver with the
150 // global proxy when called as functions (without an explicit receiver
152 if (info_->this_has_uses() &&
153 info_->is_classic_mode() &&
154 !info_->is_native()) {
156 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
157 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
158 __ lw(a2, MemOperand(sp, receiver_offset));
// Receiver is undefined: replace it with the global receiver object.
159 __ Branch(&ok, ne, a2, Operand(at));
161 __ lw(a2, GlobalObjectOperand());
162 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
164 __ sw(a2, MemOperand(sp, receiver_offset));
170 info()->set_prologue_offset(masm_->pc_offset());
171 if (NeedsEagerFrame()) {
172 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
173 frame_is_built_ = true;
174 info_->AddNoFrameRange(0, masm_->pc_offset());
177 // Reserve space for the stack slots needed by the code.
178 int slots = GetStackSlotCount();
// With --debug-code, fill the reserved slots with kSlotsZapValue so that
// reads of uninitialized slots are recognizable in crash dumps.
180 if (FLAG_debug_code) {
181 __ Subu(sp, sp, Operand(slots * kPointerSize));
183 __ Addu(a0, sp, Operand(slots * kPointerSize));
184 __ li(a1, Operand(kSlotsZapValue));
187 __ Subu(a0, a0, Operand(kPointerSize));
188 __ sw(a1, MemOperand(a0, 2 * kPointerSize));
189 __ Branch(&loop, ne, a0, Operand(sp));
192 __ Subu(sp, sp, Operand(slots * kPointerSize));
196 if (info()->saves_caller_doubles()) {
200 // Possibly allocate a local context.
201 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
202 if (heap_slots > 0) {
203 Comment(";;; Allocate local context");
204 // Argument to NewContext is the function, which is in a1.
205 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
206 FastNewContextStub stub(heap_slots);
210 __ CallRuntime(Runtime::kNewFunctionContext, 1);
212 RecordSafepoint(Safepoint::kNoLazyDeopt);
213 // Context is returned in both v0. It replaces the context passed to us.
214 // It's saved in the stack and kept live in cp.
216 __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
217 // Copy any necessary parameters into the context.
218 int num_parameters = scope()->num_parameters();
219 for (int i = 0; i < num_parameters; i++) {
220 Variable* var = scope()->parameter(i);
221 if (var->IsContextSlot()) {
222 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
223 (num_parameters - 1 - i) * kPointerSize;
224 // Load parameter from stack.
225 __ lw(a0, MemOperand(fp, parameter_offset));
226 // Store it in the context.
227 MemOperand target = ContextOperand(cp, var->index());
229 // Update the write barrier. This clobbers a3 and a0.
230 __ RecordWriteContextSlot(
231 cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
234 Comment(";;; End allocate local context");
// Trace function entry when --trace is enabled (optimized code only).
238 if (FLAG_trace && info()->IsOptimizing()) {
239 // We have not executed any compiled code yet, so cp still holds the
241 __ CallRuntime(Runtime::kTraceEnter, 0);
243 return !is_aborted();
247 void LCodeGen::GenerateOsrPrologue() {
248 // Generate the OSR entry prologue at the first unknown OSR value, or if there
249 // are none, at the OSR entrypoint instruction.
250 if (osr_pc_offset_ >= 0) return;
252 osr_pc_offset_ = masm()->pc_offset();
254 // Adjust the frame size, subsuming the unoptimized frame into the
256 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
258 __ Subu(sp, sp, Operand(slots * kPointerSize));
262 bool LCodeGen::GenerateDeferredCode() {
// Emits all queued deferred-code stanzas after the main body.  For stubs
// without an eager frame, a minimal STUB frame is built around each stanza.
// NOTE(review): extraction dropped lines here (the HValue* declaration,
// marker push/pop, and closing braces) -- restore from upstream.
263 ASSERT(is_generating());
264 if (deferred_.length() > 0) {
265 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
266 LDeferredCode* code = deferred_[i];
269 instructions_->at(code->instruction_index())->hydrogen_value();
270 RecordAndWritePosition(value->position());
272 Comment(";;; <@%d,#%d> "
273 "-------------------- Deferred %s --------------------",
274 code->instruction_index(),
275 code->instr()->hydrogen_value()->id(),
276 code->instr()->Mnemonic());
277 __ bind(code->entry());
278 if (NeedsDeferredFrame()) {
279 Comment(";;; Build frame");
280 ASSERT(!frame_is_built_);
281 ASSERT(info()->IsStub());
282 frame_is_built_ = true;
// Push context, caller fp and return address, then a STUB frame marker.
283 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
284 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
286 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
287 Comment(";;; Deferred code");
290 if (NeedsDeferredFrame()) {
291 Comment(";;; Destroy frame");
292 ASSERT(frame_is_built_);
294 __ MultiPop(cp.bit() | fp.bit() | ra.bit());
295 frame_is_built_ = false;
297 __ jmp(code->exit());
300 // Deferred code is the last part of the instruction sequence. Mark
301 // the generated code as done unless we bailed out.
302 if (!is_aborted()) status_ = DONE;
303 return !is_aborted();
307 bool LCodeGen::GenerateDeoptJumpTable() {
// Emits the shared deoptimization jump table: one trampoline per distinct
// (entry, bailout type, needs_frame) combination, branched to by
// DeoptimizeIf().  Builds a STUB frame on demand and restores caller
// doubles for stubs that saved them.
// NOTE(review): extraction dropped lines here (the table_start/needs_frame
// Label declarations, else-branches and the final Call through t9) --
// restore from upstream.
308 if (deopt_jump_table_.length() > 0) {
309 Comment(";;; -------------------- Jump table --------------------");
311 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
313 __ bind(&table_start);
315 for (int i = 0; i < deopt_jump_table_.length(); i++) {
316 __ bind(&deopt_jump_table_[i].label);
317 Address entry = deopt_jump_table_[i].address;
318 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
319 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
320 if (id == Deoptimizer::kNotDeoptimizationEntry) {
321 Comment(";;; jump table entry %d.", i);
323 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
325 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
326 if (deopt_jump_table_[i].needs_frame) {
327 ASSERT(!info()->saves_caller_doubles());
// Share a single frame-building stanza between all entries that need it.
328 if (needs_frame.is_bound()) {
329 __ Branch(&needs_frame);
331 __ bind(&needs_frame);
332 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
333 // This variant of deopt can only be used with stubs. Since we don't
334 // have a function pointer to install in the stack frame that we're
335 // building, install a special marker there instead.
336 ASSERT(info()->IsStub());
337 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
339 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
343 if (info()->saves_caller_doubles()) {
344 ASSERT(info()->IsStub());
345 RestoreCallerDoubles();
350 __ RecordComment("]");
352 // The deoptimization jump table is the last part of the instruction
353 // sequence. Mark the generated code as done unless we bailed out.
354 if (!is_aborted()) status_ = DONE;
355 return !is_aborted();
359 bool LCodeGen::GenerateSafepointTable() {
361 safepoints_.Emit(masm(), GetStackSlotCount());
362 return !is_aborted();
366 Register LCodeGen::ToRegister(int index) const {
367 return Register::FromAllocationIndex(index);
371 DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
372 return DoubleRegister::FromAllocationIndex(index);
376 Register LCodeGen::ToRegister(LOperand* op) const {
377 ASSERT(op->IsRegister());
378 return ToRegister(op->index());
382 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
383 if (op->IsRegister()) {
384 return ToRegister(op->index());
385 } else if (op->IsConstantOperand()) {
386 LConstantOperand* const_op = LConstantOperand::cast(op);
387 HConstant* constant = chunk_->LookupConstant(const_op);
388 Handle<Object> literal = constant->handle(isolate());
389 Representation r = chunk_->LookupLiteralRepresentation(const_op);
390 if (r.IsInteger32()) {
391 ASSERT(literal->IsNumber());
392 __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
393 } else if (r.IsSmi()) {
394 ASSERT(constant->HasSmiValue());
395 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
396 } else if (r.IsDouble()) {
397 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
399 ASSERT(r.IsSmiOrTagged());
400 __ li(scratch, literal);
403 } else if (op->IsStackSlot() || op->IsArgument()) {
404 __ lw(scratch, ToMemOperand(op));
412 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
413 ASSERT(op->IsDoubleRegister());
414 return ToDoubleRegister(op->index());
418 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
419 FloatRegister flt_scratch,
420 DoubleRegister dbl_scratch) {
421 if (op->IsDoubleRegister()) {
422 return ToDoubleRegister(op->index());
423 } else if (op->IsConstantOperand()) {
424 LConstantOperand* const_op = LConstantOperand::cast(op);
425 HConstant* constant = chunk_->LookupConstant(const_op);
426 Handle<Object> literal = constant->handle(isolate());
427 Representation r = chunk_->LookupLiteralRepresentation(const_op);
428 if (r.IsInteger32()) {
429 ASSERT(literal->IsNumber());
430 __ li(at, Operand(static_cast<int32_t>(literal->Number())));
431 __ mtc1(at, flt_scratch);
432 __ cvt_d_w(dbl_scratch, flt_scratch);
434 } else if (r.IsDouble()) {
435 Abort(kUnsupportedDoubleImmediate);
436 } else if (r.IsTagged()) {
437 Abort(kUnsupportedTaggedImmediate);
439 } else if (op->IsStackSlot() || op->IsArgument()) {
440 MemOperand mem_op = ToMemOperand(op);
441 __ ldc1(dbl_scratch, mem_op);
449 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
450 HConstant* constant = chunk_->LookupConstant(op);
451 ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
452 return constant->handle(isolate());
456 bool LCodeGen::IsInteger32(LConstantOperand* op) const {
457 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
461 bool LCodeGen::IsSmi(LConstantOperand* op) const {
462 return chunk_->LookupLiteralRepresentation(op).IsSmi();
466 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
467 return ToRepresentation(op, Representation::Integer32());
471 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
472 const Representation& r) const {
473 HConstant* constant = chunk_->LookupConstant(op);
474 int32_t value = constant->Integer32Value();
475 if (r.IsInteger32()) return value;
476 ASSERT(r.IsSmiOrTagged());
477 return reinterpret_cast<int32_t>(Smi::FromInt(value));
481 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
482 HConstant* constant = chunk_->LookupConstant(op);
483 return Smi::FromInt(constant->Integer32Value());
487 double LCodeGen::ToDouble(LConstantOperand* op) const {
488 HConstant* constant = chunk_->LookupConstant(op);
489 ASSERT(constant->HasDoubleValue());
490 return constant->DoubleValue();
494 Operand LCodeGen::ToOperand(LOperand* op) {
495 if (op->IsConstantOperand()) {
496 LConstantOperand* const_op = LConstantOperand::cast(op);
497 HConstant* constant = chunk()->LookupConstant(const_op);
498 Representation r = chunk_->LookupLiteralRepresentation(const_op);
500 ASSERT(constant->HasSmiValue());
501 return Operand(Smi::FromInt(constant->Integer32Value()));
502 } else if (r.IsInteger32()) {
503 ASSERT(constant->HasInteger32Value());
504 return Operand(constant->Integer32Value());
505 } else if (r.IsDouble()) {
506 Abort(kToOperandUnsupportedDoubleImmediate);
508 ASSERT(r.IsTagged());
509 return Operand(constant->handle(isolate()));
510 } else if (op->IsRegister()) {
511 return Operand(ToRegister(op));
512 } else if (op->IsDoubleRegister()) {
513 Abort(kToOperandIsDoubleRegisterUnimplemented);
516 // Stack slots not implemented, use ToMemOperand instead.
522 static int ArgumentsOffsetWithoutFrame(int index) {
524 return -(index + 1) * kPointerSize;
528 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
529 ASSERT(!op->IsRegister());
530 ASSERT(!op->IsDoubleRegister());
531 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
532 if (NeedsEagerFrame()) {
533 return MemOperand(fp, StackSlotOffset(op->index()));
535 // Retrieve parameter without eager stack-frame relative to the
537 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
542 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
543 ASSERT(op->IsDoubleStackSlot());
544 if (NeedsEagerFrame()) {
545 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
547 // Retrieve parameter without eager stack-frame relative to the
550 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
555 void LCodeGen::WriteTranslation(LEnvironment* environment,
556 Translation* translation) {
// Recursively serializes an environment chain (outermost first) into a
// deopt Translation: emits the frame-begin command for this frame's type,
// then one command per environment value via AddToTranslation.
// NOTE(review): extraction dropped lines here (the case labels JS_FUNCTION/
// JS_CONSTRUCT/JS_GETTER/JS_SETTER/STUB, breaks, and the AddToTranslation
// argument lines for translation/value) -- restore from upstream.
557 if (environment == NULL) return;
559 // The translation includes one command per value in the environment.
560 int translation_size = environment->translation_size();
561 // The output frame height does not include the parameters.
562 int height = translation_size - environment->parameter_count();
564 WriteTranslation(environment->outer(), translation);
// Use the self-literal id when this frame's closure is the one being
// compiled; otherwise register the closure as a deopt literal.
565 bool has_closure_id = !info()->closure().is_null() &&
566 !info()->closure().is_identical_to(environment->closure());
567 int closure_id = has_closure_id
568 ? DefineDeoptimizationLiteral(environment->closure())
569 : Translation::kSelfLiteralId;
571 switch (environment->frame_type()) {
573 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
576 translation->BeginConstructStubFrame(closure_id, translation_size);
579 ASSERT(translation_size == 1);
581 translation->BeginGetterStubFrame(closure_id);
584 ASSERT(translation_size == 2);
586 translation->BeginSetterStubFrame(closure_id);
589 translation->BeginCompiledStubFrame();
591 case ARGUMENTS_ADAPTOR:
592 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
596 int object_index = 0;
597 int dematerialized_index = 0;
598 for (int i = 0; i < translation_size; ++i) {
599 LOperand* value = environment->values()->at(i);
600 AddToTranslation(environment,
603 environment->HasTaggedValueAt(i),
604 environment->HasUint32ValueAt(i),
606 &dematerialized_index);
611 void LCodeGen::AddToTranslation(LEnvironment* environment,
612 Translation* translation,
616 int* object_index_pointer,
617 int* dematerialized_index_pointer) {
// Emits the translation command for a single environment value: captured
// or duplicated objects (recursing over their dematerialized fields),
// stack slots, registers, or constants, with tagged/uint32/int32 variants.
// NOTE(review): extraction dropped lines here (the LOperand*/is_tagged/
// is_uint32 parameters, returns after object handling, and several
// else-branch braces) -- restore from upstream.
618 if (op == LEnvironment::materialization_marker()) {
619 int object_index = (*object_index_pointer)++;
// An already-seen captured object is emitted as a duplicate reference.
620 if (environment->ObjectIsDuplicateAt(object_index)) {
621 int dupe_of = environment->ObjectDuplicateOfAt(object_index);
622 translation->DuplicateObject(dupe_of);
625 int object_length = environment->ObjectLengthAt(object_index);
626 if (environment->ObjectIsArgumentsAt(object_index)) {
627 translation->BeginArgumentsObject(object_length);
629 translation->BeginCapturedObject(object_length);
// Dematerialized field values live past the end of the translation.
631 int dematerialized_index = *dematerialized_index_pointer;
632 int env_offset = environment->translation_size() + dematerialized_index;
633 *dematerialized_index_pointer += object_length;
634 for (int i = 0; i < object_length; ++i) {
635 LOperand* value = environment->values()->at(env_offset + i);
636 AddToTranslation(environment,
639 environment->HasTaggedValueAt(env_offset + i),
640 environment->HasUint32ValueAt(env_offset + i),
641 object_index_pointer,
642 dematerialized_index_pointer);
647 if (op->IsStackSlot()) {
649 translation->StoreStackSlot(op->index());
650 } else if (is_uint32) {
651 translation->StoreUint32StackSlot(op->index());
653 translation->StoreInt32StackSlot(op->index());
655 } else if (op->IsDoubleStackSlot()) {
656 translation->StoreDoubleStackSlot(op->index());
657 } else if (op->IsArgument()) {
// Arguments are addressed as slots just above the spill area.
659 int src_index = GetStackSlotCount() + op->index();
660 translation->StoreStackSlot(src_index);
661 } else if (op->IsRegister()) {
662 Register reg = ToRegister(op);
664 translation->StoreRegister(reg);
665 } else if (is_uint32) {
666 translation->StoreUint32Register(reg);
668 translation->StoreInt32Register(reg);
670 } else if (op->IsDoubleRegister()) {
671 DoubleRegister reg = ToDoubleRegister(op);
672 translation->StoreDoubleRegister(reg);
673 } else if (op->IsConstantOperand()) {
674 HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
675 int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
676 translation->StoreLiteral(src_index);
683 void LCodeGen::CallCode(Handle<Code> code,
684 RelocInfo::Mode mode,
685 LInstruction* instr) {
686 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
690 void LCodeGen::CallCodeGeneric(Handle<Code> code,
691 RelocInfo::Mode mode,
693 SafepointMode safepoint_mode) {
694 ASSERT(instr != NULL);
696 RecordSafepointWithLazyDeopt(instr, safepoint_mode);
700 void LCodeGen::CallRuntime(const Runtime::Function* function,
703 SaveFPRegsMode save_doubles) {
704 ASSERT(instr != NULL);
706 __ CallRuntime(function, num_arguments, save_doubles);
708 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
712 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
713 if (context->IsRegister()) {
714 __ Move(cp, ToRegister(context));
715 } else if (context->IsStackSlot()) {
716 __ lw(cp, ToMemOperand(context));
717 } else if (context->IsConstantOperand()) {
718 HConstant* constant =
719 chunk_->LookupConstant(LConstantOperand::cast(context));
720 __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
727 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
731 LoadContextFromDeferred(context);
732 __ CallRuntimeSaveDoubles(id);
733 RecordSafepointWithRegisters(
734 instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
738 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
739 Safepoint::DeoptMode mode) {
740 if (!environment->HasBeenRegistered()) {
741 // Physical stack frame layout:
742 // -x ............. -4 0 ..................................... y
743 // [incoming arguments] [spill slots] [pushed outgoing arguments]
745 // Layout of the environment:
746 // 0 ..................................................... size-1
747 // [parameters] [locals] [expression stack including arguments]
749 // Layout of the translation:
750 // 0 ........................................................ size - 1 + 4
751 // [expression stack including arguments] [locals] [4 words] [parameters]
752 // |>------------ translation_size ------------<|
755 int jsframe_count = 0;
756 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
758 if (e->frame_type() == JS_FUNCTION) {
762 Translation translation(&translations_, frame_count, jsframe_count, zone());
763 WriteTranslation(environment, &translation);
764 int deoptimization_index = deoptimizations_.length();
765 int pc_offset = masm()->pc_offset();
766 environment->Register(deoptimization_index,
768 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
769 deoptimizations_.Add(environment, zone());
774 void LCodeGen::DeoptimizeIf(Condition condition,
775 LEnvironment* environment,
776 Deoptimizer::BailoutType bailout_type,
778 const Operand& src2) {
// Emits a conditional deoptimization: registers the environment, resolves
// the bailout entry, supports --deopt-every-n-times stress and
// --trap-on-deopt debugging, then either calls the entry directly or
// branches through the shared deopt jump table (which can build a frame
// and restore caller doubles).
// NOTE(review): extraction dropped lines here (the src1 parameter, the
// 'entry' declaration, the skip/no_deopt Label declarations, Pop, and the
// JumpTableEntry constructor arguments) -- restore from upstream.
779 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
780 ASSERT(environment->HasBeenRegistered());
781 int id = environment->deoptimization_index();
782 ASSERT(info()->IsOptimizing() || info()->IsStub());
784 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
786 Abort(kBailoutWasNotPrepared);
// Stress mode: force a deopt every n-th time this point is reached.
790 if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
791 Register scratch = scratch0();
792 ExternalReference count = ExternalReference::stress_deopt_count(isolate());
794 __ Push(a1, scratch);
795 __ li(scratch, Operand(count));
796 __ lw(a1, MemOperand(scratch));
797 __ Subu(a1, a1, Operand(1));
798 __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
799 __ li(a1, Operand(FLAG_deopt_every_n_times));
800 __ sw(a1, MemOperand(scratch));
803 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
805 __ sw(a1, MemOperand(scratch));
// Debugging aid: break instead of deopting when --trap-on-deopt is set.
809 if (info()->ShouldTrapOnDeopt()) {
811 if (condition != al) {
812 __ Branch(&skip, NegateCondition(condition), src1, src2);
814 __ stop("trap_on_deopt");
818 ASSERT(info()->IsStub() || frame_is_built_);
819 // Go through jump table if we need to handle condition, build frame, or
820 // restore caller doubles.
821 if (condition == al && frame_is_built_ &&
822 !info()->saves_caller_doubles()) {
823 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
825 // We often have several deopts to the same entry, reuse the last
826 // jump entry if this is the case.
827 if (deopt_jump_table_.is_empty() ||
828 (deopt_jump_table_.last().address != entry) ||
829 (deopt_jump_table_.last().bailout_type != bailout_type) ||
830 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
831 Deoptimizer::JumpTableEntry table_entry(entry,
834 deopt_jump_table_.Add(table_entry, zone());
836 __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
841 void LCodeGen::DeoptimizeIf(Condition condition,
842 LEnvironment* environment,
844 const Operand& src2) {
845 Deoptimizer::BailoutType bailout_type = info()->IsStub()
847 : Deoptimizer::EAGER;
848 DeoptimizeIf(condition, environment, bailout_type, src1, src2);
852 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
// Builds the DeoptimizationInputData array for the generated code:
// translation byte array, inlined-function count, literals, OSR info and
// one (ast id, translation index, args height, pc) record per registered
// environment.
// NOTE(review): extraction dropped some lines from this listing (closing
// braces and possibly additional data fields) -- compare with upstream.
853 int length = deoptimizations_.length();
854 if (length == 0) return;
855 Handle<DeoptimizationInputData> data =
856 factory()->NewDeoptimizationInputData(length, TENURED);
858 Handle<ByteArray> translations =
859 translations_.CreateByteArray(isolate()->factory());
860 data->SetTranslationByteArray(*translations);
861 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
863 Handle<FixedArray> literals =
864 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
// Deferred handles may be dereferenced here because we are on the main
// thread finalizing compilation.
865 { AllowDeferredHandleDereference copy_handles;
866 for (int i = 0; i < deoptimization_literals_.length(); i++) {
867 literals->set(i, *deoptimization_literals_[i]);
869 data->SetLiteralArray(*literals);
872 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
873 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
875 // Populate the deoptimization entries.
876 for (int i = 0; i < length; i++) {
877 LEnvironment* env = deoptimizations_[i];
878 data->SetAstId(i, env->ast_id());
879 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
880 data->SetArgumentsStackHeight(i,
881 Smi::FromInt(env->arguments_stack_height()));
882 data->SetPc(i, Smi::FromInt(env->pc_offset()));
884 code->set_deoptimization_data(*data);
888 int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
889 int result = deoptimization_literals_.length();
890 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
891 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
893 deoptimization_literals_.Add(literal, zone());
898 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
899 ASSERT(deoptimization_literals_.length() == 0);
901 const ZoneList<Handle<JSFunction> >* inlined_closures =
902 chunk()->inlined_closures();
904 for (int i = 0, length = inlined_closures->length();
907 DefineDeoptimizationLiteral(inlined_closures->at(i));
910 inlined_function_count_ = deoptimization_literals_.length();
914 void LCodeGen::RecordSafepointWithLazyDeopt(
915 LInstruction* instr, SafepointMode safepoint_mode) {
916 if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
917 RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
919 ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
920 RecordSafepointWithRegisters(
921 instr->pointer_map(), 0, Safepoint::kLazyDeopt);
926 void LCodeGen::RecordSafepoint(
927 LPointerMap* pointers,
928 Safepoint::Kind kind,
930 Safepoint::DeoptMode deopt_mode) {
931 ASSERT(expected_safepoint_kind_ == kind);
933 const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
934 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
935 kind, arguments, deopt_mode);
936 for (int i = 0; i < operands->length(); i++) {
937 LOperand* pointer = operands->at(i);
938 if (pointer->IsStackSlot()) {
939 safepoint.DefinePointerSlot(pointer->index(), zone());
940 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
941 safepoint.DefinePointerRegister(ToRegister(pointer), zone());
947 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
948 Safepoint::DeoptMode deopt_mode) {
949 RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
953 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
954 LPointerMap empty_pointers(zone());
955 RecordSafepoint(&empty_pointers, deopt_mode);
959 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
961 Safepoint::DeoptMode deopt_mode) {
963 pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
967 void LCodeGen::RecordSafepointWithRegistersAndDoubles(
968 LPointerMap* pointers,
970 Safepoint::DeoptMode deopt_mode) {
972 pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
976 void LCodeGen::RecordAndWritePosition(int position) {
977 if (position == RelocInfo::kNoPosition) return;
978 masm()->positions_recorder()->RecordPosition(position);
979 masm()->positions_recorder()->WriteRecordedPositions();
983 static const char* LabelType(LLabel* label) {
984 if (label->is_loop_header()) return " (loop header)";
985 if (label->is_osr_entry()) return " (OSR entry)";
990 void LCodeGen::DoLabel(LLabel* label) {
991 Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
992 current_instruction_,
993 label->hydrogen_value()->id(),
996 __ bind(label->label());
997 current_block_ = label->block_id();
1002 void LCodeGen::DoParallelMove(LParallelMove* move) {
1003 resolver_.Resolve(move);
1007 void LCodeGen::DoGap(LGap* gap) {
1008 for (int i = LGap::FIRST_INNER_POSITION;
1009 i <= LGap::LAST_INNER_POSITION;
1011 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
1012 LParallelMove* move = gap->GetParallelMove(inner_pos);
1013 if (move != NULL) DoParallelMove(move);
1018 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
1023 void LCodeGen::DoParameter(LParameter* instr) {
1028 void LCodeGen::DoCallStub(LCallStub* instr) {
// Dispatches on the stub's major key and emits a call to the matching
// code stub; context arrives in cp and the result is produced in v0.
// NOTE(review): extraction dropped lines here (breaks, the SubString stub
// declaration and remaining cases/default) -- restore from upstream.
1029 ASSERT(ToRegister(instr->context()).is(cp));
1030 ASSERT(ToRegister(instr->result()).is(v0));
1031 switch (instr->hydrogen()->major_key()) {
1032 case CodeStub::RegExpExec: {
1033 RegExpExecStub stub;
1034 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1037 case CodeStub::SubString: {
1039 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1042 case CodeStub::StringCompare: {
1043 StringCompareStub stub;
1044 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1053 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
1054 GenerateOsrPrologue();
1058 void LCodeGen::DoModI(LModI* instr) {
// Emits integer modulus.  Power-of-two divisors use a branchless mask
// (negating for negative dividends); general divisors use the MIPS div
// unit with deopts for x % 0, kMinInt % -1, and -0 results when the
// hydrogen node bails out on minus zero.
// NOTE(review): extraction dropped lines here ('done' binds, else-branch
// braces and the closing of both arms) -- restore from upstream.
1059 HMod* hmod = instr->hydrogen();
1060 HValue* left = hmod->left();
1061 HValue* right = hmod->right();
1062 if (hmod->RightIsPowerOf2()) {
1063 const Register left_reg = ToRegister(instr->left());
1064 const Register result_reg = ToRegister(instr->result());
1066 // Note: The code below even works when right contains kMinInt.
1067 int32_t divisor = Abs(right->GetInteger32Constant());
1069 Label left_is_not_negative, done;
1070 if (left->CanBeNegative()) {
// For negative dividends compute |x| & (d-1), then negate the result.
1071 __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
1072 &left_is_not_negative, ge, left_reg, Operand(zero_reg));
1073 __ subu(result_reg, zero_reg, left_reg);
1074 __ And(result_reg, result_reg, divisor - 1);
1075 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1076 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
1078 __ Branch(USE_DELAY_SLOT, &done);
1079 __ subu(result_reg, zero_reg, result_reg);
1082 __ bind(&left_is_not_negative);
1083 __ And(result_reg, left_reg, divisor - 1);
1086 const Register scratch = scratch0();
1087 const Register left_reg = ToRegister(instr->left());
1088 const Register result_reg = ToRegister(instr->result());
1090 // div runs in the background while we check for special cases.
1091 Register right_reg = EmitLoadRegister(instr->right(), scratch);
1092 __ div(left_reg, right_reg);
1095 // Check for x % 0, we have to deopt in this case because we can't return a
1097 if (right->CanBeZero()) {
1098 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
1101 // Check for kMinInt % -1, we have to deopt if we care about -0, because we
1102 // can't return that.
1103 if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
1104 Label left_not_min_int;
1105 __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
1106 // TODO(svenpanne) Don't deopt when we don't care about -0.
1107 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
1108 __ bind(&left_not_min_int);
1111 // TODO(svenpanne) Only emit the test/deopt if we have to.
1112 __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
1113 __ mfhi(result_reg);
1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1116 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
// Emits a truncating signed integer division of 'dividend' by the constant
// 'divisor', producing quotient in 'result' and remainder in 'remainder'.
// Strategy by divisor magnitude:
//   0            -> unconditional deopt (division by zero),
//   1 / -1       -> move / checked negation,
//   power of two -> shift-based branch-free sequence,
//   other        -> magic-number multiplication (Hacker's Delight) when a
//                   magic number exists, else the hardware div instruction.
// NOTE(review): the parameter list is not fully visible in this listing;
// parameter roles above are inferred from the body — confirm against the
// declaration.
1123 void LCodeGen::EmitSignedIntegerDivisionByConstant(
1129 LEnvironment* environment) {
1130 ASSERT(!AreAliased(dividend, scratch, at, no_reg));
1132 uint32_t divisor_abs = abs(divisor);
1134 int32_t power_of_2_factor =
1135 CompilerIntrinsics::CountTrailingZeros(divisor_abs);
1137 switch (divisor_abs) {
// Divisor 0: the result is undefined, always deoptimize.
1139 DeoptimizeIf(al, environment);
1144 __ Move(result, dividend);
// Divisor -1: negate, deopting on kMinInt overflow.
1146 __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
1147 DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
1149 // Compute the remainder.
1150 __ Move(remainder, zero_reg);
1154 if (IsPowerOf2(divisor_abs)) {
1155 // Branch and condition free code for integer division by a power
1157 int32_t power = WhichPowerOf2(divisor_abs);
// Round towards zero: add (2^power - 1) to negative dividends before
// the arithmetic shift, derived from the sign bits of the dividend.
1159 __ sra(scratch, dividend, power - 1);
1161 __ srl(scratch, scratch, 32 - power);
1162 __ Addu(scratch, dividend, Operand(scratch));
1163 __ sra(result, scratch, power);
1164 // Negate if necessary.
1165 // We don't need to check for overflow because the case '-1' is
1166 // handled separately.
1168 ASSERT(divisor != -1);
1169 __ Subu(result, zero_reg, Operand(result));
1171 // Compute the remainder.
1173 __ sll(scratch, result, power);
1174 __ Subu(remainder, dividend, Operand(scratch));
1176 __ sll(scratch, result, power);
1177 __ Addu(remainder, dividend, Operand(scratch));
1180 } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
1181 // Use magic numbers for a few specific divisors.
1182 // Details and proofs can be found in:
1183 // - Hacker's Delight, Henry S. Warren, Jr.
1184 // - The PowerPC Compiler Writer's Guide
1185 // and probably many others.
1188 // <divisor with magic numbers> * <power of 2>
1190 // <divisor with magic numbers> * <other divisor with magic numbers>
1191 DivMagicNumbers magic_numbers =
1192 DivMagicNumberFor(divisor_abs >> power_of_2_factor);
1193 // Branch and condition free code for integer division by a power
1195 const int32_t M = magic_numbers.M;
1196 const int32_t s = magic_numbers.s + power_of_2_factor;
// High 32 bits of dividend * M approximate the quotient; correct by
// adding the dividend (per the magic-number scheme) and shifting by s.
1198 __ li(scratch, Operand(M));
1199 __ mult(dividend, scratch);
1202 __ Addu(scratch, scratch, Operand(dividend));
1205 __ sra(scratch, scratch, s);
1206 __ mov(scratch, scratch);
// Add the sign bit of the dividend to round towards zero.
1208 __ srl(at, dividend, 31);
1209 __ Addu(result, scratch, Operand(at));
1210 if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
1211 // Compute the remainder.
1212 __ li(scratch, Operand(divisor))
1213 __ Mul(scratch, result, Operand(scratch));
1214 __ Subu(remainder, dividend, Operand(scratch));
// Fallback: no magic number available, use the hardware divider.
1216 __ li(scratch, Operand(divisor));
1217 __ div(dividend, scratch);
// Emits code for integer division left / right using the MIPS div
// instruction, which is started first so it overlaps with the special-case
// checks. Deopts on division by zero, on 0 / negative (would be -0) when
// kBailoutOnMinusZero is set, on kMinInt / -1 when kCanOverflow is set,
// and on a non-zero remainder unless all uses truncate to int32.
1225 void LCodeGen::DoDivI(LDivI* instr) {
1226 const Register left = ToRegister(instr->left());
1227 const Register right = ToRegister(instr->right());
1228 const Register result = ToRegister(instr->result());
1230 // On MIPS div is asynchronous - it will run in the background while we
1231 // check for special cases.
1232 __ div(left, right);
1235 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1236 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
1239 // Check for (0 / -x) that will produce negative zero.
1240 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1241 Label left_not_zero;
1242 __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
1243 DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
1244 __ bind(&left_not_zero);
1247 // Check for (kMinInt / -1).
1248 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1249 Label left_not_min_int;
1250 __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
1251 DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
1252 __ bind(&left_not_min_int);
// If the result is observed as an integer (not truncated), a non-zero
// remainder means the division was inexact: deopt.
1255 if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1257 DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
// Fused multiply-add of doubles: addend += multiplier * multiplicand,
// computed in place via the FPU madd.d instruction.
1263 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1264 DoubleRegister addend = ToDoubleRegister(instr->addend());
1265 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1266 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1268 // This is computed in-place.
1269 ASSERT(addend.is(ToDoubleRegister(instr->result())));
1271 __ madd_d(addend, addend, multiplier, multiplicand);
// Emits code for Math.floor(left / right): first a truncating division
// (constant-divisor helper or hardware div), then a correction step that
// subtracts 1 when the remainder is non-zero and dividend/divisor have
// opposite signs (detected via the sign of remainder XOR divisor).
1275 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
1276 const Register result = ToRegister(instr->result());
1277 const Register left = ToRegister(instr->left());
1278 const Register remainder = ToRegister(instr->temp());
1279 const Register scratch = scratch0();
1281 if (instr->right()->IsConstantOperand()) {
1283 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
1285 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
1287 EmitSignedIntegerDivisionByConstant(result,
1292 instr->environment());
1293 // We performed a truncating division. Correct the result if necessary.
1294 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1295 __ Xor(scratch , remainder, Operand(divisor));
1296 __ Branch(&done, ge, scratch, Operand(zero_reg));
1297 __ Subu(result, result, Operand(1));
// Non-constant divisor: same special-case checks as DoDivI while the
// hardware division completes in the background.
1301 const Register right = ToRegister(instr->right());
1303 // On MIPS div is asynchronous - it will run in the background while we
1304 // check for special cases.
1305 __ div(left, right);
1308 DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
1310 // Check for (0 / -x) that will produce negative zero.
1311 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1312 Label left_not_zero;
1313 __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
1314 DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
1315 __ bind(&left_not_zero);
1318 // Check for (kMinInt / -1).
1319 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1320 Label left_not_min_int;
1321 __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
1322 DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
1323 __ bind(&left_not_min_int);
1329 // We performed a truncating division. Correct the result if necessary.
1330 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1331 __ Xor(scratch , remainder, Operand(right));
1332 __ Branch(&done, ge, scratch, Operand(zero_reg));
1333 __ Subu(result, result, Operand(1));
// Emits code for integer multiplication. Constant right operands get
// strength reduction: -1 -> checked negation, 0 -> zero (with -0 bailout
// if left can be negative), 1 -> move, |c| a power of two or a power of
// two +/- 1 -> shift(+add/sub), otherwise a real multiply. Register
// operands use mult (with hi/lo overflow check) or Mul.
1339 void LCodeGen::DoMulI(LMulI* instr) {
1340 Register scratch = scratch0();
1341 Register result = ToRegister(instr->result());
1342 // Note that result may alias left.
1343 Register left = ToRegister(instr->left());
1344 LOperand* right_op = instr->right();
1346 bool bailout_on_minus_zero =
1347 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1348 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1350 if (right_op->IsConstantOperand()) {
1351 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1353 if (bailout_on_minus_zero && (constant < 0)) {
1354 // The case of a null constant will be handled separately.
1355 // If constant is negative and left is null, the result should be -0.
1356 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
// constant == -1: negate, with overflow check if required.
1362 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1363 DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
1365 __ Subu(result, zero_reg, left);
// constant == 0: result is 0, unless -0 must be caught.
1369 if (bailout_on_minus_zero) {
1370 // If left is strictly negative and the constant is null, the
1371 // result is -0. Deoptimize if required, otherwise return 0.
1372 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
1374 __ mov(result, zero_reg);
// constant == 1: identity.
1378 __ Move(result, left);
1381 // Multiplying by powers of two and powers of two plus or minus
1382 // one can be done faster with shifted operands.
1383 // For other constants we emit standard code.
// Branch-free absolute value of the constant.
1384 int32_t mask = constant >> 31;
1385 uint32_t constant_abs = (constant + mask) ^ mask;
1387 if (IsPowerOf2(constant_abs)) {
1388 int32_t shift = WhichPowerOf2(constant_abs);
1389 __ sll(result, left, shift);
1390 // Correct the sign of the result if the constant is negative.
1391 if (constant < 0) __ Subu(result, zero_reg, result);
1392 } else if (IsPowerOf2(constant_abs - 1)) {
1393 int32_t shift = WhichPowerOf2(constant_abs - 1);
1394 __ sll(scratch, left, shift);
1395 __ Addu(result, scratch, left);
1396 // Correct the sign of the result if the constant is negative.
1397 if (constant < 0) __ Subu(result, zero_reg, result);
1398 } else if (IsPowerOf2(constant_abs + 1)) {
1399 int32_t shift = WhichPowerOf2(constant_abs + 1);
1400 __ sll(scratch, left, shift);
1401 __ Subu(result, scratch, left);
1402 // Correct the sign of the result if the constant is negative.
1403 if (constant < 0) __ Subu(result, zero_reg, result);
1405 // Generate standard code.
1406 __ li(at, constant);
1407 __ Mul(result, left, at);
1412 ASSERT(right_op->IsRegister());
1413 Register right = ToRegister(right_op);
1416 // hi:lo = left * right.
1417 if (instr->hydrogen()->representation().IsSmi()) {
1418 __ SmiUntag(result, left);
1419 __ mult(result, right);
1423 __ mult(left, right);
// Overflow iff the hi word is not the sign extension of the lo word.
1427 __ sra(at, result, 31);
1428 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1430 if (instr->hydrogen()->representation().IsSmi()) {
1431 __ SmiUntag(result, left);
1432 __ Mul(result, result, right);
1434 __ Mul(result, left, right);
1438 if (bailout_on_minus_zero) {
// A zero result is -0 only when the operands had opposite signs.
1440 __ Xor(at, left, right);
1441 __ Branch(&done, ge, at, Operand(zero_reg));
1442 // Bail out if the result is minus zero.
1444 instr->environment(),
// Emits bitwise AND / OR / XOR. The right operand may be a register,
// constant, or stack slot (loaded into 'at' first). XOR with ~0 is
// strength-reduced to a single nor (bitwise NOT).
1453 void LCodeGen::DoBitI(LBitI* instr) {
1454 LOperand* left_op = instr->left();
1455 LOperand* right_op = instr->right();
1456 ASSERT(left_op->IsRegister());
1457 Register left = ToRegister(left_op);
1458 Register result = ToRegister(instr->result());
1459 Operand right(no_reg);
1461 if (right_op->IsStackSlot() || right_op->IsArgument()) {
1462 right = Operand(EmitLoadRegister(right_op, at));
1464 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1465 right = ToOperand(right_op);
1468 switch (instr->op()) {
1469 case Token::BIT_AND:
1470 __ And(result, left, right);
1473 __ Or(result, left, right);
1475 case Token::BIT_XOR:
1476 if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
1477 __ Nor(result, zero_reg, left);
1479 __ Xor(result, left, right);
// Emits shift/rotate operations. Register shift amounts use the variable
// forms (srav/srlv/sllv/Ror) — no masking needed, the hardware uses the
// low 5 bits. Constant amounts are masked with 0x1F here. SHR can deopt
// when the result would not fit in a signed int32; SHL of a Smi value can
// deopt on Smi-tag overflow.
1489 void LCodeGen::DoShiftI(LShiftI* instr) {
1490 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1491 // result may alias either of them.
1492 LOperand* right_op = instr->right();
1493 Register left = ToRegister(instr->left());
1494 Register result = ToRegister(instr->result());
1495 Register scratch = scratch0();
1497 if (right_op->IsRegister()) {
1498 // No need to mask the right operand on MIPS, it is built into the variable
1499 // shift instructions.
1500 switch (instr->op()) {
1502 __ Ror(result, left, Operand(ToRegister(right_op)));
1505 __ srav(result, left, ToRegister(right_op));
1508 __ srlv(result, left, ToRegister(right_op));
1509 if (instr->can_deopt()) {
// Logical shift result with the sign bit set is not a valid int32.
1510 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1514 __ sllv(result, left, ToRegister(right_op));
1521 // Mask the right_op operand.
1522 int value = ToInteger32(LConstantOperand::cast(right_op));
1523 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
1524 switch (instr->op()) {
1526 if (shift_count != 0) {
1527 __ Ror(result, left, Operand(shift_count));
1529 __ Move(result, left);
1533 if (shift_count != 0) {
1534 __ sra(result, left, shift_count);
1536 __ Move(result, left);
1540 if (shift_count != 0) {
1541 __ srl(result, left, shift_count);
// Shift count 0: a negative input cannot be represented as int32
// after an unsigned shift, so test the sign bit directly.
1543 if (instr->can_deopt()) {
1544 __ And(at, left, Operand(0x80000000));
1545 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1547 __ Move(result, left);
1551 if (shift_count != 0) {
1552 if (instr->hydrogen_value()->representation().IsSmi() &&
1553 instr->can_deopt()) {
// Shift by count-1 then Smi-tag (the final doubling), so overflow
// of the tag step is detected.
1554 if (shift_count != 1) {
1555 __ sll(result, left, shift_count - 1);
1556 __ SmiTagCheckOverflow(result, result, scratch);
1558 __ SmiTagCheckOverflow(result, left, scratch);
1560 DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
1562 __ sll(result, left, shift_count);
1565 __ Move(result, left);
// Emits integer subtraction. Without the overflow flag a plain Subu is
// used; with it, SubuAndCheckForOverflow leaves an overflow indicator in
// scratch0() and a negative value there triggers a deopt.
1576 void LCodeGen::DoSubI(LSubI* instr) {
1577 LOperand* left = instr->left();
1578 LOperand* right = instr->right();
1579 LOperand* result = instr->result();
1580 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1582 if (!can_overflow) {
1583 if (right->IsStackSlot() || right->IsArgument()) {
1584 Register right_reg = EmitLoadRegister(right, at);
1585 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
1587 ASSERT(right->IsRegister() || right->IsConstantOperand());
1588 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
1590 } else { // can_overflow.
1591 Register overflow = scratch0();
1592 Register scratch = scratch1();
1593 if (right->IsStackSlot() ||
1594 right->IsArgument() ||
1595 right->IsConstantOperand()) {
1596 Register right_reg = EmitLoadRegister(right, scratch);
1597 __ SubuAndCheckForOverflow(ToRegister(result),
1600 overflow); // Reg at also used as scratch.
1602 ASSERT(right->IsRegister());
1603 // Due to overflow check macros not supporting constant operands,
1604 // handling the IsConstantOperand case was moved to prev if clause.
1605 __ SubuAndCheckForOverflow(ToRegister(result),
1608 overflow); // Reg at also used as scratch.
1610 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
// Materializes an int32 constant into the result register.
1615 void LCodeGen::DoConstantI(LConstantI* instr) {
1616 __ li(ToRegister(instr->result()), Operand(instr->value()));
// Materializes a Smi constant into the result register.
1620 void LCodeGen::DoConstantS(LConstantS* instr) {
1621 __ li(ToRegister(instr->result()), Operand(instr->value()));
// Materializes a double constant into the result FPU register.
// (The move into 'result' is on a line elided from this listing.)
1625 void LCodeGen::DoConstantD(LConstantD* instr) {
1626 ASSERT(instr->result()->IsDoubleRegister());
1627 DoubleRegister result = ToDoubleRegister(instr->result());
1628 double v = instr->value();
// Materializes an external-reference constant into the result register.
1633 void LCodeGen::DoConstantE(LConstantE* instr) {
1634 __ li(ToRegister(instr->result()), Operand(instr->value()));
// Materializes a tagged (handle) constant into the result register.
// The scope object permits dereferencing the handle here even though
// deferred handle dereference is normally disallowed.
1638 void LCodeGen::DoConstantT(LConstantT* instr) {
1639 Handle<Object> value = instr->value(isolate());
1640 AllowDeferredHandleDereference smi_check;
1641 __ li(ToRegister(instr->result()), value);
// Loads the enum-cache length of a map into the result register via the
// EnumLength macro.
1645 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
1646 Register result = ToRegister(instr->result());
1647 Register map = ToRegister(instr->value());
1648 __ EnumLength(result, map);
// Loads a field of a JSDate object. Deopts if the input is a Smi or not a
// JS_DATE_TYPE. Field 0 (the value) is always read directly; cached fields
// are read directly only while the date-cache stamp matches, otherwise the
// C function get_date_field_function is called (a0 = object, a1 = index,
// result in v0 per the asserted register constraints).
1652 void LCodeGen::DoDateField(LDateField* instr) {
1653 Register object = ToRegister(instr->date());
1654 Register result = ToRegister(instr->result());
1655 Register scratch = ToRegister(instr->temp());
1656 Smi* index = instr->index();
1657 Label runtime, done;
1658 ASSERT(object.is(a0));
1659 ASSERT(result.is(v0));
1660 ASSERT(!scratch.is(scratch0()));
1661 ASSERT(!scratch.is(object));
1663 __ SmiTst(object, at);
1664 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1665 __ GetObjectType(object, scratch, scratch);
1666 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1668 if (index->value() == 0) {
1669 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1671 if (index->value() < JSDate::kFirstUncachedField) {
// Compare the global date-cache stamp with the object's stamp; a
// mismatch means the cached field is stale and we must call out.
1672 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1673 __ li(scratch, Operand(stamp));
1674 __ lw(scratch, MemOperand(scratch));
1675 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1676 __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1677 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
1678 kPointerSize * index->value()));
1682 __ PrepareCallCFunction(2, scratch);
1683 __ li(a1, Operand(index));
1684 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
// Builds a MemOperand addressing character 'index' of a sequential string.
// Constant indices fold the (possibly two-byte-scaled) offset into the
// displacement; register indices compute string + index (scaled by the
// character size) into scratch0() and address off that.
1690 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1692 String::Encoding encoding) {
1693 if (index->IsConstantOperand()) {
1694 int offset = ToInteger32(LConstantOperand::cast(index));
1695 if (encoding == String::TWO_BYTE_ENCODING) {
1696 offset *= kUC16Size;
1698 STATIC_ASSERT(kCharSize == 1);
1699 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1701 Register scratch = scratch0();
1702 ASSERT(!scratch.is(string));
1703 ASSERT(!scratch.is(ToRegister(index)));
1704 if (encoding == String::ONE_BYTE_ENCODING) {
1705 __ Addu(scratch, string, ToRegister(index));
// Two-byte: scale the index by 2 before adding.
1707 STATIC_ASSERT(kUC16Size == 2);
1708 __ sll(scratch, ToRegister(index), 1);
1709 __ Addu(scratch, string, scratch);
1711 return FieldMemOperand(scratch, SeqString::kHeaderSize);
// Loads one character of a sequential string (lbu for one-byte, lhu for
// two-byte). Under --debug-code, first checks the string's instance type
// matches the expected sequential encoding.
1715 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1716 String::Encoding encoding = instr->hydrogen()->encoding();
1717 Register string = ToRegister(instr->string());
1718 Register result = ToRegister(instr->result());
1720 if (FLAG_debug_code) {
1721 Register scratch = scratch0();
1722 __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1723 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1725 __ And(scratch, scratch,
1726 Operand(kStringRepresentationMask | kStringEncodingMask));
1727 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1728 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1729 __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1730 ? one_byte_seq_type : two_byte_seq_type));
1731 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1734 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1735 if (encoding == String::ONE_BYTE_ENCODING) {
1736 __ lbu(result, operand);
1738 __ lhu(result, operand);
// Stores one character into a sequential string (sb for one-byte, sh for
// two-byte). Under --debug-code, validates string type, index, and value
// via EmitSeqStringSetCharCheck first.
1743 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1744 String::Encoding encoding = instr->hydrogen()->encoding();
1745 Register string = ToRegister(instr->string());
1746 Register value = ToRegister(instr->value());
1748 if (FLAG_debug_code) {
1749 Register scratch = scratch0();
1750 Register index = ToRegister(instr->index());
1751 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1752 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1754 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1755 ? one_byte_seq_type : two_byte_seq_type;
1756 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1759 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1760 if (encoding == String::ONE_BYTE_ENCODING) {
1761 __ sb(value, operand);
1763 __ sh(value, operand);
// Emits integer addition; mirrors DoSubI. Without the overflow flag a
// plain Addu is used; with it, AdduAndCheckForOverflow sets scratch0()
// negative on overflow, which triggers a deopt.
1768 void LCodeGen::DoAddI(LAddI* instr) {
1769 LOperand* left = instr->left();
1770 LOperand* right = instr->right();
1771 LOperand* result = instr->result();
1772 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1774 if (!can_overflow) {
1775 if (right->IsStackSlot() || right->IsArgument()) {
1776 Register right_reg = EmitLoadRegister(right, at);
1777 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1779 ASSERT(right->IsRegister() || right->IsConstantOperand());
1780 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1782 } else { // can_overflow.
1783 Register overflow = scratch0();
1784 Register scratch = scratch1();
1785 if (right->IsStackSlot() ||
1786 right->IsArgument() ||
1787 right->IsConstantOperand()) {
1788 Register right_reg = EmitLoadRegister(right, scratch);
1789 __ AdduAndCheckForOverflow(ToRegister(result),
1792 overflow); // Reg at also used as scratch.
1794 ASSERT(right->IsRegister());
1795 // Due to overflow check macros not supporting constant operands,
1796 // handling the IsConstantOperand case was moved to prev if clause.
1797 __ AdduAndCheckForOverflow(ToRegister(result),
1800 overflow); // Reg at also used as scratch.
1802 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
// Emits Math.min/Math.max. Integer/Smi inputs use a compare-and-branch
// select. Doubles follow JS semantics: +/-0 are distinguished by using
// FPU addition (max: -0+(+0)=+0) or negated addition (min), and a NaN in
// either operand propagates via the return_right path.
1807 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1808 LOperand* left = instr->left();
1809 LOperand* right = instr->right();
1810 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1811 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1812 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1813 Register left_reg = ToRegister(left);
1814 Operand right_op = (right->IsRegister() || right->IsConstantOperand())
1816 : Operand(EmitLoadRegister(right, at));
1817 Register result_reg = ToRegister(instr->result());
1818 Label return_right, done;
1819 if (!result_reg.is(left_reg)) {
1820 __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
1821 __ mov(result_reg, left_reg);
1824 __ Branch(&done, condition, left_reg, right_op);
1825 __ bind(&return_right);
1826 __ Addu(result_reg, zero_reg, right_op);
1829 ASSERT(instr->hydrogen()->representation().IsDouble());
1830 FPURegister left_reg = ToDoubleRegister(left);
1831 FPURegister right_reg = ToDoubleRegister(right);
1832 FPURegister result_reg = ToDoubleRegister(instr->result());
1833 Label check_nan_left, check_zero, return_left, return_right, done;
// eq taken -> operands equal (possibly +0 vs -0); unordered (NaN)
// falls through to check_nan_left.
1834 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1835 __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1836 __ Branch(&return_right);
1838 __ bind(&check_zero);
1839 // left == right != 0.
1840 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1841 // At this point, both left and right are either 0 or -0.
1842 if (operation == HMathMinMax::kMathMin) {
// min(+/-0, +/-0): -(-l - r) gives -0 unless both are +0.
1843 __ neg_d(left_reg, left_reg);
1844 __ sub_d(result_reg, left_reg, right_reg);
1845 __ neg_d(result_reg, result_reg);
// max(+/-0, +/-0): l + r gives +0 unless both are -0.
1847 __ add_d(result_reg, left_reg, right_reg);
1851 __ bind(&check_nan_left);
// left != left (NaN) -> result is right (which is also NaN-compatible).
1853 __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1854 __ bind(&return_right);
1855 if (!right_reg.is(result_reg)) {
1856 __ mov_d(result_reg, right_reg);
1860 __ bind(&return_left);
1861 if (!left_reg.is(result_reg)) {
1862 __ mov_d(result_reg, left_reg);
// Emits double arithmetic (+, -, *, /) as single FPU instructions.
// The modulus case calls the C function mod_two_doubles_operation,
// saving/restoring a0-a3 around the call.
1869 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1870 DoubleRegister left = ToDoubleRegister(instr->left());
1871 DoubleRegister right = ToDoubleRegister(instr->right());
1872 DoubleRegister result = ToDoubleRegister(instr->result());
1873 switch (instr->op()) {
1875 __ add_d(result, left, right);
1878 __ sub_d(result, left, right);
1881 __ mul_d(result, left, right);
1884 __ div_d(result, left, right);
1887 // Save a0-a3 on the stack.
1888 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
1889 __ MultiPush(saved_regs);
1891 __ PrepareCallCFunction(0, 2, scratch0());
1892 __ MovToFloatParameters(left, right);
1894 ExternalReference::mod_two_doubles_operation(isolate()),
1896 // Move the result in the double result register.
1897 __ MovFromFloatResult(result);
1899 // Restore saved register.
1900 __ MultiPop(saved_regs);
// Tagged (generic) binary arithmetic: delegates to the BinaryOpIC stub
// with the standard a1/a0 -> v0 calling convention.
1910 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1911 ASSERT(ToRegister(instr->context()).is(cp));
1912 ASSERT(ToRegister(instr->left()).is(a1));
1913 ASSERT(ToRegister(instr->right()).is(a0));
1914 ASSERT(ToRegister(instr->result()).is(v0));
1916 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
1917 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
1918 // Other arch use a nop here, to signal that there is no inlined
1919 // patchable code. Mips does not need the nop, since our marker
1920 // instruction (andi zero_reg) will never be used in normal code.
// Emits a conditional branch to the instruction's true/false blocks,
// eliding whichever branch targets the next emitted block and folding
// identical/always-taken cases into a plain goto.
1924 template<class InstrType>
1925 void LCodeGen::EmitBranch(InstrType instr,
1926 Condition condition,
1928 const Operand& src2) {
1929 int left_block = instr->TrueDestination(chunk_);
1930 int right_block = instr->FalseDestination(chunk_);
1932 int next_block = GetNextEmittedBlock();
1933 if (right_block == left_block || condition == al) {
1934 EmitGoto(left_block);
1935 } else if (left_block == next_block) {
// Fall through to the true block; branch on the negated condition.
1936 __ Branch(chunk_->GetAssemblyLabel(right_block),
1937 NegateCondition(condition), src1, src2);
1938 } else if (right_block == next_block) {
1939 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1941 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
1942 __ Branch(chunk_->GetAssemblyLabel(right_block));
// Floating-point analogue of EmitBranch: uses BranchF on FPU operands,
// with the same fall-through elision of the next emitted block.
1947 template<class InstrType>
1948 void LCodeGen::EmitBranchF(InstrType instr,
1949 Condition condition,
1952 int right_block = instr->FalseDestination(chunk_);
1953 int left_block = instr->TrueDestination(chunk_);
1955 int next_block = GetNextEmittedBlock();
1956 if (right_block == left_block) {
1957 EmitGoto(left_block);
1958 } else if (left_block == next_block) {
1959 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
1960 NegateCondition(condition), src1, src2);
1961 } else if (right_block == next_block) {
1962 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1963 condition, src1, src2);
1965 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
1966 condition, src1, src2);
1967 __ Branch(chunk_->GetAssemblyLabel(right_block));
// Emits a branch to the instruction's false block only; the true path
// falls through.
1972 template<class InstrType>
1973 void LCodeGen::EmitFalseBranch(InstrType instr,
1974 Condition condition,
1976 const Operand& src2) {
1977 int false_block = instr->FalseDestination(chunk_);
1978 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
// Floating-point analogue of EmitFalseBranch.
1982 template<class InstrType>
1983 void LCodeGen::EmitFalseBranchF(InstrType instr,
1984 Condition condition,
1987 int false_block = instr->FalseDestination(chunk_);
1988 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
1989 condition, src1, src2);
// Emits a stop (software breakpoint) for LDebugBreak.
1993 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
1994 __ stop("LDebugBreak");
// Emits the generic ToBoolean branch. Specializes on the known hydrogen
// representation/type (int32/Smi: non-zero; double: non-zero/non-NaN;
// boolean/Smi/JSArray/HeapNumber/String: direct tests). Otherwise falls
// back to the ToBooleanStub expected-type set, testing each possible type
// in turn and deopting if a type outside the expected set is seen.
// NOTE(review): labels rendered here as '¬_string' / '¬_heap_number'
// look like mojibake of '&not_string' / '&not_heap_number' — confirm
// against the upstream source before relying on this listing.
1998 void LCodeGen::DoBranch(LBranch* instr) {
1999 Representation r = instr->hydrogen()->value()->representation();
2000 if (r.IsInteger32() || r.IsSmi()) {
2001 ASSERT(!info()->IsStub());
2002 Register reg = ToRegister(instr->value());
2003 EmitBranch(instr, ne, reg, Operand(zero_reg));
2004 } else if (r.IsDouble()) {
2005 ASSERT(!info()->IsStub());
2006 DoubleRegister reg = ToDoubleRegister(instr->value());
2007 // Test the double value. Zero and NaN are false.
2008 EmitBranchF(instr, nue, reg, kDoubleRegZero);
2010 ASSERT(r.IsTagged());
2011 Register reg = ToRegister(instr->value());
2012 HType type = instr->hydrogen()->value()->type();
2013 if (type.IsBoolean()) {
2014 ASSERT(!info()->IsStub());
2015 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2016 EmitBranch(instr, eq, reg, Operand(at));
2017 } else if (type.IsSmi()) {
2018 ASSERT(!info()->IsStub());
2019 EmitBranch(instr, ne, reg, Operand(zero_reg));
2020 } else if (type.IsJSArray()) {
2021 ASSERT(!info()->IsStub());
// A JSArray is always truthy: unconditional branch to the true block.
2022 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2023 } else if (type.IsHeapNumber()) {
2024 ASSERT(!info()->IsStub());
2025 DoubleRegister dbl_scratch = double_scratch0();
2026 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2027 // Test the double value. Zero and NaN are false.
2028 EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2029 } else if (type.IsString()) {
2030 ASSERT(!info()->IsStub());
// A string is truthy iff its length is non-zero.
2031 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2032 EmitBranch(instr, ne, at, Operand(zero_reg));
2034 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2035 // Avoid deopts in the case where we've never executed this path before.
2036 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2038 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2039 // undefined -> false.
2040 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2041 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2043 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2044 // Boolean -> its value.
2045 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2046 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2047 __ LoadRoot(at, Heap::kFalseValueRootIndex);
2048 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2050 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2052 __ LoadRoot(at, Heap::kNullValueRootIndex);
2053 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2056 if (expected.Contains(ToBooleanStub::SMI)) {
2057 // Smis: 0 -> false, all other -> true.
2058 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2059 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2060 } else if (expected.NeedsMap()) {
2061 // If we need a map later and have a Smi -> deopt.
2063 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
2066 const Register map = scratch0();
2067 if (expected.NeedsMap()) {
2068 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2069 if (expected.CanBeUndetectable()) {
2070 // Undetectable -> false.
2071 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2072 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2073 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2077 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2078 // spec object -> true.
2079 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2080 __ Branch(instr->TrueLabel(chunk_),
2081 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2084 if (expected.Contains(ToBooleanStub::STRING)) {
2085 // String value -> false iff empty.
2087 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2088 __ Branch(¬_string, ge , at, Operand(FIRST_NONSTRING_TYPE));
2089 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2090 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2091 __ Branch(instr->FalseLabel(chunk_));
2092 __ bind(¬_string);
2095 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2096 // Symbol value -> true.
2097 const Register scratch = scratch1();
2098 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2099 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2102 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2103 // heap number -> false iff +0, -0, or NaN.
2104 DoubleRegister dbl_scratch = double_scratch0();
2105 Label not_heap_number;
2106 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2107 __ Branch(¬_heap_number, ne, map, Operand(at));
2108 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2109 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2110 ne, dbl_scratch, kDoubleRegZero);
2111 // Falls through if dbl_scratch == 0.
2112 __ Branch(instr->FalseLabel(chunk_));
2113 __ bind(¬_heap_number);
2116 if (!expected.IsGeneric()) {
2117 // We've seen something for the first time -> deopt.
2118 // This can only happen if we are not generic already.
2119 DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
// Jumps to the given block unless it is the next block to be emitted,
// in which case control simply falls through.
2126 void LCodeGen::EmitGoto(int block) {
2127 if (!IsNextEmittedBlock(block)) {
2128 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
// Unconditional jump to the instruction's target block.
2133 void LCodeGen::DoGoto(LGoto* instr) {
2134 EmitGoto(instr->block_id());
// Maps a comparison token to a MIPS condition code, choosing the
// unsigned variant (lo/hi/ls/hs) when is_unsigned is set. Tokens with
// no branch condition (e.g. INSTANCEOF) leave kNoCondition.
2138 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2139 Condition cond = kNoCondition;
2142 case Token::EQ_STRICT:
2146 case Token::NE_STRICT:
2150 cond = is_unsigned ? lo : lt;
2153 cond = is_unsigned ? hi : gt;
2156 cond = is_unsigned ? ls : le;
2159 cond = is_unsigned ? hs : ge;
2162 case Token::INSTANCEOF:
// Emits a numeric comparison branch. Two constants are evaluated at
// compile time and turned into a goto. Doubles use BranchF with an
// explicit unordered (NaN) check that jumps to the false block. Integer
// comparisons put a constant (Smi-tagged when the representation is Smi)
// on the right, reversing the condition when operands were transposed.
2170 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2171 LOperand* left = instr->left();
2172 LOperand* right = instr->right();
2173 Condition cond = TokenToCondition(instr->op(), false);
2175 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2176 // We can statically evaluate the comparison.
2177 double left_val = ToDouble(LConstantOperand::cast(left));
2178 double right_val = ToDouble(LConstantOperand::cast(right));
2179 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2180 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2181 EmitGoto(next_block);
2183 if (instr->is_double()) {
2184 // Compare left and right as doubles and load the
2185 // resulting flags into the normal status register.
2186 FPURegister left_reg = ToDoubleRegister(left);
2187 FPURegister right_reg = ToDoubleRegister(right);
2189 // If a NaN is involved, i.e. the result is unordered,
2190 // jump to false block label.
2191 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2192 left_reg, right_reg);
2194 EmitBranchF(instr, cond, left_reg, right_reg);
2197 Operand cmp_right = Operand(0);
2199 if (right->IsConstantOperand()) {
2200 int32_t value = ToInteger32(LConstantOperand::cast(right));
2201 if (instr->hydrogen_value()->representation().IsSmi()) {
2202 cmp_left = ToRegister(left);
2203 cmp_right = Operand(Smi::FromInt(value));
2205 cmp_left = ToRegister(left);
2206 cmp_right = Operand(value);
2208 } else if (left->IsConstantOperand()) {
2209 int32_t value = ToInteger32(LConstantOperand::cast(left));
2210 if (instr->hydrogen_value()->representation().IsSmi()) {
2211 cmp_left = ToRegister(right);
2212 cmp_right = Operand(Smi::FromInt(value));
2214 cmp_left = ToRegister(right);
2215 cmp_right = Operand(value);
2217 // We transposed the operands. Reverse the condition.
2218 cond = ReverseCondition(cond);
2220 cmp_left = ToRegister(left);
2221 cmp_right = Operand(ToRegister(right));
2224 EmitBranch(instr, cond, cmp_left, cmp_right);
2230 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2231 Register left = ToRegister(instr->left());
2232 Register right = ToRegister(instr->right());
2234 EmitBranch(instr, eq, left, Operand(right));
2238 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2239 if (instr->hydrogen()->representation().IsTagged()) {
2240 Register input_reg = ToRegister(instr->object());
2241 __ li(at, Operand(factory()->the_hole_value()));
2242 EmitBranch(instr, eq, input_reg, Operand(at));
2246 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2247 EmitFalseBranchF(instr, eq, input_reg, input_reg);
2249 Register scratch = scratch0();
2250 __ FmoveHigh(scratch, input_reg);
2251 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2255 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2256 Representation rep = instr->hydrogen()->value()->representation();
2257 ASSERT(!rep.IsInteger32());
2258 Register scratch = ToRegister(instr->temp());
2260 if (rep.IsDouble()) {
2261 DoubleRegister value = ToDoubleRegister(instr->value());
2262 EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2263 __ FmoveHigh(scratch, value);
2264 __ li(at, 0x80000000);
2266 Register value = ToRegister(instr->value());
2269 Heap::kHeapNumberMapRootIndex,
2270 instr->FalseLabel(chunk()),
2272 __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2273 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2274 __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2275 __ mov(at, zero_reg);
2277 EmitBranch(instr, eq, scratch, Operand(at));
2281 Condition LCodeGen::EmitIsObject(Register input,
2284 Label* is_not_object,
2286 __ JumpIfSmi(input, is_not_object);
2288 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2289 __ Branch(is_object, eq, input, Operand(temp2));
2292 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2293 // Undetectable objects behave like undefined.
2294 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2295 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2296 __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2298 // Load instance type and check that it is in object type range.
2299 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2300 __ Branch(is_not_object,
2301 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2307 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2308 Register reg = ToRegister(instr->value());
2309 Register temp1 = ToRegister(instr->temp());
2310 Register temp2 = scratch0();
2312 Condition true_cond =
2313 EmitIsObject(reg, temp1, temp2,
2314 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2316 EmitBranch(instr, true_cond, temp2,
2317 Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2321 Condition LCodeGen::EmitIsString(Register input,
2323 Label* is_not_string,
2324 SmiCheck check_needed = INLINE_SMI_CHECK) {
2325 if (check_needed == INLINE_SMI_CHECK) {
2326 __ JumpIfSmi(input, is_not_string);
2328 __ GetObjectType(input, temp1, temp1);
2334 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2335 Register reg = ToRegister(instr->value());
2336 Register temp1 = ToRegister(instr->temp());
2338 SmiCheck check_needed =
2339 instr->hydrogen()->value()->IsHeapObject()
2340 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2341 Condition true_cond =
2342 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2344 EmitBranch(instr, true_cond, temp1,
2345 Operand(FIRST_NONSTRING_TYPE));
2349 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2350 Register input_reg = EmitLoadRegister(instr->value(), at);
2351 __ And(at, input_reg, kSmiTagMask);
2352 EmitBranch(instr, eq, at, Operand(zero_reg));
2356 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2357 Register input = ToRegister(instr->value());
2358 Register temp = ToRegister(instr->temp());
2360 if (!instr->hydrogen()->value()->IsHeapObject()) {
2361 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2363 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2364 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2365 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2366 EmitBranch(instr, ne, at, Operand(zero_reg));
2370 static Condition ComputeCompareCondition(Token::Value op) {
2372 case Token::EQ_STRICT:
2385 return kNoCondition;
2390 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2391 ASSERT(ToRegister(instr->context()).is(cp));
2392 Token::Value op = instr->op();
2394 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2395 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2397 Condition condition = ComputeCompareCondition(op);
2399 EmitBranch(instr, condition, v0, Operand(zero_reg));
2403 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2404 InstanceType from = instr->from();
2405 InstanceType to = instr->to();
2406 if (from == FIRST_TYPE) return to;
2407 ASSERT(from == to || to == LAST_TYPE);
2412 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2413 InstanceType from = instr->from();
2414 InstanceType to = instr->to();
2415 if (from == to) return eq;
2416 if (to == LAST_TYPE) return hs;
2417 if (from == FIRST_TYPE) return ls;
2423 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2424 Register scratch = scratch0();
2425 Register input = ToRegister(instr->value());
2427 if (!instr->hydrogen()->value()->IsHeapObject()) {
2428 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2431 __ GetObjectType(input, scratch, scratch);
2433 BranchCondition(instr->hydrogen()),
2435 Operand(TestType(instr->hydrogen())));
2439 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2440 Register input = ToRegister(instr->value());
2441 Register result = ToRegister(instr->result());
2443 __ AssertString(input);
2445 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2446 __ IndexFromHash(result, result);
2450 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2451 LHasCachedArrayIndexAndBranch* instr) {
2452 Register input = ToRegister(instr->value());
2453 Register scratch = scratch0();
2456 FieldMemOperand(input, String::kHashFieldOffset));
2457 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2458 EmitBranch(instr, eq, at, Operand(zero_reg));
// Class-of test: decides whether |input|'s [[Class]] equals |class_name|.
// Fast paths handle "Function" and the callable/non-callable type ranges;
// otherwise the constructor is loaded from the map and its shared-info
// instance class name is left in |temp| for the caller to compare.
// NOTE(review): this extraction dropped interior lines (parameter list
// continuation, several `} else {` separators and the Label declaration,
// per the baked-in line-number gaps 2465-2469/2490/2507/2522). Code kept
// byte-identical; restore the missing lines from upstream before compiling.
2462 // Branches to a label or falls through with the answer in flags. Trashes
2463 // the temp registers, but not the input.
2464 void LCodeGen::EmitClassOfTest(Label* is_true,
2466 Handle<String>class_name,
2470 ASSERT(!input.is(temp));
2471 ASSERT(!input.is(temp2));
2472 ASSERT(!temp.is(temp2));
2474 __ JumpIfSmi(input, is_false);
2476 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
2477 // Assuming the following assertions, we can use the same compares to test
2478 // for both being a function type and being in the object type range.
2479 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2480 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2481 FIRST_SPEC_OBJECT_TYPE + 1);
2482 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2483 LAST_SPEC_OBJECT_TYPE - 1);
2484 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2486 __ GetObjectType(input, temp, temp2);
2487 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2488 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2489 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2491 // Faster code path to avoid two compares: subtract lower bound from the
2492 // actual type and do a signed compare with the width of the type range.
2493 __ GetObjectType(input, temp, temp2);
2494 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2495 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2496 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2499 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2500 // Check if the constructor in the map is a function.
2501 __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2503 // Objects with a non-function constructor have class 'Object'.
2504 __ GetObjectType(temp, temp2, temp2);
2505 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2506 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2508 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2511 // temp now contains the constructor function. Grab the
2512 // instance class name from there.
2513 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2514 __ lw(temp, FieldMemOperand(temp,
2515 SharedFunctionInfo::kInstanceClassNameOffset));
2516 // The class name we are testing against is internalized since it's a literal.
2517 // The name in the constructor is internalized because of the way the context
2518 // is booted. This routine isn't expected to work for random API-created
2519 // classes and it doesn't have to because you can't access it with natives
2520 // syntax. Since both sides are internalized it is sufficient to use an
2521 // identity comparison.
2523 // End with the address of this class_name instance in temp register.
2524 // On MIPS, the caller must do the comparison with Handle<String>class_name.
2528 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2529 Register input = ToRegister(instr->value());
2530 Register temp = scratch0();
2531 Register temp2 = ToRegister(instr->temp());
2532 Handle<String> class_name = instr->hydrogen()->class_name();
2534 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2535 class_name, input, temp, temp2);
2537 EmitBranch(instr, eq, temp, Operand(class_name));
2541 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2542 Register reg = ToRegister(instr->value());
2543 Register temp = ToRegister(instr->temp());
2545 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2546 EmitBranch(instr, eq, temp, Operand(instr->map()));
2550 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2551 ASSERT(ToRegister(instr->context()).is(cp));
2552 Label true_label, done;
2553 ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
2554 ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
2555 Register result = ToRegister(instr->result());
2556 ASSERT(result.is(v0));
2558 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2559 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2561 __ Branch(&true_label, eq, result, Operand(zero_reg));
2562 __ li(result, Operand(factory()->false_value()));
2564 __ bind(&true_label);
2565 __ li(result, Operand(factory()->true_value()));
// Instanceof against a known global function, with an inlined, patchable
// call-site cache: the hole-value loads below are patched at runtime with
// the last map/result pair. Cache misses fall back to null/string filters
// and then the deferred stub call. The deferred helper needs the map-check
// label to compute the patch offset, hence the nested LDeferredCode class.
// NOTE(review): this extraction dropped interior lines (the class's
// public/private access labels, Label declarations such as `cache_miss`,
// a `__ Branch(&done)` and the final bind/braces, per the baked-in
// line-number gaps 2572/2578/2581-2586/2603-2604/2621-2622/2641-2644).
// Code kept byte-identical; restore from upstream before compiling.
2570 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2571 class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2573 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2574 LInstanceOfKnownGlobal* instr)
2575 : LDeferredCode(codegen), instr_(instr) { }
2576 virtual void Generate() V8_OVERRIDE {
2577 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2579 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2580 Label* map_check() { return &map_check_; }
2583 LInstanceOfKnownGlobal* instr_;
2587 DeferredInstanceOfKnownGlobal* deferred;
2588 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2590 Label done, false_result;
2591 Register object = ToRegister(instr->value());
2592 Register temp = ToRegister(instr->temp());
2593 Register result = ToRegister(instr->result());
2595 ASSERT(object.is(a0));
2596 ASSERT(result.is(v0));
2598 // A Smi is not instance of anything.
2599 __ JumpIfSmi(object, &false_result);
2601 // This is the inlined call site instanceof cache. The two occurences of the
2602 // hole value will be patched to the last map/result pair generated by the
2605 Register map = temp;
2606 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2608 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2609 __ bind(deferred->map_check()); // Label for calculating code patching.
2610 // We use Factory::the_hole_value() on purpose instead of loading from the
2611 // root array to force relocation to be able to later patch with
2613 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2614 __ li(at, Operand(Handle<Object>(cell)));
2615 __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
2616 __ BranchShort(&cache_miss, ne, map, Operand(at));
2617 // We use Factory::the_hole_value() on purpose instead of loading from the
2618 // root array to force relocation to be able to later patch
2619 // with true or false. The distance from map check has to be constant.
2620 __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2623 // The inlined call site cache did not match. Check null and string before
2624 // calling the deferred code.
2625 __ bind(&cache_miss);
2626 // Null is not instance of anything.
2627 __ LoadRoot(temp, Heap::kNullValueRootIndex);
2628 __ Branch(&false_result, eq, object, Operand(temp));
2630 // String values is not instance of anything.
2631 Condition cc = __ IsObjectStringType(object, temp, temp);
2632 __ Branch(&false_result, cc, temp, Operand(zero_reg));
2634 // Go to the deferred code.
2635 __ Branch(deferred->entry());
2637 __ bind(&false_result);
2638 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2640 // Here result has either true or false. Deferred code also produces true or
2642 __ bind(deferred->exit());
// Deferred slow path for DoInstanceOfKnownGlobal: calls InstanceofStub with
// the inline-check flags. The delta (in instructions) from the map-check
// label to this point is stored in t0's safepoint slot so the stub can
// patch the inlined cache; kAdditionalDelta accounts for the instructions
// emitted between the delta computation and the actual call. The exact
// instruction count is load-bearing — do not reorder.
// NOTE(review): this extraction dropped interior lines (the `map_check`
// parameter line, scope-closing braces and the `__ jmp` before
// StoreToSafepointRegisterSlot, per the baked-in line-number gaps
// 2648/2651/2660/2663/2674/2678/2681/2688-2689). Code kept byte-identical;
// restore from upstream before compiling.
2647 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2649 Register result = ToRegister(instr->result());
2650 ASSERT(result.is(v0));
2652 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2653 flags = static_cast<InstanceofStub::Flags>(
2654 flags | InstanceofStub::kArgsInRegisters);
2655 flags = static_cast<InstanceofStub::Flags>(
2656 flags | InstanceofStub::kCallSiteInlineCheck);
2657 flags = static_cast<InstanceofStub::Flags>(
2658 flags | InstanceofStub::kReturnTrueFalseObject);
2659 InstanceofStub stub(flags);
2661 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2662 LoadContextFromDeferred(instr->context());
2664 // Get the temp register reserved by the instruction. This needs to be t0 as
2665 // its slot of the pushing of safepoint registers is used to communicate the
2666 // offset to the location of the map check.
2667 Register temp = ToRegister(instr->temp());
2668 ASSERT(temp.is(t0));
2669 __ li(InstanceofStub::right(), instr->function());
2670 static const int kAdditionalDelta = 7;
2671 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2672 Label before_push_delta;
2673 __ bind(&before_push_delta);
2675 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2676 __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2677 __ StoreToSafepointRegisterSlot(temp, temp);
2679 CallCodeGeneric(stub.GetCode(isolate()),
2680 RelocInfo::CODE_TARGET,
2682 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2683 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2684 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2685 // Put the result value into the result register slot and
2686 // restore all registers.
2687 __ StoreToSafepointRegisterSlot(result, result);
2691 void LCodeGen::DoCmpT(LCmpT* instr) {
2692 ASSERT(ToRegister(instr->context()).is(cp));
2693 Token::Value op = instr->op();
2695 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2696 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2697 // On MIPS there is no need for a "no inlined smi code" marker (nop).
2699 Condition condition = ComputeCompareCondition(op);
2700 // A minor optimization that relies on LoadRoot always emitting one
2702 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2704 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2706 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2707 ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
2708 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2713 void LCodeGen::DoReturn(LReturn* instr) {
2714 if (FLAG_trace && info()->IsOptimizing()) {
2715 // Push the return value on the stack as the parameter.
2716 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2717 // managed by the register allocator and tearing down the frame, it's
2718 // safe to write to the context register.
2720 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2721 __ CallRuntime(Runtime::kTraceExit, 1);
2723 if (info()->saves_caller_doubles()) {
2724 RestoreCallerDoubles();
2726 int no_frame_start = -1;
2727 if (NeedsEagerFrame()) {
2729 no_frame_start = masm_->pc_offset();
2732 if (instr->has_constant_parameter_count()) {
2733 int parameter_count = ToInteger32(instr->constant_parameter_count());
2734 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2735 if (sp_delta != 0) {
2736 __ Addu(sp, sp, Operand(sp_delta));
2739 Register reg = ToRegister(instr->parameter_count());
2740 // The argument count parameter is a smi
2742 __ sll(at, reg, kPointerSizeLog2);
2743 __ Addu(sp, sp, at);
2748 if (no_frame_start != -1) {
2749 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2754 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2755 Register result = ToRegister(instr->result());
2756 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2757 __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
2758 if (instr->hydrogen()->RequiresHoleCheck()) {
2759 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2760 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2765 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2766 ASSERT(ToRegister(instr->context()).is(cp));
2767 ASSERT(ToRegister(instr->global_object()).is(a0));
2768 ASSERT(ToRegister(instr->result()).is(v0));
2770 __ li(a2, Operand(instr->name()));
2771 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2772 Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
2773 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2777 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
2778 Register value = ToRegister(instr->value());
2779 Register cell = scratch0();
2782 __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2784 // If the cell we are storing to contains the hole it could have
2785 // been deleted from the property dictionary. In that case, we need
2786 // to update the property details in the property dictionary to mark
2787 // it as no longer deleted.
2788 if (instr->hydrogen()->RequiresHoleCheck()) {
2789 // We use a temp to check the payload.
2790 Register payload = ToRegister(instr->temp());
2791 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
2792 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2793 DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2797 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
2798 // Cells are always rescanned, so no write barrier here.
2803 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2804 Register context = ToRegister(instr->context());
2805 Register result = ToRegister(instr->result());
2807 __ lw(result, ContextOperand(context, instr->slot_index()));
2808 if (instr->hydrogen()->RequiresHoleCheck()) {
2809 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2811 if (instr->hydrogen()->DeoptimizesOnHole()) {
2812 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2815 __ Branch(&is_not_hole, ne, result, Operand(at));
2816 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2817 __ bind(&is_not_hole);
2823 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2824 Register context = ToRegister(instr->context());
2825 Register value = ToRegister(instr->value());
2826 Register scratch = scratch0();
2827 MemOperand target = ContextOperand(context, instr->slot_index());
2829 Label skip_assignment;
2831 if (instr->hydrogen()->RequiresHoleCheck()) {
2832 __ lw(scratch, target);
2833 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2835 if (instr->hydrogen()->DeoptimizesOnHole()) {
2836 DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2838 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2842 __ sw(value, target);
2843 if (instr->hydrogen()->NeedsWriteBarrier()) {
2844 SmiCheck check_needed =
2845 instr->hydrogen()->value()->IsHeapObject()
2846 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2847 __ RecordWriteContextSlot(context,
2853 EMIT_REMEMBERED_SET,
2857 __ bind(&skip_assignment);
2861 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2862 HObjectAccess access = instr->hydrogen()->access();
2863 int offset = access.offset();
2864 Register object = ToRegister(instr->object());
2866 if (access.IsExternalMemory()) {
2867 Register result = ToRegister(instr->result());
2868 MemOperand operand = MemOperand(object, offset);
2869 __ Load(result, operand, access.representation());
2873 if (instr->hydrogen()->representation().IsDouble()) {
2874 DoubleRegister result = ToDoubleRegister(instr->result());
2875 __ ldc1(result, FieldMemOperand(object, offset));
2879 Register result = ToRegister(instr->result());
2880 if (!access.IsInobject()) {
2881 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2884 MemOperand operand = FieldMemOperand(object, offset);
2885 __ Load(result, operand, access.representation());
2889 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2890 ASSERT(ToRegister(instr->context()).is(cp));
2891 ASSERT(ToRegister(instr->object()).is(a0));
2892 ASSERT(ToRegister(instr->result()).is(v0));
2894 // Name is always in a2.
2895 __ li(a2, Operand(instr->name()));
2896 Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
2897 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2901 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2902 Register scratch = scratch0();
2903 Register function = ToRegister(instr->function());
2904 Register result = ToRegister(instr->result());
2906 // Check that the function really is a function. Load map into the
2908 __ GetObjectType(function, result, scratch);
2909 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
2911 // Make sure that the function has an instance prototype.
2913 __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2914 __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2915 __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
2917 // Get the prototype or initial map from the function.
2919 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2921 // Check that the function has a prototype or an initial map.
2922 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2923 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2925 // If the function does not have an initial map, we're done.
2927 __ GetObjectType(result, scratch, scratch);
2928 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2930 // Get the prototype from the initial map.
2931 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2934 // Non-instance prototype: Fetch prototype from constructor field
2936 __ bind(&non_instance);
2937 __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2944 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2945 Register result = ToRegister(instr->result());
2946 __ LoadRoot(result, instr->index());
// Loads arguments[index] from the adapted-arguments area: the slot address
// is arguments + (length - index + 1) * kPointerSize, with the +1 covering
// one of the two words between fp and the last argument. Four shapes are
// generated depending on which of length/index are compile-time constants.
// NOTE(review): this extraction dropped interior lines (the `} else {`
// separators and closing braces, and — per the gap at 2973 and the
// `loc` computation — an `if (loc != 0)` guard with an else branch using
// `length` directly, per the baked-in line-number gaps 2961/2968/
// 2973/2978/2982-2983/2991-2992). Code kept byte-identical; restore the
// missing lines from upstream before compiling.
2950 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2951 Register arguments = ToRegister(instr->arguments());
2952 Register result = ToRegister(instr->result());
2953 // There are two words between the frame pointer and the last argument.
2954 // Subtracting from length accounts for one of them add one more.
2955 if (instr->length()->IsConstantOperand()) {
2956 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2957 if (instr->index()->IsConstantOperand()) {
2958 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2959 int index = (const_length - const_index) + 1;
2960 __ lw(result, MemOperand(arguments, index * kPointerSize));
2962 Register index = ToRegister(instr->index());
2963 __ li(at, Operand(const_length + 1));
2964 __ Subu(result, at, index);
2965 __ sll(at, result, kPointerSizeLog2);
2966 __ Addu(at, arguments, at);
2967 __ lw(result, MemOperand(at));
2969 } else if (instr->index()->IsConstantOperand()) {
2970 Register length = ToRegister(instr->length());
2971 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2972 int loc = const_index - 1;
2974 __ Subu(result, length, Operand(loc));
2975 __ sll(at, result, kPointerSizeLog2);
2976 __ Addu(at, arguments, at);
2977 __ lw(result, MemOperand(at));
2979 __ sll(at, length, kPointerSizeLog2);
2980 __ Addu(at, arguments, at);
2981 __ lw(result, MemOperand(at));
2984 Register length = ToRegister(instr->length());
2985 Register index = ToRegister(instr->index());
2986 __ Subu(result, length, index);
2987 __ Addu(result, result, 1);
2988 __ sll(at, result, kPointerSizeLog2);
2989 __ Addu(at, arguments, at);
2990 __ lw(result, MemOperand(at));
// Keyed load from an external/typed array. The address is
// external_pointer + (key << shift) + additional offset; floats are loaded
// via the FPU (with float32 widened to double), everything else goes
// through PrepareKeyedOperand and a width-appropriate integer load.
// Uint32 loads deopt when the value does not fit a signed 32-bit result
// unless hydrogen marked the value as kUint32.
// NOTE(review): this extraction dropped interior lines (closing braces,
// several `break;`s and `case` labels, the Abort path's brace, the
// `? : 0` arm of additional_offset, the `int base_offset =` head, and the
// FAST_ELEMENTS / UNREACHABLE tail, per the baked-in line-number gaps
// 3005-3008/3014-3015/3020/3025/3028/3035-3036/3044/3046/3052/3056/3060/
// 3064/3071-3072/3078/3085-3089). Code kept byte-identical; restore the
// missing lines from upstream before compiling.
2995 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2996 Register external_pointer = ToRegister(instr->elements());
2997 Register key = no_reg;
2998 ElementsKind elements_kind = instr->elements_kind();
2999 bool key_is_constant = instr->key()->IsConstantOperand();
3000 int constant_key = 0;
3001 if (key_is_constant) {
3002 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3003 if (constant_key & 0xF0000000) {
3004 Abort(kArrayIndexConstantValueTooBig);
3007 key = ToRegister(instr->key());
3009 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3010 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3011 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3012 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
3013 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
3016 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3017 elements_kind == FLOAT32_ELEMENTS ||
3018 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3019 elements_kind == FLOAT64_ELEMENTS) {
3021 (instr->additional_index() << element_size_shift) + additional_offset;
3022 FPURegister result = ToDoubleRegister(instr->result());
3023 if (key_is_constant) {
3024 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
3026 __ sll(scratch0(), key, shift_size);
3027 __ Addu(scratch0(), scratch0(), external_pointer);
3029 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3030 elements_kind == FLOAT32_ELEMENTS) {
3031 __ lwc1(result, MemOperand(scratch0(), base_offset));
3032 __ cvt_d_s(result, result);
3033 } else { // loading doubles, not floats.
3034 __ ldc1(result, MemOperand(scratch0(), base_offset));
3037 Register result = ToRegister(instr->result());
3038 MemOperand mem_operand = PrepareKeyedOperand(
3039 key, external_pointer, key_is_constant, constant_key,
3040 element_size_shift, shift_size,
3041 instr->additional_index(), additional_offset);
3042 switch (elements_kind) {
3043 case EXTERNAL_INT8_ELEMENTS:
3045 __ lb(result, mem_operand);
3047 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3048 case EXTERNAL_UINT8_ELEMENTS:
3049 case UINT8_ELEMENTS:
3050 case UINT8_CLAMPED_ELEMENTS:
3051 __ lbu(result, mem_operand);
3053 case EXTERNAL_INT16_ELEMENTS:
3054 case INT16_ELEMENTS:
3055 __ lh(result, mem_operand);
3057 case EXTERNAL_UINT16_ELEMENTS:
3058 case UINT16_ELEMENTS:
3059 __ lhu(result, mem_operand);
3061 case EXTERNAL_INT32_ELEMENTS:
3062 case INT32_ELEMENTS:
3063 __ lw(result, mem_operand);
3065 case EXTERNAL_UINT32_ELEMENTS:
3066 case UINT32_ELEMENTS:
3067 __ lw(result, mem_operand);
3068 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3069 DeoptimizeIf(Ugreater_equal, instr->environment(),
3070 result, Operand(0x80000000));
3073 case FLOAT32_ELEMENTS:
3074 case FLOAT64_ELEMENTS:
3075 case EXTERNAL_FLOAT32_ELEMENTS:
3076 case EXTERNAL_FLOAT64_ELEMENTS:
3077 case FAST_DOUBLE_ELEMENTS:
3079 case FAST_SMI_ELEMENTS:
3080 case FAST_HOLEY_DOUBLE_ELEMENTS:
3081 case FAST_HOLEY_ELEMENTS:
3082 case FAST_HOLEY_SMI_ELEMENTS:
3083 case DICTIONARY_ELEMENTS:
3084 case NON_STRICT_ARGUMENTS_ELEMENTS:
// Keyed load from a FixedDoubleArray: computes the element address into
// scratch (constant part folded into base_offset, variable key added as a
// shifted register) and does a 64-bit FPU load. The optional hole check
// inspects the upper word of the just-loaded slot for the hole-NaN pattern
// and deoptimizes on a match.
// NOTE(review): this extraction dropped interior lines (the
// `int base_offset =` declaration head, closing braces and blank lines,
// per the baked-in line-number gaps 3098/3100-3101/3108/3110/3112/
// 3119-3120/3122/3126-3127). Code kept byte-identical; restore the missing
// lines from upstream before compiling.
3092 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3093 Register elements = ToRegister(instr->elements());
3094 bool key_is_constant = instr->key()->IsConstantOperand();
3095 Register key = no_reg;
3096 DoubleRegister result = ToDoubleRegister(instr->result());
3097 Register scratch = scratch0();
3099 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3102 FixedDoubleArray::kHeaderSize - kHeapObjectTag +
3103 (instr->additional_index() << element_size_shift);
3104 if (key_is_constant) {
3105 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3106 if (constant_key & 0xF0000000) {
3107 Abort(kArrayIndexConstantValueTooBig);
3109 base_offset += constant_key << element_size_shift;
3111 __ Addu(scratch, elements, Operand(base_offset));
3113 if (!key_is_constant) {
3114 key = ToRegister(instr->key());
3115 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3116 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3117 __ sll(at, key, shift_size);
3118 __ Addu(scratch, scratch, at);
3121 __ ldc1(result, MemOperand(scratch));
3123 if (instr->hydrogen()->RequiresHoleCheck()) {
3124 __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)))
3125 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
// Keyed load from a FixedArray of tagged values. For a constant key, the
// offset is folded and the load goes straight off `elements`; otherwise
// the (possibly Smi-tagged) key is scaled into scratch. The hole check
// deopts either on a non-Smi result (fast-smi kind) or on the hole value.
// NOTE(review): this extraction dropped interior lines (the declaration of
// `offset`, `} else {` separators and closing braces, per the baked-in
// line-number gaps 3135-3136/3142/3151/3154/3156/3158/3164/3167-3169).
// Code kept byte-identical; restore the missing lines from upstream
// before compiling.
3130 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3131 Register elements = ToRegister(instr->elements());
3132 Register result = ToRegister(instr->result());
3133 Register scratch = scratch0();
3134 Register store_base = scratch;
3137 if (instr->key()->IsConstantOperand()) {
3138 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3139 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
3140 instr->additional_index());
3141 store_base = elements;
3143 Register key = ToRegister(instr->key());
3144 // Even though the HLoadKeyed instruction forces the input
3145 // representation for the key to be an integer, the input gets replaced
3146 // during bound check elimination with the index argument to the bounds
3147 // check, which can be tagged, so that case must be handled here, too.
3148 if (instr->hydrogen()->key()->representation().IsSmi()) {
3149 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3150 __ addu(scratch, elements, scratch);
3152 __ sll(scratch, key, kPointerSizeLog2);
3153 __ addu(scratch, elements, scratch);
3155 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3157 __ lw(result, FieldMemOperand(store_base, offset));
3159 // Check for the hole value.
3160 if (instr->hydrogen()->RequiresHoleCheck()) {
3161 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3162 __ SmiTst(result, scratch);
3163 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3165 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3166 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
// Dispatches a keyed load to the specialized emitter for the backing-store
// kind: typed/external arrays, double arrays, or plain fixed arrays.
3172 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3173 if (instr->is_typed_elements()) {
3174 DoLoadKeyedExternalArray(instr);
3175 } else if (instr->hydrogen()->representation().IsDouble()) {
3176 DoLoadKeyedFixedDoubleArray(instr);
3178 DoLoadKeyedFixedArray(instr);
// Builds a MemOperand addressing element `key` (+ additional_index /
// additional_offset) of a typed-array backing store. A constant key folds
// entirely into the immediate; otherwise scratch0() is used to combine
// base + scaled key. shift_size == -1 means the key must be shifted RIGHT
// by one (srl) instead of left — presumably a smi-tagged key addressing
// byte-sized elements; TODO confirm against callers.
3183 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3185 bool key_is_constant,
3189 int additional_index,
3190 int additional_offset) {
3191 int base_offset = (additional_index << element_size) + additional_offset;
3192 if (key_is_constant) {
// Everything is known statically: address directly off `base`.
3193 return MemOperand(base,
3194 base_offset + (constant_key << element_size));
3197 if (additional_offset != 0) {
3198 if (shift_size >= 0) {
3199 __ sll(scratch0(), key, shift_size);
3200 __ Addu(scratch0(), scratch0(), Operand(base_offset));
3202 ASSERT_EQ(-1, shift_size);
3203 __ srl(scratch0(), key, 1);
3204 __ Addu(scratch0(), scratch0(), Operand(base_offset));
3206 __ Addu(scratch0(), base, scratch0());
3207 return MemOperand(scratch0());
3210 if (additional_index != 0) {
// Pre-bias the key so the later shift produces key + additional_index
// scaled by the element size.
3211 additional_index *= 1 << (element_size - shift_size);
3212 __ Addu(scratch0(), key, Operand(additional_index));
3215 if (additional_index == 0) {
3216 if (shift_size >= 0) {
3217 __ sll(scratch0(), key, shift_size);
3218 __ Addu(scratch0(), base, scratch0());
3219 return MemOperand(scratch0());
3221 ASSERT_EQ(-1, shift_size);
3222 __ srl(scratch0(), key, 1);
3223 __ Addu(scratch0(), base, scratch0());
3224 return MemOperand(scratch0());
// additional_index != 0: scratch0() already holds the biased key.
3228 if (shift_size >= 0) {
3229 __ sll(scratch0(), scratch0(), shift_size);
3230 __ Addu(scratch0(), base, scratch0());
3231 return MemOperand(scratch0());
3233 ASSERT_EQ(-1, shift_size);
3234 __ srl(scratch0(), scratch0(), 1);
3235 __ Addu(scratch0(), base, scratch0());
3236 return MemOperand(scratch0());
// Generic keyed load: falls back to the KeyedLoadIC stub.
// Calling convention (asserted): receiver in a1, key in a0, context in cp.
3241 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3242 ASSERT(ToRegister(instr->context()).is(cp));
3243 ASSERT(ToRegister(instr->object()).is(a1));
3244 ASSERT(ToRegister(instr->key()).is(a0));
3246 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3247 CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Computes the frame pointer from which the arguments of the current
// function can be read. For inlined frames this is a fixed offset below sp;
// otherwise it detects an arguments-adaptor frame and selects either fp or
// the caller frame via a branchless Movn/Movz pair.
3251 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3252 Register scratch = scratch0();
3253 Register temp = scratch1();
3254 Register result = ToRegister(instr->result());
3256 if (instr->hydrogen()->from_inlined()) {
3257 __ Subu(result, sp, 2 * kPointerSize);
3259 // Check if the calling frame is an arguments adaptor frame.
3260 Label done, adapted;
3261 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3262 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
// temp == 0 iff the caller frame's context slot holds the adaptor sentinel.
3263 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3265 // Result is the frame pointer for the frame if not adapted and for the real
3266 // frame below the adaptor frame if adapted.
3267 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3268 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
// Produces the number of arguments: the static parameter count when there
// is no adaptor frame (elements == fp), otherwise the untagged length slot
// of the arguments-adaptor frame.
3273 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3274 Register elem = ToRegister(instr->elements());
3275 Register result = ToRegister(instr->result());
3279 // If no arguments adaptor frame the number of arguments is fixed.
3280 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3281 __ Branch(&done, eq, fp, Operand(elem));
3283 // Arguments adaptor frame present. Get argument length from there.
3284 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3286 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
// Length is stored as a smi; convert to a plain integer.
3287 __ SmiUntag(result);
3289 // Argument length is in result register.
// Implements receiver wrapping for Function.prototype.apply-style calls:
// null/undefined receivers become the global receiver for normal functions,
// values pass through unchanged for strict-mode and native (builtin)
// functions, and non-object receivers cause a deopt.
3294 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3295 Register receiver = ToRegister(instr->receiver());
3296 Register function = ToRegister(instr->function());
3297 Register result = ToRegister(instr->result());
3298 Register scratch = scratch0();
3300 // If the receiver is null or undefined, we have to pass the global
3301 // object as a receiver to normal functions. Values have to be
3302 // passed unchanged to builtins and strict-mode functions.
3303 Label global_object, result_in_receiver;
3305 if (!instr->hydrogen()->known_function()) {
3306 // Do not transform the receiver to object for strict mode
3309 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3311 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3313 // Do not transform the receiver to object for builtins.
// The hint bits sit above the smi tag, hence the + kSmiTagSize shifts.
3314 int32_t strict_mode_function_mask =
3315 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3316 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3317 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3318 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3321 // Normal function. Replace undefined or null with global receiver.
3322 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3323 __ Branch(&global_object, eq, receiver, Operand(scratch));
3324 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3325 __ Branch(&global_object, eq, receiver, Operand(scratch));
3327 // Deoptimize if the receiver is not a JS object.
// Smi check first (a smi has no map), then instance-type range check.
3328 __ SmiTst(receiver, scratch);
3329 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3331 __ GetObjectType(receiver, scratch, scratch);
3332 DeoptimizeIf(lt, instr->environment(),
3333 scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
3335 __ Branch(&result_in_receiver);
3336 __ bind(&global_object);
// Fetch the global receiver through the function's context chain.
3337 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3339 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3341 FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
3343 if (result.is(receiver)) {
3344 __ bind(&result_in_receiver);
// result and receiver differ: copy receiver into result on the
// pass-through path, skipping the copy when result already holds it.
3347 __ Branch(&result_ok);
3348 __ bind(&result_in_receiver);
3349 __ mov(result, receiver);
3350 __ bind(&result_ok);
// Implements Function.prototype.apply: copies up to kArgumentsLimit
// arguments from the (possibly adapted) caller frame onto the stack, then
// invokes the function with a lazy-deopt safepoint. Deopts if the argument
// count exceeds the limit.
3355 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3356 Register receiver = ToRegister(instr->receiver());
3357 Register function = ToRegister(instr->function());
3358 Register length = ToRegister(instr->length());
3359 Register elements = ToRegister(instr->elements());
3360 Register scratch = scratch0();
3361 ASSERT(receiver.is(a0)); // Used for parameter count.
3362 ASSERT(function.is(a1)); // Required by InvokeFunction.
3363 ASSERT(ToRegister(instr->result()).is(v0));
3365 // Copy the arguments to this function possibly from the
3366 // adaptor frame below it.
3367 const uint32_t kArgumentsLimit = 1 * KB;
3368 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
3370 // Push the receiver and use the register to keep the original
3371 // number of arguments.
3373 __ Move(receiver, length);
3374 // The arguments are at a one pointer size offset from elements.
3375 __ Addu(elements, elements, Operand(1 * kPointerSize));
3377 // Loop through the arguments pushing them onto the execution
3380 // length is a small non-negative integer, due to the test above.
// Delay slots are used deliberately: the sll computing the next element
// offset executes whether or not the branch is taken.
3381 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3382 __ sll(scratch, length, 2);
3384 __ Addu(scratch, elements, scratch);
3385 __ lw(scratch, MemOperand(scratch));
3387 __ Subu(length, length, Operand(1));
3388 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3389 __ sll(scratch, length, 2);
3392 ASSERT(instr->HasPointerMap());
3393 LPointerMap* pointers = instr->pointer_map();
3394 SafepointGenerator safepoint_generator(
3395 this, pointers, Safepoint::kLazyDeopt);
3396 // The number of arguments is stored in receiver which is a0, as expected
3397 // by InvokeFunction.
3398 ParameterCount actual(receiver);
3399 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
// Pushes a single (tagged) argument onto the stack. Doubles are not
// supported here and abort codegen.
3403 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3404 LOperand* argument = instr->value();
3405 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3406 Abort(kDoPushArgumentNotImplementedForDoubleType);
// `at` serves as the scratch register if the operand must be materialized.
3408 Register argument_reg = EmitLoadRegister(argument, at);
3409 __ push(argument_reg);
// Pops instr->count() words off the stack.
3414 void LCodeGen::DoDrop(LDrop* instr) {
3415 __ Drop(instr->count());
// Loads the current JSFunction from the standard frame slot.
3419 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3420 Register result = ToRegister(instr->result());
3421 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Materializes the current context: from the frame slot when optimizing
// (a frame exists), otherwise it must already be in cp.
3425 void LCodeGen::DoContext(LContext* instr) {
3426 // If there is a non-return use, the context must be moved to a register.
3427 Register result = ToRegister(instr->result());
3428 if (info()->IsOptimizing()) {
3429 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3431 // If there is no frame, the context must be in cp.
3432 ASSERT(result.is(cp));
// Declares global variables/functions via Runtime::kDeclareGlobals,
// passing (context, pairs array, flags smi) on the stack.
3437 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3438 ASSERT(ToRegister(instr->context()).is(cp));
3439 __ li(scratch0(), instr->hydrogen()->pairs());
3440 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3441 // The context is the first argument.
3442 __ Push(cp, scratch0(), scratch1());
3443 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
// Calls a statically-known JSFunction. When the formal parameter count
// matches the arity (or the function opts out of adaptation via the
// kDontAdaptArgumentsSentinel), the call is made directly through the code
// entry; otherwise it goes through the generic InvokeFunction path which
// handles argument adaptation.
3447 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3448 int formal_parameter_count,
3450 LInstruction* instr,
3452 bool dont_adapt_arguments =
3453 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3454 bool can_invoke_directly =
3455 dont_adapt_arguments || formal_parameter_count == arity;
3457 LPointerMap* pointers = instr->pointer_map();
3459 if (can_invoke_directly) {
// Materialize the function into a1 only if the caller hasn't already.
3460 if (a1_state == A1_UNINITIALIZED) {
3461 __ li(a1, function);
3465 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3467 // Set r0 to arguments count if adaption is not needed. Assumes that r0
3468 // is available to write to at this point.
3469 if (dont_adapt_arguments) {
3470 __ li(a0, Operand(arity));
3474 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3477 // Set up deoptimization.
3478 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3480 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3481 ParameterCount count(arity);
3482 ParameterCount expected(formal_parameter_count);
3483 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
// Deferred slow path of Math.abs for a tagged input: deopts if the input
// is not a heap number; for a negative heap number, allocates a fresh heap
// number (runtime fallback if allocation fails) holding the same value with
// the IEEE-754 sign bit cleared. Positive inputs are returned unchanged.
3488 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3489 ASSERT(instr->context() != NULL);
3490 ASSERT(ToRegister(instr->context()).is(cp));
3491 Register input = ToRegister(instr->value());
3492 Register result = ToRegister(instr->result());
3493 Register scratch = scratch0();
3495 // Deoptimize if not a heap number.
3496 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3497 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3498 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3501 Register exponent = scratch0();
3503 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3504 // Check the sign of the argument. If the argument is positive, just
// return it: the sign bit lives in the exponent word (kSignMask).
3506 __ Move(result, input);
3507 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3508 __ Branch(&done, eq, at, Operand(zero_reg));
3510 // Input is negative. Reverse its sign.
3511 // Preserve the value of all registers.
3513 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3515 // Registers were saved at the safepoint, so we can use
3516 // many scratch registers.
// Pick four registers guaranteed distinct from `input`.
3517 Register tmp1 = input.is(a1) ? a0 : a1;
3518 Register tmp2 = input.is(a2) ? a0 : a2;
3519 Register tmp3 = input.is(a3) ? a0 : a3;
3520 Register tmp4 = input.is(t0) ? a0 : t0;
3522 // exponent: floating point exponent value.
3524 Label allocated, slow;
3525 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3526 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3527 __ Branch(&allocated);
3529 // Slow case: Call the runtime system to do the number allocation.
3532 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3534 // Set the pointer to the new heap number in tmp.
3537 // Restore input_reg after call to runtime.
3538 __ LoadFromSafepointRegisterSlot(input, input);
3539 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3541 __ bind(&allocated);
3542 // exponent: floating point exponent value.
3543 // tmp1: allocated heap number.
// Clear the sign bit and copy both 32-bit words into the new heap number.
3544 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3545 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3546 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3547 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3549 __ StoreToSafepointRegisterSlot(tmp1, result);
// Integer Math.abs: copies non-negative inputs, negates negative ones, and
// deoptimizes on overflow (abs(INT_MIN) stays negative after negation).
3556 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3557 Register input = ToRegister(instr->value());
3558 Register result = ToRegister(instr->result());
// Trampoline pool blocked so the branch/delay-slot pair stays contiguous.
3559 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3561 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3562 __ mov(result, input);
3563 __ subu(result, zero_reg, input);
3564 // Overflow if result is still negative, i.e. 0x80000000.
3565 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
// Math.abs dispatch: abs_d for doubles, integer path for smi/int32, and
// for tagged values a smi fast path with a deferred heap-number slow path.
3570 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3571 // Class for deferred case.
3572 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3574 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3575 : LDeferredCode(codegen), instr_(instr) { }
3576 virtual void Generate() V8_OVERRIDE {
3577 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3579 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
3584 Representation r = instr->hydrogen()->value()->representation();
3586 FPURegister input = ToDoubleRegister(instr->value());
3587 FPURegister result = ToDoubleRegister(instr->result());
3588 __ abs_d(result, input);
3589 } else if (r.IsSmiOrInteger32()) {
3590 EmitIntegerMathAbs(instr);
3592 // Representation is tagged.
3593 DeferredMathAbsTaggedHeapNumber* deferred =
3594 new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
3595 Register input = ToRegister(instr->value());
3597 __ JumpIfNotSmi(input, deferred->entry());
3598 // If smi, handle it directly.
3599 EmitIntegerMathAbs(instr);
3600 __ bind(deferred->exit());
// Math.floor via FPU truncation toward -infinity. Deopts if the conversion
// was inexact/failed (except_flag != 0), and — when minus-zero must be
// preserved — deopts on a zero result whose input had the sign bit set.
3605 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3606 DoubleRegister input = ToDoubleRegister(instr->value());
3607 Register result = ToRegister(instr->result());
3608 Register scratch1 = scratch0();
3609 Register except_flag = ToRegister(instr->temp());
3611 __ EmitFPUTruncate(kRoundToMinusInf,
3618 // Deopt if the operation did not succeed.
3619 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3621 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3624 __ Branch(&done, ne, result, Operand(zero_reg));
// Result is zero: inspect the input's sign word to detect -0.
3625 __ mfc1(scratch1, input.high());
3626 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3627 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Math.round: short-circuits tiny magnitudes (|x| < 0.5) to 0, deopts for
// magnitudes >= 2^32, otherwise adds 0.5 and truncates toward -infinity.
// Extra sign bookkeeping catches the inputs in ]-0.5, 0[ whose rounded
// result should be -0 when kBailoutOnMinusZero is set.
3633 void LCodeGen::DoMathRound(LMathRound* instr) {
3634 DoubleRegister input = ToDoubleRegister(instr->value());
3635 Register result = ToRegister(instr->result());
3636 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3637 Register scratch = scratch0();
3638 Label done, check_sign_on_zero;
3640 // Extract exponent bits.
3641 __ mfc1(result, input.high());
3644 HeapNumber::kExponentShift,
3645 HeapNumber::kExponentBits);
3647 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
// Biased exponent <= bias-2 means |x| < 0.5.
3649 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3650 __ mov(result, zero_reg);
3651 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3652 __ Branch(&check_sign_on_zero);
3658 // The following conversion will not work with numbers
3659 // outside of ]-2^32, 2^32[.
3660 DeoptimizeIf(ge, instr->environment(), scratch,
3661 Operand(HeapNumber::kExponentBias + 32));
3663 // Save the original sign for later comparison.
3664 __ And(scratch, result, Operand(HeapNumber::kSignMask));
// round(x) == floor(x + 0.5) for the remaining range.
3666 __ Move(double_scratch0(), 0.5);
3667 __ add_d(double_scratch0(), input, double_scratch0());
3669 // Check sign of the result: if the sign changed, the input
3670 // value was in ]0.5, 0[ and the result should be -0.
3671 __ mfc1(result, double_scratch0().high());
3672 __ Xor(result, result, Operand(scratch));
3673 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3674 // ARM uses 'mi' here, which is 'lt'
3675 DeoptimizeIf(lt, instr->environment(), result,
3679 // ARM uses 'mi' here, which is 'lt'
3680 // Negating it results in 'ge'
3681 __ Branch(&skip2, ge, result, Operand(zero_reg));
3682 __ mov(result, zero_reg);
3687 Register except_flag = scratch;
3688 __ EmitFPUTruncate(kRoundToMinusInf,
// Deopt if the truncation raised an exception flag.
3695 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3697 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3699 __ Branch(&done, ne, result, Operand(zero_reg));
3700 __ bind(&check_sign_on_zero);
// Zero result: deopt if the original input was negative (i.e. -0).
3701 __ mfc1(scratch, input.high());
3702 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3703 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
// Math.sqrt: single FPU sqrt.d instruction.
3709 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3710 DoubleRegister input = ToDoubleRegister(instr->value());
3711 DoubleRegister result = ToDoubleRegister(instr->result());
3712 __ sqrt_d(result, input);
// Math.pow(x, 0.5). Special-cases x == -Infinity to return +Infinity
// (per ES5 15.8.2.13, unlike sqrt which would give NaN), and adds +0
// before sqrt so that -0 becomes +0.
3716 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
3717 DoubleRegister input = ToDoubleRegister(instr->value());
3718 DoubleRegister result = ToDoubleRegister(instr->result());
3719 DoubleRegister temp = ToDoubleRegister(instr->temp());
3721 ASSERT(!input.is(result));
3723 // Note that according to ECMA-262 15.8.2.13:
3724 // Math.pow(-Infinity, 0.5) == Infinity
3725 // Math.sqrt(-Infinity) == NaN
3727 __ Move(temp, -V8_INFINITY);
3728 __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
3729 // Set up Infinity in the delay slot.
3730 // result is overwritten if the branch is not taken.
// neg_d(-Infinity) == +Infinity — computed in the branch delay slot.
3731 __ neg_d(result, temp);
3733 // Add +0 to convert -0 to +0.
3734 __ add_d(result, input, kDoubleRegZero);
3735 __ sqrt_d(result, result);
// Math.pow via MathPowStub, selected by the exponent's representation
// (smi / tagged / int32 / double). A tagged exponent deopts unless it is
// a smi or a heap number. Register constraints: base f2, double exponent
// f4, tagged exponent a2, result f0.
3740 void LCodeGen::DoPower(LPower* instr) {
3741 Representation exponent_type = instr->hydrogen()->right()->representation();
3742 // Having marked this as a call, we can use any registers.
3743 // Just make sure that the input/output registers are the expected ones.
3744 ASSERT(!instr->right()->IsDoubleRegister() ||
3745 ToDoubleRegister(instr->right()).is(f4));
3746 ASSERT(!instr->right()->IsRegister() ||
3747 ToRegister(instr->right()).is(a2));
3748 ASSERT(ToDoubleRegister(instr->left()).is(f2));
3749 ASSERT(ToDoubleRegister(instr->result()).is(f0));
3751 if (exponent_type.IsSmi()) {
3752 MathPowStub stub(MathPowStub::TAGGED);
3754 } else if (exponent_type.IsTagged()) {
// Non-smi tagged exponent must be a heap number, else deopt.
3756 __ JumpIfSmi(a2, &no_deopt);
3757 __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
3758 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3759 DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
3761 MathPowStub stub(MathPowStub::TAGGED);
3763 } else if (exponent_type.IsInteger32()) {
3764 MathPowStub stub(MathPowStub::INTEGER);
3767 ASSERT(exponent_type.IsDouble());
3768 MathPowStub stub(MathPowStub::DOUBLE);
// Math.exp: delegates to MathExpGenerator with two integer temps and two
// double scratch registers.
3774 void LCodeGen::DoMathExp(LMathExp* instr) {
3775 DoubleRegister input = ToDoubleRegister(instr->value());
3776 DoubleRegister result = ToDoubleRegister(instr->result());
3777 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3778 DoubleRegister double_scratch2 = double_scratch0();
3779 Register temp1 = ToRegister(instr->temp1());
3780 Register temp2 = ToRegister(instr->temp2());
3782 MathExpGenerator::EmitMathExp(
3783 masm(), input, result, double_scratch1, double_scratch2,
3784 temp1, temp2, scratch0());
// Math.log: calls out to the C math_log_double_function with the value in
// the FP parameter register and reads the FP result back.
3788 void LCodeGen::DoMathLog(LMathLog* instr) {
3789 __ PrepareCallCFunction(0, 1, scratch0());
3790 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
3791 __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
3793 __ MovFromFloatResult(ToDoubleRegister(instr->result()));
// Invokes a function in a1: through the generic InvokeFunction path when
// the target is unknown at compile time, otherwise through
// CallKnownFunction with A1_CONTAINS_TARGET (a1 already loaded).
3797 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
3798 ASSERT(ToRegister(instr->context()).is(cp));
3799 ASSERT(ToRegister(instr->function()).is(a1));
3800 ASSERT(instr->HasPointerMap());
3802 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
3803 if (known_function.is_null()) {
3804 LPointerMap* pointers = instr->pointer_map();
3805 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3806 ParameterCount count(instr->arity());
3807 __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
3809 CallKnownFunction(known_function,
3810 instr->hydrogen()->formal_parameter_count(),
3813 A1_CONTAINS_TARGET);
// Calls a code object described by a calling-convention descriptor.
// Constant targets are called as code-target relocations; register targets
// are adjusted past the Code header to the instruction start first.
// A lazy-deopt safepoint brackets the call via the SafepointGenerator.
3818 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
3819 ASSERT(ToRegister(instr->result()).is(v0));
3821 LPointerMap* pointers = instr->pointer_map();
3822 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3824 if (instr->target()->IsConstantOperand()) {
3825 LConstantOperand* target = LConstantOperand::cast(instr->target());
3826 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3827 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3828 __ Call(code, RelocInfo::CODE_TARGET);
3830 ASSERT(instr->target()->IsRegister());
3831 Register target = ToRegister(instr->target());
3832 generator.BeforeCall(__ CallSize(target));
// Skip the Code object header to reach the first instruction.
3833 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3836 generator.AfterCall();
// Calls the JSFunction in a1 directly through its code entry, optionally
// passing the argument count in a0, with a lazy-deopt safepoint.
3840 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3841 ASSERT(ToRegister(instr->function()).is(a1));
3842 ASSERT(ToRegister(instr->result()).is(v0));
3844 if (instr->hydrogen()->pass_argument_count()) {
3845 __ li(a0, Operand(instr->arity()));
// Load the callee's context before entering it.
3849 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3851 // Load the code entry address
3852 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3855 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
// Generic call of the function in a1 through CallFunctionStub.
3859 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3860 ASSERT(ToRegister(instr->context()).is(cp));
3861 ASSERT(ToRegister(instr->function()).is(a1));
3862 ASSERT(ToRegister(instr->result()).is(v0));
3864 int arity = instr->arity();
3865 CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
3866 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// `new` of the constructor in a1 through CallConstructStub. Optimized code
// records no type feedback, so a2 gets undefined instead of a feedback cell.
3870 void LCodeGen::DoCallNew(LCallNew* instr) {
3871 ASSERT(ToRegister(instr->context()).is(cp));
3872 ASSERT(ToRegister(instr->constructor()).is(a1));
3873 ASSERT(ToRegister(instr->result()).is(v0));
3875 __ li(a0, Operand(instr->arity()));
3876 // No cell in a2 for construct type feedback in optimized code
3877 Handle<Object> undefined_value(isolate()->factory()->undefined_value());
3878 __ li(a2, Operand(undefined_value));
3879 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3880 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
// `new Array(...)` specialized by arity: zero-argument, single-argument
// (with a packed→holey kind upgrade when the single argument is a nonzero
// length), or N-argument constructor stubs.
3884 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3885 ASSERT(ToRegister(instr->context()).is(cp));
3886 ASSERT(ToRegister(instr->constructor()).is(a1));
3887 ASSERT(ToRegister(instr->result()).is(v0));
3889 __ li(a0, Operand(instr->arity()));
3890 __ li(a2, Operand(factory()->undefined_value()));
3891 ElementsKind kind = instr->hydrogen()->elements_kind();
3892 AllocationSiteOverrideMode override_mode =
3893 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3894 ? DISABLE_ALLOCATION_SITES
3897 if (instr->arity() == 0) {
3898 ArrayNoArgumentConstructorStub stub(kind, override_mode);
3899 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3900 } else if (instr->arity() == 1) {
3902 if (IsFastPackedElementsKind(kind)) {
3904 // We might need a change here,
3905 // look at the first argument.
// new Array(n) with n != 0 creates holes, so fall through to the
// holey-kind stub; n == 0 keeps the packed kind.
3906 __ lw(t1, MemOperand(sp, 0));
3907 __ Branch(&packed_case, eq, t1, Operand(zero_reg));
3909 ElementsKind holey_kind = GetHoleyElementsKind(kind);
3910 ArraySingleArgumentConstructorStub stub(holey_kind, override_mode);
3911 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3913 __ bind(&packed_case);
3916 ArraySingleArgumentConstructorStub stub(kind, override_mode);
3917 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3920 ArrayNArgumentsConstructorStub stub(kind, override_mode);
3921 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
// Straight runtime call with the instruction's function and arity.
3926 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
3927 CallRuntime(instr->function(), instr->arity(), instr);
// Stores a code object's instruction-start address (code object + header
// size, untagged) into the function's code-entry field.
3931 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
3932 Register function = ToRegister(instr->function());
3933 Register code_object = ToRegister(instr->code_object());
3934 __ Addu(code_object, code_object,
3935 Operand(Code::kHeaderSize - kHeapObjectTag));
3937 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
// result = base + offset, where the offset is either an immediate constant
// or a register value (pointer into a folded allocation).
3941 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
3942 Register result = ToRegister(instr->result());
3943 Register base = ToRegister(instr->base_object());
3944 if (instr->offset()->IsConstantOperand()) {
3945 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
3946 __ Addu(result, base, Operand(ToInteger32(offset)));
3948 Register offset = ToRegister(instr->offset());
3949 __ Addu(result, base, offset);
// Stores a named field. Handles, in order: external-memory backed fields,
// heap-object representation checks (deopt on smi), unboxed double fields
// (sdc1), an optional map transition (with map write barrier), and finally
// the tagged store to either an in-object slot or the properties array,
// each with its own write barrier when required.
3954 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
3955 Representation representation = instr->representation();
3957 Register object = ToRegister(instr->object());
3958 Register scratch = scratch0();
3959 HObjectAccess access = instr->hydrogen()->access();
3960 int offset = access.offset();
3962 if (access.IsExternalMemory()) {
3963 Register value = ToRegister(instr->value());
3964 MemOperand operand = MemOperand(object, offset);
3965 __ Store(value, operand, representation);
3969 Handle<Map> transition = instr->transition();
3971 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
3972 Register value = ToRegister(instr->value());
3973 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
// Field requires a heap object: deopt if the value is a smi.
3974 __ SmiTst(value, scratch);
3975 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3977 } else if (representation.IsDouble()) {
3978 ASSERT(transition.is_null());
3979 ASSERT(access.IsInobject());
3980 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
3981 DoubleRegister value = ToDoubleRegister(instr->value());
3982 __ sdc1(value, FieldMemOperand(object, offset));
3986 if (!transition.is_null()) {
// Install the new map first, then record the map write if needed.
3987 __ li(scratch, Operand(transition));
3988 __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3989 if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
3990 Register temp = ToRegister(instr->temp());
3991 // Update the write barrier for the map field.
3992 __ RecordWriteField(object,
3993 HeapObject::kMapOffset,
3998 OMIT_REMEMBERED_SET,
4004 Register value = ToRegister(instr->value());
// Skip the barrier's smi check when the value is statically a heap object.
4005 SmiCheck check_needed =
4006 instr->hydrogen()->value()->IsHeapObject()
4007 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4008 if (access.IsInobject()) {
4009 MemOperand operand = FieldMemOperand(object, offset);
4010 __ Store(value, operand, representation);
4011 if (instr->hydrogen()->NeedsWriteBarrier()) {
4012 // Update the write barrier for the object for in-object properties.
4013 __ RecordWriteField(object,
4019 EMIT_REMEMBERED_SET,
4023 __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4024 MemOperand operand = FieldMemOperand(scratch, offset);
4025 __ Store(value, operand, representation);
4026 if (instr->hydrogen()->NeedsWriteBarrier()) {
4027 // Update the write barrier for the properties array.
4028 // object is used as a scratch register.
4029 __ RecordWriteField(scratch,
4035 EMIT_REMEMBERED_SET,
// Generic named store through the StoreIC stub. Calling convention
// (asserted): receiver in a1, value in a0, name materialized into a2.
4042 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4043 ASSERT(ToRegister(instr->context()).is(cp));
4044 ASSERT(ToRegister(instr->object()).is(a1));
4045 ASSERT(ToRegister(instr->value()).is(a0));
4047 // Name is always in a2.
4048 __ li(a2, Operand(instr->name()));
4049 Handle<Code> ic = StoreIC::initialize_stub(isolate(),
4050 instr->strict_mode_flag());
4051 CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Emits a conditional deopt for a bounds check. With --debug-code, a check
// the optimizer claims it eliminated is instead verified at runtime and
// stops execution if it would have fired.
4055 void LCodeGen::ApplyCheckIf(Condition condition,
4056 LBoundsCheck* check,
4058 const Operand& src2) {
4059 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4061 __ Branch(&done, NegateCondition(condition), src1, src2);
4062 __ stop("eliminated bounds check failed");
4065 DeoptimizeIf(condition, check->environment(), src1, src2);
// Bounds check: index vs length, deopting via ApplyCheckIf. `hi` vs `hs`
// selects whether index == length is allowed. Constant indices are
// materialized as a smi or int32 to match the length's representation.
4070 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4071 if (instr->hydrogen()->skip_check()) return;
4073 Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
4074 if (instr->index()->IsConstantOperand()) {
4075 int constant_index =
4076 ToInteger32(LConstantOperand::cast(instr->index()));
4077 if (instr->hydrogen()->length()->representation().IsSmi()) {
4078 __ li(at, Operand(Smi::FromInt(constant_index)));
4080 __ li(at, Operand(constant_index));
4082 ApplyCheckIf(condition,
4085 Operand(ToRegister(instr->length())));
4087 ApplyCheckIf(condition,
4089 ToRegister(instr->index()),
4090 Operand(ToRegister(instr->length())));
// Stores into a typed/external array. Float kinds go through an FPU path
// (with cvt_s_d for 32-bit floats); integer kinds compute a MemOperand via
// PrepareKeyedOperand and use the store width matching the element kind
// (sb/sh/sw). Constant keys with the top nibble set abort codegen.
4095 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4096 Register external_pointer = ToRegister(instr->elements());
4097 Register key = no_reg;
4098 ElementsKind elements_kind = instr->elements_kind();
4099 bool key_is_constant = instr->key()->IsConstantOperand();
4100 int constant_key = 0;
4101 if (key_is_constant) {
4102 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
// Keys this large would overflow the shifted immediate offset.
4103 if (constant_key & 0xF0000000) {
4104 Abort(kArrayIndexConstantValueTooBig);
4107 key = ToRegister(instr->key());
4109 int element_size_shift = ElementsKindToShiftSize(elements_kind);
// Smi keys carry a tag bit, so scale by one less.
4110 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4111 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4112 int additional_offset = IsFixedTypedArrayElementsKind(elements_kind)
4113 ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
4116 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4117 elements_kind == FLOAT32_ELEMENTS ||
4118 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4119 elements_kind == FLOAT64_ELEMENTS) {
4121 (instr->additional_index() << element_size_shift) + additional_offset;
4122 Register address = scratch0();
4123 FPURegister value(ToDoubleRegister(instr->value()));
4124 if (key_is_constant) {
4125 if (constant_key != 0) {
4126 __ Addu(address, external_pointer,
4127 Operand(constant_key << element_size_shift));
// constant_key == 0: address the backing store directly.
4129 address = external_pointer;
4132 __ sll(address, key, shift_size);
4133 __ Addu(address, external_pointer, address);
4136 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4137 elements_kind == FLOAT32_ELEMENTS) {
// Narrow double to single precision before the 32-bit store.
4138 __ cvt_s_d(double_scratch0(), value);
4139 __ swc1(double_scratch0(), MemOperand(address, base_offset));
4140 } else { // Storing doubles, not floats.
4141 __ sdc1(value, MemOperand(address, base_offset));
4144 Register value(ToRegister(instr->value()));
4145 MemOperand mem_operand = PrepareKeyedOperand(
4146 key, external_pointer, key_is_constant, constant_key,
4147 element_size_shift, shift_size,
4148 instr->additional_index(), additional_offset);
4149 switch (elements_kind) {
4150 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4151 case EXTERNAL_INT8_ELEMENTS:
4152 case EXTERNAL_UINT8_ELEMENTS:
4153 case UINT8_ELEMENTS:
4154 case UINT8_CLAMPED_ELEMENTS:
4156 __ sb(value, mem_operand);
4158 case EXTERNAL_INT16_ELEMENTS:
4159 case EXTERNAL_UINT16_ELEMENTS:
4160 case INT16_ELEMENTS:
4161 case UINT16_ELEMENTS:
4162 __ sh(value, mem_operand);
4164 case EXTERNAL_INT32_ELEMENTS:
4165 case EXTERNAL_UINT32_ELEMENTS:
4166 case INT32_ELEMENTS:
4167 case UINT32_ELEMENTS:
4168 __ sw(value, mem_operand);
// The remaining kinds are unreachable in this integer-store path.
4170 case FLOAT32_ELEMENTS:
4171 case FLOAT64_ELEMENTS:
4172 case EXTERNAL_FLOAT32_ELEMENTS:
4173 case EXTERNAL_FLOAT64_ELEMENTS:
4174 case FAST_DOUBLE_ELEMENTS:
4176 case FAST_SMI_ELEMENTS:
4177 case FAST_HOLEY_DOUBLE_ELEMENTS:
4178 case FAST_HOLEY_ELEMENTS:
4179 case FAST_HOLEY_SMI_ELEMENTS:
4180 case DICTIONARY_ELEMENTS:
4181 case NON_STRICT_ARGUMENTS_ELEMENTS:
// Stores a double into a FixedDoubleArray slot. Computes the effective slot
// address into scratch0() (constant-key vs. register-key paths), then either
// stores the value directly or, when NeedsCanonicalization(), replaces any
// NaN with the canonical NaN loaded from the heap root before storing.
// NOTE(review): this excerpt is elided — original line numbers skip (4196,
// 4198, 4204, 4208, 4215-4218, 4222, 4224, 4229-4232, 4235+), so closing
// braces, the `is_nan` label declaration and else-branches are missing here.
4189 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4190 DoubleRegister value = ToDoubleRegister(instr->value());
4191 Register elements = ToRegister(instr->elements());
4192 Register scratch = scratch0();
4193 DoubleRegister double_scratch = double_scratch0();
4194 bool key_is_constant = instr->key()->IsConstantOperand();
4195 Label not_nan, done;
4197 // Calculate the effective address of the slot in the array to store the
4199 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4200 if (key_is_constant) {
// Constant-key path: fold key and header offset into a single Addu immediate.
4201 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4202 if (constant_key & 0xF0000000) {
4203 Abort(kArrayIndexConstantValueTooBig)<!---->;
4205 __ Addu(scratch, elements,
4206 Operand((constant_key << element_size_shift) +
4207 FixedDoubleArray::kHeaderSize - kHeapObjectTag));
// Register-key path: shift the key (less kSmiTagSize when the key is a smi)
// and add it to elements + header offset.
4209 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4210 ? (element_size_shift - kSmiTagSize) : element_size_shift;
4211 __ Addu(scratch, elements,
4212 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4213 __ sll(at, ToRegister(instr->key()), shift_size);
4214 __ Addu(scratch, scratch, at);
4217 if (instr->NeedsCanonicalization()) {
4219 // Check for NaN. All NaNs must be canonicalized.
// BranchF with eq on (value, value) falls through to is_nan only when
// value != value, i.e. when value is NaN.
4220 __ BranchF(NULL, &is_nan, eq, value, value);
// NOTE(review): '¬_nan' below looks like HTML-entity mangling of
// '&not_nan' — confirm against the pristine source before building.
4221 __ Branch(¬_nan);
4223 // Only load canonical NaN if the comparison above set the overflow.
4225 __ LoadRoot(at, Heap::kNanValueRootIndex);
4226 __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
4227 __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
4228 element_size_shift));
// Non-NaN / no-canonicalization store of the original value.
4233 __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
4234 element_size_shift));
// Stores a tagged value into a FixedArray slot. Constant keys fold into a
// static offset from `elements`; register keys compute the address into
// scratch0(). Emits a write barrier via RecordWrite when the hydrogen
// instruction requires one.
// NOTE(review): excerpt is elided — line 4243 (the ': no_reg'-style ternary
// alternative for `key`), 4255/4263/4266 (else lines), 4268/4270 (braces)
// and 4278-4281/4283+ (remaining RecordWrite arguments) are missing.
4239 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4240 Register value = ToRegister(instr->value());
4241 Register elements = ToRegister(instr->elements());
4242 Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
4244 Register scratch = scratch0();
4245 Register store_base = scratch;
4249 if (instr->key()->IsConstantOperand()) {
// A constant key cannot need a write barrier here (asserted), so the store
// base can stay `elements` with a fully static offset.
4250 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4251 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4252 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
4253 instr->additional_index());
4254 store_base = elements;
4256 // Even though the HLoadKeyed instruction forces the input
4257 // representation for the key to be an integer, the input gets replaced
4258 // during bound check elimination with the index argument to the bounds
4259 // check, which can be tagged, so that case must be handled here, too.
4260 if (instr->hydrogen()->key()->representation().IsSmi()) {
// Smi keys carry the tag, so shift by one bit less than a raw int32 key.
4261 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
4262 __ addu(scratch, elements, scratch);
4264 __ sll(scratch, key, kPointerSizeLog2);
4265 __ addu(scratch, elements, scratch);
4267 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
4269 __ sw(value, FieldMemOperand(store_base, offset));
4271 if (instr->hydrogen()->NeedsWriteBarrier()) {
// Smi check can be omitted when hydrogen proves the value is a heap object.
4272 SmiCheck check_needed =
4273 instr->hydrogen()->value()->IsHeapObject()
4274 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4275 // Compute address of modified element and store it into key register.
4276 __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
4277 __ RecordWrite(elements,
4282 EMIT_REMEMBERED_SET,
// Dispatcher for keyed stores: typed/external arrays, fixed double arrays,
// and plain fixed arrays each go to their specialized emitter.
// NOTE(review): elided excerpt — the `} else {` (4294) and closing braces
// (4296-4297) are missing from this view.
4288 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
4289 // By cases: external, fast double
4290 if (instr->is_typed_elements()) {
4291 DoStoreKeyedExternalArray(instr);
4292 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
4293 DoStoreKeyedFixedDoubleArray(instr);
4295 DoStoreKeyedFixedArray(instr);
// Generic (megamorphic) keyed store: arguments are pinned to the MIPS
// KeyedStoreIC calling convention (object in a2, key in a1, value in a0,
// context in cp), then the strict- or sloppy-mode IC stub is called.
4300 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4301 ASSERT(ToRegister(instr->context()).is(cp));
4302 ASSERT(ToRegister(instr->object()).is(a2));
4303 ASSERT(ToRegister(instr->key()).is(a1));
4304 ASSERT(ToRegister(instr->value()).is(a0));
4306 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
4307 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4308 : isolate()->builtins()->KeyedStoreIC_Initialize();
4309 CallCode(ic, RelocInfo::CODE_TARGET, instr);
// Transitions an object's elements kind. If the object's map is not the
// expected from_map the whole operation is skipped. Simple map changes just
// rewrite the map word (with a write barrier); otherwise the
// TransitionElementsKindStub is called under a safepoint that saves
// registers and doubles.
// NOTE(review): elided excerpt — lines 4316/4321/4325/4330/4333/4340/4343
// (blanks, braces, the stub CallCode line) are missing. '¬_applicable'
// below looks like HTML-entity mangling of '&not_applicable' — confirm.
4313 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4314 Register object_reg = ToRegister(instr->object());
4315 Register scratch = scratch0();
4317 Handle<Map> from_map = instr->original_map();
4318 Handle<Map> to_map = instr->transitioned_map();
4319 ElementsKind from_kind = instr->from_kind();
4320 ElementsKind to_kind = instr->to_kind();
4322 Label not_applicable;
4323 __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4324 __ Branch(¬_applicable, ne, scratch, Operand(from_map));
4326 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
// Fast path: only the map word changes; barrier records the new map pointer.
4327 Register new_map_reg = ToRegister(instr->new_map_temp());
4328 __ li(new_map_reg, Operand(to_map));
4329 __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4331 __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
4332 scratch, GetRAState(), kDontSaveFPRegs);
// Slow path: call the transition stub with object in a0 and target map in a1.
4334 ASSERT(ToRegister(instr->context()).is(cp));
4335 PushSafepointRegistersScope scope(
4336 this, Safepoint::kWithRegistersAndDoubles);
4337 __ mov(a0, object_reg);
4338 __ li(a1, Operand(to_map));
4339 TransitionElementsKindStub stub(from_kind, to_kind);
4341 RecordSafepointWithRegistersAndDoubles(
4342 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4344 __ bind(¬_applicable);
// Deoptimizes unconditionally (al) if a JSArray carries an AllocationMemento;
// otherwise falls through via the no_memento_found label.
4348 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4349 Register object = ToRegister(instr->object());
4350 Register temp = ToRegister(instr->temp());
4351 Label no_memento_found;
4352 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
4353 ne, &no_memento_found);
4354 DeoptimizeIf(al, instr->environment());
4355 __ bind(&no_memento_found);
// String concatenation via StringAddStub; operands are pinned to a1 (left)
// and a0 (right) per the stub's MIPS calling convention.
4359 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4360 ASSERT(ToRegister(instr->context()).is(cp));
4361 ASSERT(ToRegister(instr->left()).is(a1));
4362 ASSERT(ToRegister(instr->right()).is(a0));
4363 StringAddStub stub(instr->hydrogen()->flags(),
4364 instr->hydrogen()->pretenure_flag());
4365 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
// Loads the char code at string[index] inline via StringCharLoadGenerator;
// hard cases (cons strings, out-of-cache) are handled by the deferred code,
// which calls into the runtime (DoDeferredStringCharCodeAt below).
// NOTE(review): elided excerpt — access-specifier/brace lines (4371, 4376,
// 4378, 4380-4381) and the deferred->entry() argument line (4388) are
// missing from this view.
4369 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
4370 class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
4372 DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
4373 : LDeferredCode(codegen), instr_(instr) { }
4374 virtual void Generate() V8_OVERRIDE {
4375 codegen()->DoDeferredStringCharCodeAt(instr_);
4377 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4379 LStringCharCodeAt* instr_;
4382 DeferredStringCharCodeAt* deferred =
4383 new(zone()) DeferredStringCharCodeAt(this, instr);
4384 StringCharLoadGenerator::Generate(masm(),
4385 ToRegister(instr->string()),
4386 ToRegister(instr->index()),
4387 ToRegister(instr->result()),
4389 __ bind(deferred->exit());
// Deferred slow path for StringCharCodeAt: zeroes the result slot so the
// pointer map stays valid, then calls Runtime::kStringCharCodeAt with the
// string and a smi-tagged index, storing v0 back into the result's
// safepoint slot.
// NOTE(review): elided excerpt — lines 4410-4415 (the register-index else
// branch: SmiTag and pushes) are missing from this view.
4393 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4394 Register string = ToRegister(instr->string());
4395 Register result = ToRegister(instr->result());
4396 Register scratch = scratch0();
4398 // TODO(3095996): Get rid of this. For now, we need to make the
4399 // result register contain a valid pointer because it is already
4400 // contained in the register pointer map.
4401 __ mov(result, zero_reg);
4403 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4405 // Push the index as a smi. This is safe because of the checks in
4406 // DoStringCharCodeAt above.
4407 if (instr->index()->IsConstantOperand()) {
4408 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
4409 __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
4412 Register index = ToRegister(instr->index());
4416 CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
4420 __ StoreToSafepointRegisterSlot(v0, result);
// Converts a char code to a one-character string via the single-character
// string cache; codes above kMaxOneByteCharCode or cache misses (undefined
// cache entry) go to the deferred runtime path.
// NOTE(review): elided excerpt — access-specifier/brace lines (4426, 4431,
// 4433, 4435-4436) and blanks are missing from this view.
4424 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
4425 class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
4427 DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
4428 : LDeferredCode(codegen), instr_(instr) { }
4429 virtual void Generate() V8_OVERRIDE {
4430 codegen()->DoDeferredStringCharFromCode(instr_);
4432 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4434 LStringCharFromCode* instr_;
4437 DeferredStringCharFromCode* deferred =
4438 new(zone()) DeferredStringCharFromCode(this, instr);
4440 ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
4441 Register char_code = ToRegister(instr->char_code());
4442 Register result = ToRegister(instr->result());
4443 Register scratch = scratch0();
4444 ASSERT(!char_code.is(result));
4446 __ Branch(deferred->entry(), hi,
4447 char_code, Operand(String::kMaxOneByteCharCode));
// Index the single-character string cache: result = cache[char_code].
4448 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4449 __ sll(scratch, char_code, kPointerSizeLog2);
4450 __ Addu(result, result, scratch);
4451 __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
// An undefined cache entry means a miss — fall back to the deferred path.
4452 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4453 __ Branch(deferred->entry(), eq, result, Operand(scratch));
4454 __ bind(deferred->exit());
// Deferred slow path for StringCharFromCode: zeroes the result slot for the
// pointer map, smi-tags the char code, and calls Runtime::kCharFromCode,
// storing v0 back into the result's safepoint slot.
// NOTE(review): elided excerpt — line 4469 (the push of char_code) appears
// to be missing from this view.
4458 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4459 Register char_code = ToRegister(instr->char_code());
4460 Register result = ToRegister(instr->result());
4462 // TODO(3095996): Get rid of this. For now, we need to make the
4463 // result register contain a valid pointer because it is already
4464 // contained in the register pointer map.
4465 __ mov(result, zero_reg);
4467 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4468 __ SmiTag(char_code);
4470 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
4471 __ StoreToSafepointRegisterSlot(v0, result);
// int32 -> double conversion: moves the integer (from a register or a stack
// slot) into the FPU via mtc1, then converts with cvt_d_w.
4475 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4476 LOperand* input = instr->value();
4477 ASSERT(input->IsRegister() || input->IsStackSlot());
4478 LOperand* output = instr->result();
4479 ASSERT(output->IsDoubleRegister());
4480 FPURegister single_scratch = double_scratch0().low();
4481 if (input->IsStackSlot()) {
4482 Register scratch = scratch0();
4483 __ lw(scratch, ToMemOperand(input));
4484 __ mtc1(scratch, single_scratch);
4486 __ mtc1(ToRegister(input), single_scratch);
4488 __ cvt_d_w(ToDoubleRegister(output), single_scratch);
// int32 -> smi: when the value's range is not proven to fit a smi, tag with
// overflow check and deoptimize on overflow (scratch < 0 after
// SmiTagCheckOverflow signals overflow); otherwise tag unconditionally.
4492 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
4493 LOperand* input = instr->value();
4494 LOperand* output = instr->result();
4495 Register scratch = scratch0();
4497 ASSERT(output->IsRegister());
4498 if (!instr->hydrogen()->value()->HasRange() ||
4499 !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4500 __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
4501 DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
4503 __ SmiTag(ToRegister(output), ToRegister(input));
// uint32 -> double via the Cvt_d_uw macro (f22 used as FPU scratch),
// which handles values with the top bit set that cvt_d_w would misread.
4508 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4509 LOperand* input = instr->value();
4510 LOperand* output = instr->result();
4512 FPURegister dbl_scratch = double_scratch0();
4513 __ mtc1(ToRegister(input), dbl_scratch);
4514 __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
// uint32 -> smi: unless the range is proven smi-safe, deoptimize when either
// of the top two bits (mask 0xc0000000) is set — such values cannot be
// represented as a 31-bit smi — then tag.
4518 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
4519 LOperand* input = instr->value();
4520 LOperand* output = instr->result();
4521 if (!instr->hydrogen()->value()->HasRange() ||
4522 !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4523 Register scratch = scratch0();
4524 __ And(scratch, ToRegister(input), Operand(0xc0000000));
4525 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4527 __ SmiTag(ToRegister(output), ToRegister(input));
// Tags a signed int32 as a number: fast path smi-tags with overflow check
// and branches to deferred heap-number allocation on overflow
// (DoDeferredNumberTagI with SIGNED_INT32).
// NOTE(review): elided excerpt — access specifiers, braces and the deferred
// Generate() argument lines (4538-4540) are missing from this view.
4531 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4532 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4534 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4535 : LDeferredCode(codegen), instr_(instr) { }
4536 virtual void Generate() V8_OVERRIDE {
4537 codegen()->DoDeferredNumberTagI(instr_,
4541 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4543 LNumberTagI* instr_;
4546 Register src = ToRegister(instr->value());
4547 Register dst = ToRegister(instr->result());
4548 Register overflow = scratch0();
4550 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4551 __ SmiTagCheckOverflow(dst, src, overflow);
4552 __ BranchOnOverflow(deferred->entry(), overflow);
4553 __ bind(deferred->exit());
// Tags an unsigned int32 as a number: values above Smi::kMaxValue go to the
// deferred heap-number path (DoDeferredNumberTagI, unsigned variant);
// otherwise a plain smi tag suffices.
// NOTE(review): elided excerpt — access specifiers, braces and the deferred
// Generate() argument lines (4564-4566) are missing from this view.
4557 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4558 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4560 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4561 : LDeferredCode(codegen), instr_(instr) { }
4562 virtual void Generate() V8_OVERRIDE {
4563 codegen()->DoDeferredNumberTagI(instr_,
4567 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4569 LNumberTagU* instr_;
4572 Register input = ToRegister(instr->value());
4573 Register result = ToRegister(instr->result());
4575 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4576 __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
4577 __ SmiTag(result, input);
4578 __ bind(deferred->exit());
// Shared deferred path for NumberTagI/NumberTagU: converts the int32 (signed
// or unsigned) to a double in dbl_scratch, allocates a heap number (inline
// if FLAG_inline_new, else via Runtime::kAllocateHeapNumber under a
// safepoint), stores the double into it, and writes the tagged result back
// to dst's safepoint slot.
// NOTE(review): heavily elided excerpt — the LOperand* value parameter line
// (4583), Label declarations, else/brace lines, the inline-path result move
// (4612-4615), the `slow` label bind (4617-4618) and the v0->dst move
// (4632) are missing from this view.
4582 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4584 IntegerSignedness signedness) {
4586 Register src = ToRegister(value);
4587 Register dst = ToRegister(instr->result());
4588 DoubleRegister dbl_scratch = double_scratch0();
4590 // Preserve the value of all registers.
4591 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4594 if (signedness == SIGNED_INT32) {
4595 // There was overflow, so bits 30 and 31 of the original integer
4596 // disagree. Try to allocate a heap number in new space and store
4597 // the value in there. If that fails, call the runtime system.
// Recover the original int32: untag, then flip bit 31 (the overflowed tag
// shift moved the sign bit; XOR 0x80000000 restores it).
4599 __ SmiUntag(src, dst);
4600 __ Xor(src, src, Operand(0x80000000));
4602 __ mtc1(src, dbl_scratch);
4603 __ cvt_d_w(dbl_scratch, dbl_scratch);
// Unsigned path: use the Cvt_d_uw macro so the top bit is not sign-extended.
4605 __ mtc1(src, dbl_scratch);
4606 __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
4609 if (FLAG_inline_new) {
4610 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
// DONT_TAG_RESULT: t1 holds the raw (untagged) allocation address.
4611 __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
4616 // Slow case: Call the runtime system to do the number allocation.
4619 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4620 // register is stored, as this register is in the pointer map, but contains an
4622 __ StoreToSafepointRegisterSlot(zero_reg, dst);
4623 // NumberTagI and NumberTagD use the context from the frame, rather than
4624 // the environment's HContext or HInlinedContext value.
4625 // They only call Runtime::kAllocateHeapNumber.
4626 // The corresponding HChange instructions are added in a phase that does
4627 // not have easy access to the local context.
4628 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4629 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber)<!---->;
4630 RecordSafepointWithRegisters(
4631 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
// Work with the untagged address while storing the payload, re-tag after.
4633 __ Subu(dst, dst, kHeapObjectTag);
4635 // Done. Put the value in dbl_scratch into the value of the allocated heap
4638 __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
4639 __ Addu(dst, dst, kHeapObjectTag);
4640 __ StoreToSafepointRegisterSlot(dst, dst);
// Boxes a double as a heap number: inline allocation (untagged address for
// speed) when FLAG_inline_new, else the deferred runtime path; the double
// payload is stored, then the pointer is tagged.
// NOTE(review): elided excerpt — access specifiers, braces, the
// AllocateHeapNumber trailing argument (4668-4669) and the else line (4671)
// are missing from this view.
4644 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4645 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4647 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4648 : LDeferredCode(codegen), instr_(instr) { }
4649 virtual void Generate() V8_OVERRIDE {
4650 codegen()->DoDeferredNumberTagD(instr_);
4652 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4654 LNumberTagD* instr_;
4657 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4658 Register scratch = scratch0();
4659 Register reg = ToRegister(instr->result());
4660 Register temp1 = ToRegister(instr->temp());
4661 Register temp2 = ToRegister(instr->temp2());
4663 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4664 if (FLAG_inline_new) {
4665 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4666 // We want the untagged address first for performance
4667 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4670 __ Branch(deferred->entry());
4672 __ bind(deferred->exit());
4673 __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
4674 // Now that we have finished with the object's real address tag it
4675 __ Addu(reg, reg, kHeapObjectTag);
// Deferred slow path for NumberTagD: zeroes the result slot for the pointer
// map, calls Runtime::kAllocateHeapNumber with the frame's context, untags
// the returned pointer (caller re-tags after storing the payload), and
// writes it to the result's safepoint slot.
4679 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4680 // TODO(3095996): Get rid of this. For now, we need to make the
4681 // result register contain a valid pointer because it is already
4682 // contained in the register pointer map.
4683 Register reg = ToRegister(instr->result());
4684 __ mov(reg, zero_reg);
4686 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4687 // NumberTagI and NumberTagD use the context from the frame, rather than
4688 // the environment's HContext or HInlinedContext value.
4689 // They only call Runtime::kAllocateHeapNumber.
4690 // The corresponding HChange instructions are added in a phase that does
4691 // not have easy access to the local context.
4692 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4693 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber)<!---->;
4694 RecordSafepointWithRegisters(
4695 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4696 __ Subu(v0, v0, kHeapObjectTag);
4697 __ StoreToSafepointRegisterSlot(v0, reg);
// Unconditional smi tag; hydrogen guarantees no overflow (asserted).
4701 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4702 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
4703 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
// Smi untag; when a check is needed, ANDs with kHeapObjectTag first and
// deoptimizes if the input was a heap object (nonzero tag bit).
4707 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4708 Register scratch = scratch0();
4709 Register input = ToRegister(instr->value());
4710 Register result = ToRegister(instr->result());
4711 if (instr->needs_check()) {
4712 STATIC_ASSERT(kHeapObjectTag == 1);
4713 // If the input is a HeapObject, value of scratch won't be zero.
4714 __ And(scratch, input, Operand(kHeapObjectTag));
4715 __ SmiUntag(result, input);
4716 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
4718 __ SmiUntag(result, input);
// Untags a number (smi or heap number) into result_reg as a double.
// ANY_TAGGED mode: smi fast path, heap-number map check (deopt or convert
// undefined to NaN depending on can_convert_undefined_to_nan), optional
// minus-zero deopt via sign-bit inspection. SMI mode: straight smi->double.
// NOTE(review): elided excerpt — the LEnvironment* env parameter line
// (4727), the `convert` label bind (4752), branches to done and closing
// braces are missing from this view.
4723 void LCodeGen::EmitNumberUntagD(Register input_reg,
4724 DoubleRegister result_reg,
4725 bool can_convert_undefined_to_nan,
4726 bool deoptimize_on_minus_zero,
4728 NumberUntagDMode mode) {
4729 Register scratch = scratch0();
4730 Label convert, load_smi, done;
4731 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4733 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4734 // Heap number map check.
4735 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4736 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4737 if (can_convert_undefined_to_nan) {
4738 __ Branch(&convert, ne, scratch, Operand(at));
4740 DeoptimizeIf(ne, env, scratch, Operand(at));
4742 // Load heap number.
4743 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4744 if (deoptimize_on_minus_zero) {
// -0.0 has a zero low word and only the sign bit set in the high word.
4745 __ mfc1(at, result_reg.low());
4746 __ Branch(&done, ne, at, Operand(zero_reg));
4747 __ mfc1(scratch, result_reg.high());
4748 DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
4751 if (can_convert_undefined_to_nan) {
4753 // Convert undefined (and hole) to NaN.
4754 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4755 DeoptimizeIf(ne, env, input_reg, Operand(at));
4756 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4757 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4761 __ SmiUntag(scratch, input_reg);
4762 ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
4764 // Smi to double register conversion
4766 // scratch: untagged value of input_reg
4767 __ mtc1(scratch, result_reg);
4768 __ cvt_d_w(result_reg, result_reg);
// Deferred tagged->int32 conversion for non-smi inputs. Truncating mode:
// heap numbers use TruncateHeapNumberToI; oddballs map undefined/false->0,
// true->1; anything else deoptimizes. Non-truncating mode: require a heap
// number, truncate toward zero with an inexact-conversion check, and
// optionally deoptimize on minus zero.
// NOTE(review): elided excerpt — the `done` label declaration/bind, braces,
// the branch after TruncateHeapNumberToI (4800-4801), the else line (4822),
// EmitFPUTruncate middle arguments (4832-4836), and blanks are missing.
4773 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
4774 Register input_reg = ToRegister(instr->value());
4775 Register scratch1 = scratch0();
4776 Register scratch2 = ToRegister(instr->temp());
4777 DoubleRegister double_scratch = double_scratch0();
4778 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
4780 ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
4781 ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
4785 // The input is a tagged HeapObject.
4786 // Heap number map check.
4787 __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4788 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4789 // This 'at' value and scratch1 map value are used for tests in both clauses
4792 if (instr->truncating()) {
4793 // Performs a truncating conversion of a floating point number as used by
4794 // the JS bitwise operations.
4795 Label no_heap_number, check_bools, check_false;
4796 // Check HeapNumber map.
4797 __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
4798 __ mov(scratch2, input_reg); // In delay slot.
4799 __ TruncateHeapNumberToI(input_reg, scratch2);
4802 // Check for Oddballs. Undefined/False is converted to zero and True to one
4803 // for truncating conversions.
4804 __ bind(&no_heap_number);
4805 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4806 __ Branch(&check_bools, ne, input_reg, Operand(at));
4807 ASSERT(ToRegister(instr->result()).is(input_reg));
4808 __ Branch(USE_DELAY_SLOT, &done);
4809 __ mov(input_reg, zero_reg); // In delay slot.
4811 __ bind(&check_bools);
4812 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4813 __ Branch(&check_false, ne, scratch2, Operand(at));
4814 __ Branch(USE_DELAY_SLOT, &done);
4815 __ li(input_reg, Operand(1)); // In delay slot.
4817 __ bind(&check_false);
4818 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4819 DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
4820 __ Branch(USE_DELAY_SLOT, &done);
4821 __ mov(input_reg, zero_reg); // In delay slot.
4823 // Deoptimize if we don't have a heap number.
4824 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
4826 // Load the double value.
4827 __ ldc1(double_scratch,
4828 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4830 Register except_flag = scratch2;
4831 __ EmitFPUTruncate(kRoundToZero,
4837 kCheckForInexactConversion);
4839 // Deopt if the operation did not succeed.
4840 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4842 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4843 __ Branch(&done, ne, input_reg, Operand(zero_reg));
// Result is zero: deoptimize if the source double's sign bit is set (-0.0).
4845 __ mfc1(scratch1, double_scratch.high());
4846 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4847 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Tagged->int32, in place (input == result): smi-representation inputs just
// untag; otherwise a smi check untags inline and heap objects go to the
// deferred path (DoDeferredTaggedToI).
// NOTE(review): elided excerpt — access specifiers, the instr_ member
// declaration (4863-4865) and else/brace lines are missing from this view.
4854 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4855 class DeferredTaggedToI V8_FINAL : public LDeferredCode {
4857 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
4858 : LDeferredCode(codegen), instr_(instr) { }
4859 virtual void Generate() V8_OVERRIDE {
4860 codegen()->DoDeferredTaggedToI(instr_);
4862 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4867 LOperand* input = instr->value();
4868 ASSERT(input->IsRegister());
4869 ASSERT(input->Equals(instr->result()));
4871 Register input_reg = ToRegister(input);
4873 if (instr->hydrogen()->value()->representation().IsSmi()) {
4874 __ SmiUntag(input_reg);
4876 DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
4878 // Let the deferred code handle the HeapObject case.
4879 __ JumpIfNotSmi(input_reg, deferred->entry());
4881 // Smi to int32 conversion.
4882 __ SmiUntag(input_reg);
4883 __ bind(deferred->exit());
// Tagged number -> double: picks the untag mode from the hydrogen value's
// representation and delegates to EmitNumberUntagD.
// NOTE(review): elided excerpt — the final EmitNumberUntagD argument (the
// mode, line 4905) and closing lines are missing from this view.
4888 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4889 LOperand* input = instr->value();
4890 ASSERT(input->IsRegister());
4891 LOperand* result = instr->result();
4892 ASSERT(result->IsDoubleRegister());
4894 Register input_reg = ToRegister(input);
4895 DoubleRegister result_reg = ToDoubleRegister(result);
4897 HValue* value = instr->hydrogen()->value();
4898 NumberUntagDMode mode = value->representation().IsSmi()
4899 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
4901 EmitNumberUntagD(input_reg, result_reg,
4902 instr->hydrogen()->can_convert_undefined_to_nan(),
4903 instr->hydrogen()->deoptimize_on_minus_zero(),
4904 instr->environment(),
// Double -> int32: truncating mode uses TruncateDoubleToI; otherwise
// EmitFPUTruncate (round toward -inf) with an inexact-conversion flag that
// deoptimizes on failure, plus an optional minus-zero sign-bit deopt.
// NOTE(review): elided excerpt — Label `done`, EmitFPUTruncate middle
// arguments (4920-4924), and closing braces are missing. Line 4917's
// `LCodeGen::scratch1()` differs from scratch0() used elsewhere —
// presumably a distinct scratch accessor; confirm against the full file.
4909 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4910 Register result_reg = ToRegister(instr->result());
4911 Register scratch1 = scratch0();
4912 DoubleRegister double_input = ToDoubleRegister(instr->value());
4914 if (instr->truncating()) {
4915 __ TruncateDoubleToI(result_reg, double_input);
4917 Register except_flag = LCodeGen::scratch1();
4919 __ EmitFPUTruncate(kRoundToMinusInf,
4925 kCheckForInexactConversion);
4927 // Deopt if the operation did not succeed (except_flag != 0).
4928 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4930 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4932 __ Branch(&done, ne, result_reg, Operand(zero_reg));
// Result is zero: deoptimize when the input's sign bit is set (-0.0).
4933 __ mfc1(scratch1, double_input.high());
4934 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4935 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Double -> smi: same conversion logic as DoDoubleToI (truncate or checked
// FPU truncate with minus-zero handling), then smi-tags the int32 result
// with an overflow check that deoptimizes.
// NOTE(review): elided excerpt — Label `done`, EmitFPUTruncate middle
// arguments (4953-4957), and brace/blank lines are missing from this view.
4942 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
4943 Register result_reg = ToRegister(instr->result());
4944 Register scratch1 = LCodeGen::scratch0();
4945 DoubleRegister double_input = ToDoubleRegister(instr->value());
4947 if (instr->truncating()) {
4948 __ TruncateDoubleToI(result_reg, double_input);
4950 Register except_flag = LCodeGen::scratch1();
4952 __ EmitFPUTruncate(kRoundToMinusInf,
4958 kCheckForInexactConversion);
4960 // Deopt if the operation did not succeed (except_flag != 0).
4961 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
4963 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4965 __ Branch(&done, ne, result_reg, Operand(zero_reg));
4966 __ mfc1(scratch1, double_input.high());
4967 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4968 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
// Tag the int32 result; SmiTagCheckOverflow leaves scratch1 < 0 on overflow.
4972 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
4973 DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
// Deoptimizes unless the value is a smi (SmiTst leaves nonzero in `at` for
// heap objects).
4977 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
4978 LOperand* input = instr->value();
4979 __ SmiTst(ToRegister(input), at);
4980 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
// Deoptimizes if the value IS a smi; skipped entirely when hydrogen already
// proved the value is a heap object.
4984 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
4985 if (!instr->hydrogen()->value()->IsHeapObject()) {
4986 LOperand* input = instr->value();
4987 __ SmiTst(ToRegister(input), at);
4988 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
// Deoptimizes unless the object's instance type satisfies either an
// [first, last] interval check or a (mask, tag) bit check supplied by the
// hydrogen instruction.
// NOTE(review): elided excerpt — the first/last declarations (5000-5001),
// the else for the mask/tag branch (5013-5016), the mask/tag declarations
// and brace lines are missing from this view.
4993 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
4994 Register input = ToRegister(instr->value());
4995 Register scratch = scratch0();
4997 __ GetObjectType(input, scratch, scratch);
4999 if (instr->hydrogen()->is_interval_check()) {
5002 instr->hydrogen()->GetCheckInterval(&first, &last);
5004 // If there is only one type in the interval check for equality.
5005 if (first == last) {
5006 DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
5008 DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
5009 // Omit check for the last type.
5010 if (last != LAST_TYPE) {
5011 DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
5017 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5019 if (IsPowerOf2(mask)) {
// Single-bit mask: test just that bit; the expected state depends on tag.
5020 ASSERT(tag == 0 || IsPowerOf2(tag));
5021 __ And(at, scratch, mask);
5022 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
5023 at, Operand(zero_reg));
5025 __ And(scratch, scratch, Operand(mask));
5026 DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
// Deoptimizes unless the register holds the expected object. New-space
// objects are compared through a Cell (so the GC can move them); old-space
// objects are compared directly.
// NOTE(review): elided excerpt — the Operand arguments of both DeoptimizeIf
// calls (5042, 5045) and closing braces are missing. The inner `reg`
// redeclaration at 5037 shadows the outer one — harmless but worth
// cleaning up in the full file.
5032 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5033 Register reg = ToRegister(instr->value());
5034 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5035 AllowDeferredHandleDereference smi_check;
5036 if (isolate()->heap()->InNewSpace(*object)) {
5037 Register reg = ToRegister(instr->value());
5038 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5039 __ li(at, Operand(Handle<Object>(cell)));
5040 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5041 DeoptimizeIf(ne, instr->environment(), reg,
5044 DeoptimizeIf(ne, instr->environment(), reg,
// Deferred map-check recovery: calls Runtime::kTryMigrateInstance (with a
// null context) under a safepoint; deoptimizes if the runtime returned a
// smi, meaning migration failed.
// NOTE(review): elided excerpt — the push of `object` before the runtime
// call (around 5053) appears to be missing from this view.
5050 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5052 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5054 __ mov(cp, zero_reg);
5055 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance)<!---->;
5056 RecordSafepointWithRegisters(
5057 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5058 __ StoreToSafepointRegisterSlot(v0, scratch0());
5060 __ SmiTst(scratch0(), at);
5061 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
// Deoptimizes unless the object's map is in the hydrogen map set. All but
// the last map branch to `success`; the last comparison either deoptimizes
// or, when a migration target exists, jumps to the deferred migration path
// (which loops back via check_maps()).
// NOTE(review): elided excerpt — access specifiers, the object_ member and
// check_maps_ label declarations (5077-5082), Label success (5097), the
// success bind and closing braces are missing from this view.
5065 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5066 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5068 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5069 : LDeferredCode(codegen), instr_(instr), object_(object) {
5070 SetExit(check_maps());
5072 virtual void Generate() V8_OVERRIDE {
5073 codegen()->DoDeferredInstanceMigration(instr_, object_);
5075 Label* check_maps() { return &check_maps_; }
5076 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5083 if (instr->hydrogen()->CanOmitMapChecks()) return;
5084 Register map_reg = scratch0();
5085 LOperand* input = instr->value();
5086 ASSERT(input->IsRegister());
5087 Register reg = ToRegister(input);
5088 __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
5090 DeferredCheckMaps* deferred = NULL;
5091 if (instr->hydrogen()->has_migration_target()) {
5092 deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
5093 __ bind(deferred->check_maps());
5096 UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5098 for (int i = 0; i < map_set.size() - 1; i++) {
5099 Handle<Map> map = map_set.at(i).handle();
5100 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5102 Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5103 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5104 if (instr->hydrogen()->has_migration_target()) {
5105 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5107 DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
// Clamps a double to [0, 255] via the ClampDoubleToUint8 macro.
5114 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5115 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5116 Register result_reg = ToRegister(instr->result());
5117 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5118 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
// Clamps an int32 to [0, 255] via the ClampUint8 macro.
5122 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5123 Register unclamped_reg = ToRegister(instr->unclamped());
5124 Register result_reg = ToRegister(instr->result());
5125 __ ClampUint8(result_reg, unclamped_reg);
// Clamps a tagged value to [0, 255]: smis clamp directly, heap numbers
// clamp their double payload, undefined clamps to zero, and any other heap
// object deoptimizes.
// NOTE(review): elided excerpt — blank lines, the branches to `done`
// (5148-5150, 5155-5157), the is_smi bind and closing braces are missing
// from this view.
5129 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5130 Register scratch = scratch0();
5131 Register input_reg = ToRegister(instr->unclamped());
5132 Register result_reg = ToRegister(instr->result());
5133 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
5134 Label is_smi, done, heap_number;
5136 // Both smi and heap number cases are handled.
5137 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5139 // Check for heap number
5140 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5141 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5143 // Check for undefined. Undefined is converted to zero for clamping
5145 DeoptimizeIf(ne, instr->environment(), input_reg,
5146 Operand(factory()->undefined_value()));
5147 __ mov(result_reg, zero_reg);
5151 __ bind(&heap_number);
5152 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5153 HeapNumber::kValueOffset));
5154 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
// Smi path: scratch holds the untagged value from UntagAndJumpIfSmi.
5158 __ ClampUint8(result_reg, scratch);
5164 void LCodeGen::DoAllocate(LAllocate* instr) {
5165 class DeferredAllocate V8_FINAL : public LDeferredCode {
5167 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5168 : LDeferredCode(codegen), instr_(instr) { }
5169 virtual void Generate() V8_OVERRIDE {
5170 codegen()->DoDeferredAllocate(instr_);
5172 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5177 DeferredAllocate* deferred =
5178 new(zone()) DeferredAllocate(this, instr);
5180 Register result = ToRegister(instr->result());
5181 Register scratch = ToRegister(instr->temp1());
5182 Register scratch2 = ToRegister(instr->temp2());
5184 // Allocate memory for the object.
5185 AllocationFlags flags = TAG_OBJECT;
5186 if (instr->hydrogen()->MustAllocateDoubleAligned()) {
5187 flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
5189 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5190 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5191 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5192 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
5193 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5194 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5195 flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
5197 if (instr->size()->IsConstantOperand()) {
5198 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5199 if (size <= Page::kMaxRegularHeapObjectSize) {
5200 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5202 __ jmp(deferred->entry());
5205 Register size = ToRegister(instr->size());
5214 __ bind(deferred->exit());
5216 if (instr->hydrogen()->MustPrefillWithFiller()) {
5217 if (instr->size()->IsConstantOperand()) {
5218 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5219 __ li(scratch, Operand(size));
5221 scratch = ToRegister(instr->size());
5223 __ Subu(scratch, scratch, Operand(kPointerSize));
5224 __ Subu(result, result, Operand(kHeapObjectTag));
5227 __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5228 __ Addu(at, result, Operand(scratch));
5229 __ sw(scratch2, MemOperand(at));
5230 __ Subu(scratch, scratch, Operand(kPointerSize));
5231 __ Branch(&loop, ge, scratch, Operand(zero_reg));
5232 __ Addu(result, result, Operand(kHeapObjectTag));
5237 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
5238 Register result = ToRegister(instr->result());
5240 // TODO(3095996): Get rid of this. For now, we need to make the
5241 // result register contain a valid pointer because it is already
5242 // contained in the register pointer map.
5243 __ mov(result, zero_reg);
5245 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5246 if (instr->size()->IsRegister()) {
5247 Register size = ToRegister(instr->size());
5248 ASSERT(!size.is(result));
5252 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5253 __ Push(Smi::FromInt(size));
5256 int flags = AllocateDoubleAlignFlag::encode(
5257 instr->hydrogen()->MustAllocateDoubleAligned());
5258 if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
5259 ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
5260 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5261 flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
5262 } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
5263 ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
5264 flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
5266 flags = AllocateTargetSpace::update(flags, NEW_SPACE);
5268 __ Push(Smi::FromInt(flags));
5270 CallRuntimeFromDeferred(
5271 Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
5272 __ StoreToSafepointRegisterSlot(v0, result);
5276 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5277 ASSERT(ToRegister(instr->value()).is(a0));
5278 ASSERT(ToRegister(instr->result()).is(v0));
5280 CallRuntime(Runtime::kToFastProperties, 1, instr);
5284 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5285 ASSERT(ToRegister(instr->context()).is(cp));
5287 // Registers will be used as follows:
5288 // t3 = literals array.
5289 // a1 = regexp literal.
5290 // a0 = regexp literal clone.
5291 // a2 and t0-t2 are used as temporaries.
5292 int literal_offset =
5293 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5294 __ li(t3, instr->hydrogen()->literals());
5295 __ lw(a1, FieldMemOperand(t3, literal_offset));
5296 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5297 __ Branch(&materialized, ne, a1, Operand(at));
5299 // Create regexp literal using runtime function
5300 // Result will be in v0.
5301 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5302 __ li(t1, Operand(instr->hydrogen()->pattern()));
5303 __ li(t0, Operand(instr->hydrogen()->flags()));
5304 __ Push(t3, t2, t1, t0);
5305 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5308 __ bind(&materialized);
5309 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5310 Label allocated, runtime_allocate;
5312 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5315 __ bind(&runtime_allocate);
5316 __ li(a0, Operand(Smi::FromInt(size)));
5318 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5321 __ bind(&allocated);
5322 // Copy the content into the newly allocated memory.
5323 // (Unroll copy loop once for better throughput).
5324 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5325 __ lw(a3, FieldMemOperand(a1, i));
5326 __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
5327 __ sw(a3, FieldMemOperand(v0, i));
5328 __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
5330 if ((size % (2 * kPointerSize)) != 0) {
5331 __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
5332 __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
5337 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5338 ASSERT(ToRegister(instr->context()).is(cp));
5339 // Use the fast case closure allocation code that allocates in new
5340 // space for nested functions that don't need literals cloning.
5341 bool pretenure = instr->hydrogen()->pretenure();
5342 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5343 FastNewClosureStub stub(instr->hydrogen()->language_mode(),
5344 instr->hydrogen()->is_generator());
5345 __ li(a2, Operand(instr->hydrogen()->shared_info()));
5346 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5348 __ li(a2, Operand(instr->hydrogen()->shared_info()));
5349 __ li(a1, Operand(pretenure ? factory()->true_value()
5350 : factory()->false_value()));
5351 __ Push(cp, a2, a1);
5352 CallRuntime(Runtime::kNewClosure, 3, instr);
5357 void LCodeGen::DoTypeof(LTypeof* instr) {
5358 ASSERT(ToRegister(instr->result()).is(v0));
5359 Register input = ToRegister(instr->value());
5361 CallRuntime(Runtime::kTypeof, 1, instr);
5365 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
5366 Register input = ToRegister(instr->value());
5368 Register cmp1 = no_reg;
5369 Operand cmp2 = Operand(no_reg);
5371 Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
5372 instr->FalseLabel(chunk_),
5374 instr->type_literal(),
5378 ASSERT(cmp1.is_valid());
5379 ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
5381 if (final_branch_condition != kNoCondition) {
5382 EmitBranch(instr, final_branch_condition, cmp1, cmp2);
5387 Condition LCodeGen::EmitTypeofIs(Label* true_label,
5390 Handle<String> type_name,
5393 // This function utilizes the delay slot heavily. This is used to load
5394 // values that are always usable without depending on the type of the input
5396 Condition final_branch_condition = kNoCondition;
5397 Register scratch = scratch0();
5398 if (type_name->Equals(heap()->number_string())) {
5399 __ JumpIfSmi(input, true_label);
5400 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5401 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5404 final_branch_condition = eq;
5406 } else if (type_name->Equals(heap()->string_string())) {
5407 __ JumpIfSmi(input, false_label);
5408 __ GetObjectType(input, input, scratch);
5409 __ Branch(USE_DELAY_SLOT, false_label,
5410 ge, scratch, Operand(FIRST_NONSTRING_TYPE));
5411 // input is an object so we can load the BitFieldOffset even if we take the
5413 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5414 __ And(at, at, 1 << Map::kIsUndetectable);
5416 cmp2 = Operand(zero_reg);
5417 final_branch_condition = eq;
5419 } else if (type_name->Equals(heap()->symbol_string())) {
5420 __ JumpIfSmi(input, false_label);
5421 __ GetObjectType(input, input, scratch);
5423 cmp2 = Operand(SYMBOL_TYPE);
5424 final_branch_condition = eq;
5426 } else if (type_name->Equals(heap()->boolean_string())) {
5427 __ LoadRoot(at, Heap::kTrueValueRootIndex);
5428 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5429 __ LoadRoot(at, Heap::kFalseValueRootIndex);
5431 cmp2 = Operand(input);
5432 final_branch_condition = eq;
5434 } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
5435 __ LoadRoot(at, Heap::kNullValueRootIndex);
5437 cmp2 = Operand(input);
5438 final_branch_condition = eq;
5440 } else if (type_name->Equals(heap()->undefined_string())) {
5441 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5442 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5443 // The first instruction of JumpIfSmi is an And - it is safe in the delay
5445 __ JumpIfSmi(input, false_label);
5446 // Check for undetectable objects => true.
5447 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
5448 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5449 __ And(at, at, 1 << Map::kIsUndetectable);
5451 cmp2 = Operand(zero_reg);
5452 final_branch_condition = ne;
5454 } else if (type_name->Equals(heap()->function_string())) {
5455 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5456 __ JumpIfSmi(input, false_label);
5457 __ GetObjectType(input, scratch, input);
5458 __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
5460 cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
5461 final_branch_condition = eq;
5463 } else if (type_name->Equals(heap()->object_string())) {
5464 __ JumpIfSmi(input, false_label);
5465 if (!FLAG_harmony_typeof) {
5466 __ LoadRoot(at, Heap::kNullValueRootIndex);
5467 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5469 Register map = input;
5470 __ GetObjectType(input, map, scratch);
5471 __ Branch(false_label,
5472 lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
5473 __ Branch(USE_DELAY_SLOT, false_label,
5474 gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
5475 // map is still valid, so the BitField can be loaded in delay slot.
5476 // Check for undetectable objects => false.
5477 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
5478 __ And(at, at, 1 << Map::kIsUndetectable);
5480 cmp2 = Operand(zero_reg);
5481 final_branch_condition = eq;
5485 cmp2 = Operand(zero_reg); // Set to valid regs, to avoid caller assertion.
5486 __ Branch(false_label);
5489 return final_branch_condition;
5493 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5494 Register temp1 = ToRegister(instr->temp());
5496 EmitIsConstructCall(temp1, scratch0());
5498 EmitBranch(instr, eq, temp1,
5499 Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5503 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5504 ASSERT(!temp1.is(temp2));
5505 // Get the frame pointer for the calling frame.
5506 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5508 // Skip the arguments adaptor frame if it exists.
5509 Label check_frame_marker;
5510 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5511 __ Branch(&check_frame_marker, ne, temp2,
5512 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5513 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5515 // Check the marker in the calling frame.
5516 __ bind(&check_frame_marker);
5517 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5521 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5522 if (!info()->IsStub()) {
5523 // Ensure that we have enough space after the previous lazy-bailout
5524 // instruction for patching the code here.
5525 int current_pc = masm()->pc_offset();
5526 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5527 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5528 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5529 while (padding_size > 0) {
5531 padding_size -= Assembler::kInstrSize;
5535 last_lazy_deopt_pc_ = masm()->pc_offset();
5539 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5540 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5541 ASSERT(instr->HasEnvironment());
5542 LEnvironment* env = instr->environment();
5543 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5544 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5548 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5549 Deoptimizer::BailoutType type = instr->hydrogen()->type();
5550 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5551 // needed return address), even though the implementation of LAZY and EAGER is
5552 // now identical. When LAZY is eventually completely folded into EAGER, remove
5553 // the special case below.
5554 if (info()->IsStub() && type == Deoptimizer::EAGER) {
5555 type = Deoptimizer::LAZY;
5558 Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5559 DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
5563 void LCodeGen::DoDummy(LDummy* instr) {
5564 // Nothing to see here, move on!
5568 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5569 // Nothing to see here, move on!
5573 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
5574 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5575 LoadContextFromDeferred(instr->context());
5576 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
5577 RecordSafepointWithLazyDeopt(
5578 instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
5579 ASSERT(instr->HasEnvironment());
5580 LEnvironment* env = instr->environment();
5581 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5585 void LCodeGen::DoStackCheck(LStackCheck* instr) {
5586 class DeferredStackCheck V8_FINAL : public LDeferredCode {
5588 DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
5589 : LDeferredCode(codegen), instr_(instr) { }
5590 virtual void Generate() V8_OVERRIDE {
5591 codegen()->DoDeferredStackCheck(instr_);
5593 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5595 LStackCheck* instr_;
5598 ASSERT(instr->HasEnvironment());
5599 LEnvironment* env = instr->environment();
5600 // There is no LLazyBailout instruction for stack-checks. We have to
5601 // prepare for lazy deoptimization explicitly here.
5602 if (instr->hydrogen()->is_function_entry()) {
5603 // Perform stack overflow check.
5605 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5606 __ Branch(&done, hs, sp, Operand(at));
5607 ASSERT(instr->context()->IsRegister());
5608 ASSERT(ToRegister(instr->context()).is(cp));
5609 CallCode(isolate()->builtins()->StackCheck(),
5610 RelocInfo::CODE_TARGET,
5612 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5614 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5615 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5617 ASSERT(instr->hydrogen()->is_backwards_branch());
5618 // Perform stack overflow check if this goto needs it before jumping.
5619 DeferredStackCheck* deferred_stack_check =
5620 new(zone()) DeferredStackCheck(this, instr);
5621 __ LoadRoot(at, Heap::kStackLimitRootIndex);
5622 __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
5623 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5624 __ bind(instr->done_label());
5625 deferred_stack_check->SetExit(instr->done_label());
5626 RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5627 // Don't record a deoptimization index for the safepoint here.
5628 // This will be done explicitly when emitting call and the safepoint in
5629 // the deferred code.
5634 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
5635 // This is a pseudo-instruction that ensures that the environment here is
5636 // properly registered for deoptimization and records the assembler's PC
5638 LEnvironment* environment = instr->environment();
5640 // If the environment were already registered, we would have no way of
5641 // backpatching it with the spill slot operands.
5642 ASSERT(!environment->HasBeenRegistered());
5643 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5645 GenerateOsrPrologue();
5649 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5650 Register result = ToRegister(instr->result());
5651 Register object = ToRegister(instr->object());
5652 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5653 DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5655 Register null_value = t1;
5656 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5657 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5659 __ And(at, object, kSmiTagMask);
5660 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5662 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5663 __ GetObjectType(object, a1, a1);
5664 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5666 Label use_cache, call_runtime;
5667 ASSERT(object.is(a0));
5668 __ CheckEnumCache(null_value, &call_runtime);
5670 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
5671 __ Branch(&use_cache);
5673 // Get the set of properties to enumerate.
5674 __ bind(&call_runtime);
5676 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5678 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5679 ASSERT(result.is(v0));
5680 __ LoadRoot(at, Heap::kMetaMapRootIndex);
5681 DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5682 __ bind(&use_cache);
5686 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5687 Register map = ToRegister(instr->map());
5688 Register result = ToRegister(instr->result());
5689 Label load_cache, done;
5690 __ EnumLength(result, map);
5691 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5692 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5695 __ bind(&load_cache);
5696 __ LoadInstanceDescriptors(map, result);
5698 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5700 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5701 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5707 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5708 Register object = ToRegister(instr->value());
5709 Register map = ToRegister(instr->map());
5710 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5711 DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5715 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
5716 Register object = ToRegister(instr->object());
5717 Register index = ToRegister(instr->index());
5718 Register result = ToRegister(instr->result());
5719 Register scratch = scratch0();
5721 Label out_of_object, done;
5722 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5723 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot.
5725 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
5726 __ Addu(scratch, object, scratch);
5727 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5731 __ bind(&out_of_object);
5732 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5733 // Index is equal to negated out of object property index plus 1.
5734 __ Subu(scratch, result, scratch);
5735 __ lw(result, FieldMemOperand(scratch,
5736 FixedArray::kHeaderSize - kPointerSize));
5743 } } // namespace v8::internal