// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#include "mips/lithium-codegen-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}

  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}
void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). t1 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ Branch(&ok, eq, t1, Operand(zero_reg));

      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
      __ sw(a2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ push(a0);
      __ push(a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ pop(a1);
      __ pop(a0);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
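  // For example (illustrative): if the optimized code needs 10 spill slots
  // and the unoptimized frame already accounts for 4, slots == 6 and the
  // frame grows by 6 words here.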
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (deopt_jump_table_[i].needs_frame) {
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        __ Call(t9);
      }
    } else {
      __ Call(t9);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}
Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ LoadObject(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}
DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
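      // The next two instructions move the raw 32-bit integer into the FPU
      // (mtc1) and convert it as a signed word to double (cvt_d_w), so
      // e.g. the literal 3 becomes 3.0.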
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
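  // A Smi on 32-bit MIPS is the integer shifted left by one bit (tag bit 0),
  // so e.g. value == 5 comes back as the raw word 10.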
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}
Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()));
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
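  // The high word of a double stack slot sits one pointer above the low word
  // (assuming the usual little-endian MIPS configuration), hence the extra
  // kPointerSize offset below.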
  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|
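    // Illustrative example: for a frame with 2 parameters, 1 local and an
    // empty expression stack, translation_size is 3 and height is 1; the
    // parameters end up at the far end of the translation, after the 4
    // fixed frame words.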
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
  if (FLAG_deopt_every_n_times == 1 &&
      !info()->IsStub() &&
      info()->opt_count() == id) {
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (condition == al && frame_is_built_) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
  }
}
void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
}
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}
void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
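  // This emits e.g. ";;; <@42,#12> -------------------- B3 (loop header)
  // --------------------" into the code comments (numbers illustrative).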
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());
    const Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else {
    const Register scratch = scratch0();
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // div runs in the background while we check for special cases.
    Register right_reg = EmitLoadRegister(instr->right(), scratch);
    __ div(left_reg, right_reg);
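    // MIPS div writes the quotient to the lo register and the remainder to
    // hi; mfhi below retrieves the remainder once the checks have passed.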
    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (right->CanBeZero()) {
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
    }

    // Check for kMinInt % -1, we have to deopt if we care about -0, because we
    // can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
      // TODO(svenpanne) Don't deopt when we don't care about -0.
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
      __ bind(&left_not_min_int);
    }

    // TODO(svenpanne) Only emit the test/deopt if we have to.
    __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
    __ mfhi(result_reg);

    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
    }
    __ bind(&done);
  }
}
void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, at, no_reg));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
      CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
        DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
      }
      // Compute the remainder.
      __ Move(remainder, zero_reg);
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);

        __ sra(scratch, dividend, power - 1);

        __ srl(scratch, scratch, 32 - power);
        __ Addu(scratch, dividend, Operand(scratch));
        __ sra(result, scratch, power);
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ Subu(result, zero_reg, Operand(result));
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sll(scratch, result, power);
          __ Subu(remainder, dividend, Operand(scratch));
        } else {
          __ sll(scratch, result, power);
          __ Addu(remainder, dividend, Operand(scratch));
        }

      } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
            DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        // Branch and condition free code for integer division by a power
        // of two.
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ li(scratch, Operand(M));
        __ mult(dividend, scratch);
        __ mfhi(scratch);
        if (M < 0) {
          __ Addu(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ sra(scratch, scratch, s);
          __ mov(scratch, scratch);
        }
        __ srl(at, dividend, 31);
        __ Addu(result, scratch, Operand(at));
        if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
        // Compute the remainder.
        __ li(scratch, Operand(divisor));
        __ Mul(scratch, result, Operand(scratch));
        __ Subu(remainder, dividend, Operand(scratch));
      } else {
        __ li(scratch, Operand(divisor));
        __ div(dividend, scratch);
        __ mfhi(remainder);
        __ mflo(result);
      }
  }
}
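
// Worked example (illustrative): for divisor 5 the magic pair is
// M = 0x66666667 and s = 1, because (2^33 + 3) / 5 == 0x66666667. For
// dividend 7, hi32(7 * 0x66666667) == 2 and 2 >> 1 == 1, which is 7 / 5.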
void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mfhi(result);
    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  }
  __ mflo(result);
}
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (instr->right()->IsConstantOperand()) {
    Label done;
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    if (divisor < 0) {
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(divisor));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  } else {
    Label done;
    const Register right = ToRegister(instr->right());

    // On MIPS div is asynchronous - it will run in the background while we
    // check for special cases.
    __ div(left, right);

    // Check for x / 0.
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label left_not_zero;
      __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
      DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
      __ bind(&left_not_zero);
    }

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
      DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
      __ bind(&left_not_min_int);
    }

    __ mfhi(remainder);
    __ mflo(result);

    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(right));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  }
}
void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
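        // (x + (x >> 31)) ^ (x >> 31) is a branchless absolute value: e.g.
        // constant == -6 gives mask == -1 and (-6 + -1) ^ -1 == 6. The cases
        // below then rewrite e.g. x * 4 as x << 2, x * 5 as (x << 2) + x and
        // x * 7 as (x << 3) - x, negating when the constant was negative.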
        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mult(result, right);
        __ mfhi(scratch);
        __ mflo(result);
      } else {
        __ mult(left, right);
        __ mfhi(scratch);
        __ mflo(result);
      }
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq,
                   instr->environment(),
                   result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
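    // Masking with 0x1F matches JS shift semantics: e.g. x << 33 behaves
    // exactly like x << 1, since only the low five bits of the count matter.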
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), value);
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ Move(result, input);
    __ JumpIfSmi(input, &done);
  }

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ And(at, object, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  LOperand* index_op = instr->index();
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  if (index_op->IsConstantOperand()) {
    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ sb(value,
            FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
    } else {
      __ sh(value,
            FieldMemOperand(string,
                            SeqString::kHeaderSize + constant_index * 2));
    }
  } else {
    Register index = ToRegister(index_op);
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ Addu(scratch, string, Operand(index));
      __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    } else {
      __ sll(scratch, index, 1);
      __ Addu(scratch, string, scratch);
      __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    }
  }
}
void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ push(input_reg);
  ASSERT(ToRegister(instr->context()).is(cp));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, at));
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
      __ mov(result_reg, left_reg);
      __ Branch(&done);
    }
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      __ add_d(result_reg, left_reg, right_reg);
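      // The sign arithmetic picks the right zero: min(+0, -0) computes
      // -((-0) - (-0)) == -0, while max computes +0 + -0 == +0.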
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved register.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(a1));
  ASSERT(ToRegister(instr->right()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Other arch use a nop here, to signal that there is no inlined
  // patchable code. Mips does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}

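// EmitBranch emits at most one conditional branch: whenever one successor is
// the next block to be generated, control simply falls through to it and a
// branch (on the negated condition if necessary) is emitted only for the
// other successor.
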
template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}

template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}

void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, nue, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // Spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // Heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
      }
    }
  }
}

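// Falling through to this point on the generic path means the value matched
// none of the types previously recorded by type feedback; the unconditional
// ('al') deopt above returns to unoptimized code, which records the new type
// before the function is optimized again.
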
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}

Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:  cond = is_unsigned ? lo : lt; break;
    case Token::GT:  cond = is_unsigned ? hi : gt; break;
    case Token::LTE: cond = is_unsigned ? ls : le; break;
    case Token::GTE: cond = is_unsigned ? hs : ge; break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}

void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}

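// The hole is encoded as a NaN with a well-known bit pattern. The double
// path above first branches to the false block for any non-NaN value (only
// NaN fails the self-equality test) and then matches the upper 32 bits of
// the value against kHoleNanUpper32.
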
Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));

  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}

Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:  return eq;
    case Token::LT:  return lt;
    case Token::GT:  return gt;
    case Token::LTE: return le;
    case Token::GTE: return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition, v0, Operand(zero_reg));
}

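// The CompareIC called above returns its result in v0 as a value whose
// relation to zero mirrors the comparison (negative, zero, or positive), so
// the token's condition can be tested directly against zero_reg.
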
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}

void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lw(scratch,
        FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}

// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                              SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized, since it's a
  // literal. The name in the constructor is internalized because of the way
  // the context is booted. This routine isn't expected to work for random
  // API-created classes and it doesn't have to because you can't access it
  // with natives syntax. Since both sides are internalized, it is sufficient
  // to use an identity comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}

void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label true_label, done;
  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));

  __ bind(&done);
}

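// The InstanceofStub signals a positive result by returning zero in v0,
// hence the 'eq zero_reg' test above before materializing the true/false
// objects.
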
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values are not instances of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}

void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be t0
  // because its slot among the pushed safepoint registers is used to
  // communicate the offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(t0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}

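// kAdditionalDelta accounts for the fixed number of instructions emitted
// between the delta computation above and the stub call; the delta stored in
// t0's safepoint slot tells the InstanceofStub how far back the patchable
// map-check site is, so it can patch in the cached map/result pair after
// computing the answer.
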
void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
  __ bind(&check);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ bind(&done);
}

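// The branch above places the first LoadRoot in its delay slot: exactly one
// instruction (checked by the ASSERT_EQ) materializes the 'true' value
// whether or not the branch is taken, and the fall-through path then
// overwrites it with 'false'.
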
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(v0);
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    no_frame_start = masm_->pc_offset();
    __ Pop(ra, fp);
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ sll(at, reg, kPointerSizeLog2);
    __ Addu(sp, sp, at);
  }

  __ Jump(ra);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}

void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}

void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->temp());
    __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}

void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    if (access.representation().IsByte()) {
      __ lb(result, operand);
    } else {
      __ lw(result, operand);
    }
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  if (access.representation().IsByte()) {
    __ lb(result, operand);
  } else {
    __ lw(result, operand);
  }
}

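// Out-of-object properties live in a separate FixedArray hanging off the
// object's 'properties' slot, so the !IsInobject() path above redirects
// 'object' to that backing store before applying the field offset.
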
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));

  // If the function does not have an initial map, we're done.
  Label done;
  __ GetObjectType(result, scratch, scratch);
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));

  // Get the prototype from the initial map.
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ Branch(&done);

  // Non-instance prototype: fetch the prototype from the constructor field
  // in the initial map.
  __ bind(&non_instance);
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());
  __ lw(to_reg, FieldMemOperand(from_reg,
                                ExternalArray::kExternalPointerOffset));
}

void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ lw(result, MemOperand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them, add one more.
    __ subu(length, length, index);
    __ Addu(length, length, Operand(1));
    __ sll(length, length, kPointerSizeLog2);
    __ Addu(at, arguments, Operand(length));
    __ lw(result, MemOperand(at, 0));
  }
}

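// There are two words (the caller's frame pointer and the return address)
// between the arguments pointer and the last argument; subtracting the index
// from the length accounts for one of them, and the '+ 1' in both paths
// above accounts for the other.
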
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    FPURegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Addu(scratch0(), external_pointer,
              constant_key << element_size_shift);
    } else {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), scratch0(), external_pointer);
    }
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ lwc1(result, MemOperand(scratch0(), additional_offset));
      __ cvt_d_s(result, result);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ ldc1(result, MemOperand(scratch0(), additional_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ lb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ lbu(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ lh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ lhu(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ lw(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ lw(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr->environment(),
                       result, Operand(0x80000000));
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset =
      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      (instr->additional_index() << element_size_shift);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key << element_size_shift;
  }
  __ Addu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ sll(at, key, shift_size);
    __ Addu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }
}

void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ lw(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ And(scratch, result, Operand(kSmiTagMask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}

MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    additional_index *= 1 << (element_size - shift_size);
    __ Addu(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      ASSERT_EQ(-1, shift_size);
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ sll(scratch0(), scratch0(), shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  } else {
    ASSERT_EQ(-1, shift_size);
    __ srl(scratch0(), scratch0(), 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  }
}

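// A shift_size of -1 occurs when a smi key indexes byte-sized elements: the
// smi tag already scales the key by two, so instead of shifting left the
// key must be shifted right by one, which is what the srl paths above do.
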
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}

void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
  int32_t strict_mode_function_mask =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
        FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}

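// Both branches in the copy loop above fill their delay slot with the
// 'sll scratch, length, 2' offset computation, so the byte offset of the
// next argument is ready regardless of whether the branch is taken.
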
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}

void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 A1State a1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (a1_state == A1_UNINITIALIZED) {
      __ LoadHeapObject(a1, function);
    }

    // Change context.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Set a0 to the arguments count if adaption is not needed. Assumes that
    // a0 is available to write to at this point.
    if (dont_adapt_arguments) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(t1, call_kind);
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}

void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));

  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    A1_UNINITIALIZED);
}

void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}

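// The input HeapNumber may be shared, so rather than flipping its sign bit
// in place the deferred path above allocates a fresh HeapNumber, stores the
// exponent word with the sign bit cleared, and copies the mantissa word.
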
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}

void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr->environment(), result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}

void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(a2));
  ASSERT(ToDoubleRegister(instr->left()).is(f2));
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(a2, &no_deopt);
    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
    // Deoptimize unless the exponent is a heap number.
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load native context.
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ lw(native_context, FieldMemOperand(
          global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds).
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
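  // (This is a Marsaglia-style multiply-with-carry step: the low 16 bits of
  // each seed word act as the generator state and the high 16 bits as the
  // incoming carry.)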
  Register scratch3 = ToRegister(instr->scratch3());
  Register scratch4 = scratch0();
  __ And(scratch3, state0, Operand(0xFFFF));
  __ li(scratch4, Operand(18273));
  __ Mul(scratch3, scratch3, scratch4);
  __ srl(state0, state0, 16);
  __ Addu(state0, scratch3, state0);
  // Save state[0].
  __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(scratch3, state1, Operand(0xFFFF));
  __ li(scratch4, Operand(36969));
  __ Mul(scratch3, scratch3, scratch4);
  __ srl(state1, state1, 16);
  __ Addu(state1, scratch3, state1);
  // Save state[1].
  __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = scratch4;
  __ And(random, state1, Operand(0x3FFFF));
  __ sll(state0, state0, 14);
  __ Addu(random, random, state0);
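
  // The 32 random bits become a double in [0, 1) without an integer-to-
  // double conversion: stored as the low word of a double whose high word
  // is 0x41300000, they read back as 2^20 + random * 2^-32, so subtracting
  // 2^20 (0x4130000000000000) below leaves random * 2^-32.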
  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  __ li(scratch3, Operand(0x41300000));
  // Move 0x41300000xxxxxxxx (x = random bits) to FPU.
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Move(result, random, scratch3);
  // Move 0x4130000000000000 to FPU.
  DoubleRegister scratch5 = double_scratch0();
  __ Move(scratch5, zero_reg, scratch3);
  __ sub_d(result, result, scratch5);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      A1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    A1_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  // No cell in a2 for construct type feedback in optimized code
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ li(a2, Operand(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ li(a2, Operand(instr->hydrogen()->property_cell()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to transition to holey: look at the first argument.
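      // (If the single argument is a nonzero length, the new array will
      // contain holes, so the holey variant of the stub must be used; a
      // zero length keeps the packed kind.)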
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Addu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  __ Addu(result, base, Operand(instr->offset()));
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ And(scratch, value, Operand(kSmiTagMask));
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          temp,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  ASSERT(!object.is(value));
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


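// A note on the stores above: RecordWriteField is the generational write
// barrier. EMIT_REMEMBERED_SET records a possible old-to-new pointer in the
// remembered set, while the map-transition store can use OMIT_REMEMBERED_SET
// because maps are never allocated in new space; the SmiCheck argument lets
// the barrier be skipped entirely when the stored value is known to be a smi.

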
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::ApplyCheckIf(Condition condition,
                            LBoundsCheck* check,
                            Register src1,
                            const Operand& src2) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(condition), src1, src2);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment(), src1, src2);
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ li(at, Operand(Smi::FromInt(constant_index)));
    } else {
      __ li(at, Operand(constant_index));
    }
    ApplyCheckIf(condition,
                 instr,
                 at,
                 Operand(ToRegister(instr->length())));
  } else {
    ApplyCheckIf(condition,
                 instr,
                 ToRegister(instr->index()),
                 Operand(ToRegister(instr->length())));
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Addu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ sll(address, key, shift_size);
      __ Addu(address, external_pointer, address);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, additional_offset));
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ sdc1(value, MemOperand(address, additional_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Addu(scratch, elements,
            Operand((constant_key << element_size_shift) +
                    FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ Addu(scratch, elements,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
    __ sll(at, ToRegister(instr->key()), shift_size);
    __ Addu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load canonical NaN if the comparison above set the overflow.
    __ bind(&is_nan);
    __ Move(double_scratch,
            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
    __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
        element_size_shift));
    __ Branch(&done);
  }

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
      element_size_shift));
  __ bind(&done);
}


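// Why canonicalization is needed above: a FixedDoubleArray encodes "the
// hole" as one specific NaN bit pattern, so any other NaN written into the
// backing store must first be replaced by the canonical NaN; otherwise a
// stored NaN payload could later be misread as a hole.

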
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ sw(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, and fast.
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetRAState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ mov(a0, object_reg);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  DeoptimizeIf(al, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(instr->hydrogen()->flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
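  // One-byte char codes are served from the single-character string cache;
  // an undefined cache entry (checked below) signals a miss and falls
  // through to the deferred runtime call.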
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  Register scratch = scratch0();

  __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
}


void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    Register scratch = scratch0();
    __ And(scratch, ToRegister(input), Operand(0xc0000000));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ SmiTag(ToRegister(output), ToRegister(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  DoubleRegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  Label done;
  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
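    // (Untagging shifts the wrapped tagged value back down, which recovers
    // the original integer except for a flipped sign bit; the Xor with
    // 0x80000000 below restores that bit.)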
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Move(dst, v0);
  __ Subu(dst, dst, kHeapObjectTag);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
  __ Addu(dst, dst, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(dst, dst);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
  __ Addu(reg, reg, kHeapObjectTag);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Subu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, env, scratch, Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
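      // (-0 is the only double whose low word is zero and whose high word
      // equals the sign mask, so two word compares suffice here.)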
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfc1(scratch, result_reg.high());
      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, env, input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ Branch(&no_heap_number, ne, scratch1, Operand(at));  // HeapNumber map?
    __ mov(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ And(at, ToRegister(input), Operand(kSmiTagMask));
    DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
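    // New-space objects move during scavenges, so the expected value is
    // held in a Cell, which the GC keeps up to date, and compared through
    // that indirection.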
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ And(at, scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->has_migration_target()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
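    // Prefilling keeps the heap iterable: until the caller initializes the
    // allocation, every word is made to look like a one-pointer filler
    // object, so a GC walking the space sees only valid objects.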
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Subu(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
    __ Addu(result, result, Operand(kHeapObjectTag));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
                            instr->context());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
                            instr->context());
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
                            instr->context());
  }
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}

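// EmitTypeofIs does not branch on the final condition itself; it returns
// the condition and fills in the two comparison operands (cmp1, cmp2) by
// reference, so that DoTypeofIsAndBranch can emit the branch in one place.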
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}

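// On MIPS the instruction in a branch's delay slot executes whether or not
// the branch is taken, so USE_DELAY_SLOT below lets the instruction that
// follows a branch do useful work (typically loading a comparison operand)
// for free on the fall-through path.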
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    cmp1 = scratch;
    cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the BitField can be loaded in delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}

void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}

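// Loads the caller frame's marker into temp1 so the caller can compare it
// against StackFrame::CONSTRUCT. If an arguments adaptor frame sits in
// between (created when actual and formal argument counts differ), it is
// skipped first.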
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}

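// Pads the instruction stream with nops so that at least space_needed bytes
// of instructions separate the previous lazy-deopt point from the code
// emitted here, leaving the deoptimizer room to patch in a call.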
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->IsStub()) return;
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  int current_pc = masm()->pc_offset();
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
    while (padding_size > 0) {
      __ nop();
      padding_size -= Assembler::kInstrSize;
    }
  }
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

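// Stack checks come in two flavors: at function entry the StackCheck
// builtin is called inline, while at backwards branches the slow path is
// moved into deferred code, keeping the loop back-edge itself short.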
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

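// Prepares for a for-in loop: deoptimizes when the object is undefined,
// null, a smi, or a proxy, and otherwise tries to use the enum cache from
// the object's map, falling back to Runtime::kGetPropertyNamesFast.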
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}

void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}

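// Loads a field given its property index: a non-negative index selects an
// in-object field relative to the object's header, while a negative index
// selects a slot in the out-of-object properties array (see the comment on
// the negated encoding below).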
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal