// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
        !info()->is_native() && info()->scope()->has_this_declaration()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      __ lw(a2, MemOperand(sp, receiver_offset));
      __ Branch(&ok, ne, a2, Operand(at));

      __ lw(a2, GlobalObjectOperand());
      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));

      __ sw(a2, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->IsCodePreAgingActive());
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
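      // In debug builds, fill the reserved slots with a recognizable zap
      // value so that a read from an uninitialized spill slot is easy to
      // spot in the debugger.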
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ Push(a0, a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
      __ Branch(&loop, ne, a0, Operand(sp));
      __ Pop(a0, a1);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in a1.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(a1);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp. It replaces the context passed
    // to us. It's saved in the stack and kept live in cp.
    __ mov(cp, v0);
    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(
              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, a0, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = t9;

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ li(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ Call(&needs_frame);
      } else {
        __ Call(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
      __ push(at);
      __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ Addu(entry_offset, entry_offset,
            Operand(ExternalReference::ForDeoptEntry(base)));
    __ Jump(entry_offset);
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ li(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(
        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame(translation_size);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1, const Operand& src2) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
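    // Stress mode: decrement the per-isolate counter at every potential
    // deopt point; when it reaches zero, reset it to FLAG_deopt_every_n_times
    // and force a deoptimization here.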
    Label no_deopt;
    __ Push(a1, scratch);
    __ li(scratch, Operand(count));
    __ lw(a1, MemOperand(scratch));
    __ Subu(a1, a1, Operand(1));
    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
    __ li(a1, Operand(FLAG_deopt_every_n_times));
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ sw(a1, MemOperand(scratch));
    __ Pop(a1, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (condition == al && frame_is_built_ &&
      !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ Branch(&jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Register src1, const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
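  // For a power-of-two divisor, |divisor| - 1 is a bit mask that computes
  // n % |divisor| for non-negative n. Writing it as -(divisor + 1) for
  // negative divisors avoids overflow when divisor == kMinInt; e.g.
  // divisor == -8 yields mask == 7.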
  Label dividend_is_not_negative, done;

  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
    // Note: The code below even works when right contains kMinInt.
    __ subu(dividend, zero_reg, dividend);
    __ And(dividend, dividend, Operand(mask));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                   Operand(zero_reg));
    }
    __ Branch(USE_DELAY_SLOT, &done);
    __ subu(dividend, zero_reg, dividend);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

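  // Compute the remainder as dividend - (dividend / |divisor|) * |divisor|;
  // like the JS % operator, the sign of the result follows the dividend.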
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ Mul(result, result, Operand(Abs(divisor)));
  __ Subu(result, dividend, Operand(result));

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());
  Label done;
  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
                 Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
    } else {
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));

  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
                 Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ And(at, dividend, Operand(mask));
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ Subu(result, zero_reg, dividend);
    return;
  }

  uint16_t shift = WhichPowerOf2Abs(divisor);
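  // An arithmetic right shift rounds toward negative infinity, while this
  // division must truncate toward zero. For negative dividends, first add
  // |divisor| - 1 (built below from the dividend's sign bits) so that the
  // shift rounds toward zero.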
  if (shift == 0) {
    __ Move(result, dividend);
  } else if (shift == 1) {
    __ srl(result, dividend, 31);
    __ Addu(result, dividend, Operand(result));
  } else {
    __ sra(result, dividend, 31);
    __ srl(result, result, 32 - shift);
    __ Addu(result, dividend, Operand(result));
  }
  if (shift > 0) __ sra(result, result, shift);
  if (divisor < 0) __ Subu(result, zero_reg, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);

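  // TruncatingDiv produced the quotient via a multiply by a precomputed
  // reciprocal, so verify it: multiply the quotient back by the divisor and
  // compare with the dividend; any difference means a nonzero remainder.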
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ Mul(scratch0(), result, Operand(divisor));
    __ Subu(scratch0(), scratch0(), dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
                 Operand(zero_reg));
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if the division left a remainder.
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  DCHECK(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  Register scratch = result.is(dividend) ? scratch0() : dividend;
  DCHECK(!result.is(dividend) || !scratch.is(dividend));

  // If the divisor is 1, return the dividend.
  if (divisor == 1) {
    __ Move(result, dividend);
    return;
  }

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  uint16_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sra(result, dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.

  // dividend can be the same register as result so save the value of it
  // for checking overflow.
  __ Move(scratch, dividend);

  __ Subu(result, zero_reg, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  }

  // Dividing by -1 is basically negation, unless we overflow.
  __ Xor(scratch, scratch, result);
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
                   Operand(zero_reg));
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sra(result, result, shift);
    return;
  }

  Label no_overflow, done;
  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
  __ li(result, Operand(kMinInt / divisor));
  __ Branch(&done);
  __ bind(&no_overflow);
  __ sra(result, result, shift);
  __ bind(&done);
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
                 Operand(zero_reg));
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ Subu(result, zero_reg, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
            dividend, Operand(zero_reg));
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Branch(&done);
  __ bind(&needs_adjustment);
  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ Subu(result, zero_reg, result);
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = scratch0();
  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
                 Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
                 Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // We performed a truncating division. Correct the result if necessary.
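  // Flooring and truncating division differ only when the exact quotient is
  // negative with a nonzero remainder. remainder XOR divisor is negative
  // exactly when the two have opposite signs, so subtract 1 in that case.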
  Label done;
  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
  __ Xor(remainder, remainder, Operand(divisor));
  __ Branch(&done, ge, remainder, Operand(zero_reg));
  __ Subu(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
                       Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
                       Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

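        // Branchless absolute value: mask is 0 for non-negative constants
        // and -1 for negative ones, so (constant + mask) ^ mask negates
        // exactly the negative case.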
        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ sll(result, left, shift);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ sll(scratch, left, shift);
          __ Addu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ sll(scratch, left, shift);
          __ Subu(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ Subu(result, zero_reg, result);
        } else {
          // Generate standard code.
          __ li(at, constant);
          __ Mul(result, left, at);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      // hi:lo = left * right.
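      // The 64-bit product overflows int32 iff the high word differs from
      // the sign extension of the low word; 'sra at, result, 31' below
      // materializes that sign extension for the comparison.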
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(scratch, result, result, right);
      } else {
        __ Mul(scratch, result, left, right);
      }
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
                       Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
                         Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
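            // Smi tagging is itself a left shift by one, so shift by
            // shift_count - 1 here and let SmiTagCheckOverflow supply the
            // final bit while checking for overflow.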
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
                         Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      DCHECK(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() || right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      DCHECK(right->IsRegister());
      // Due to overflow check macros not supporting constant operands,
      // handling the IsConstantOperand case was moved to prev if clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
                 Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ li(at, Operand(lo));
    __ li(scratch0(), Operand(hi));
    __ Move(result, at, scratch0());
    return;
  }
#endif
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ li(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  DCHECK(object.is(a0));
  DCHECK(result.is(v0));
  DCHECK(!scratch.is(scratch0()));
  DCHECK(!scratch.is(object));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    Label runtime, done;
    if (index->value() < JSDate::kFirstUncachedField) {
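      // Cached date fields are only valid while the object's cache stamp
      // matches the isolate's; on mismatch fall through to the runtime call.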
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string,
                                           LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ Addu(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ sll(scratch, ToRegister(index), 1);
    __ Addu(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbu(result, operand);
  } else {
    __ lhu(result, operand);
  }
}


1878 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1879 String::Encoding encoding = instr->hydrogen()->encoding();
1880 Register string = ToRegister(instr->string());
1881 Register value = ToRegister(instr->value());
1883 if (FLAG_debug_code) {
1884 Register scratch = scratch0();
1885 Register index = ToRegister(instr->index());
1886 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1887 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1889 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1890 ? one_byte_seq_type : two_byte_seq_type;
1891 __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
1892 }
1894 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1895 if (encoding == String::ONE_BYTE_ENCODING) {
1896 __ sb(value, operand);
1897 } else {
1898 __ sh(value, operand);
1899 }
1900 }
1903 void LCodeGen::DoAddI(LAddI* instr) {
1904 LOperand* left = instr->left();
1905 LOperand* right = instr->right();
1906 LOperand* result = instr->result();
1907 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1909 if (!can_overflow) {
1910 if (right->IsStackSlot()) {
1911 Register right_reg = EmitLoadRegister(right, at);
1912 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
1913 } else {
1914 DCHECK(right->IsRegister() || right->IsConstantOperand());
1915 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
1916 }
1917 } else { // can_overflow.
1918 Register overflow = scratch0();
1919 Register scratch = scratch1();
1920 if (right->IsStackSlot() ||
1921 right->IsConstantOperand()) {
1922 Register right_reg = EmitLoadRegister(right, scratch);
1923 __ AdduAndCheckForOverflow(ToRegister(result),
1924 ToRegister(left),
1925 right_reg,
1926 overflow); // Reg at also used as scratch.
1927 } else {
1928 DCHECK(right->IsRegister());
1929 // Due to overflow check macros not supporting constant operands,
1930 // handling the IsConstantOperand case was moved to prev if clause.
1931 __ AdduAndCheckForOverflow(ToRegister(result),
1932 ToRegister(left),
1933 ToRegister(right),
1934 overflow); // Reg at also used as scratch.
1935 }
1936 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
1937 Operand(zero_reg));
1938 }
1939 }
1942 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1943 LOperand* left = instr->left();
1944 LOperand* right = instr->right();
1945 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1946 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1947 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
1948 Register left_reg = ToRegister(left);
1949 Register right_reg = EmitLoadRegister(right, scratch0());
1950 Register result_reg = ToRegister(instr->result());
1951 Label return_right, done;
1952 Register scratch = scratch1();
1953 __ Slt(scratch, left_reg, Operand(right_reg));
1954 if (condition == ge) {
1955 __ Movz(result_reg, left_reg, scratch);
1956 __ Movn(result_reg, right_reg, scratch);
1957 } else {
1958 DCHECK(condition == le);
1959 __ Movn(result_reg, left_reg, scratch);
1960 __ Movz(result_reg, right_reg, scratch);
1961 }
1962 } else {
1963 DCHECK(instr->hydrogen()->representation().IsDouble());
1964 FPURegister left_reg = ToDoubleRegister(left);
1965 FPURegister right_reg = ToDoubleRegister(right);
1966 FPURegister result_reg = ToDoubleRegister(instr->result());
1967 Label check_nan_left, check_zero, return_left, return_right, done;
1968 __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
1969 __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
1970 __ Branch(&return_right);
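1971 // Unordered (NaN) operands branch to check_nan_left; equal operands branch to check_zero, where +0 and -0 are told apart (e.g. Math.min(0, -0) must yield -0).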
1972 __ bind(&check_zero);
1973 // left == right != 0.
1974 __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
1975 // At this point, both left and right are either 0 or -0.
1976 if (operation == HMathMinMax::kMathMin) {
1977 __ neg_d(left_reg, left_reg);
1978 __ sub_d(result_reg, left_reg, right_reg);
1979 __ neg_d(result_reg, result_reg);
1980 } else {
1981 __ add_d(result_reg, left_reg, right_reg);
1982 }
1983 __ Branch(&done);
1985 __ bind(&check_nan_left);
1986 // left == NaN.
1987 __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
1988 __ bind(&return_right);
1989 if (!right_reg.is(result_reg)) {
1990 __ mov_d(result_reg, right_reg);
1991 }
1992 __ Branch(&done);
1994 __ bind(&return_left);
1995 if (!left_reg.is(result_reg)) {
1996 __ mov_d(result_reg, left_reg);
1997 }
1998 __ bind(&done);
1999 }
2000 }
2003 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2004 DoubleRegister left = ToDoubleRegister(instr->left());
2005 DoubleRegister right = ToDoubleRegister(instr->right());
2006 DoubleRegister result = ToDoubleRegister(instr->result());
2007 switch (instr->op()) {
2008 case Token::ADD:
2009 __ add_d(result, left, right);
2010 break;
2011 case Token::SUB:
2012 __ sub_d(result, left, right);
2013 break;
2014 case Token::MUL:
2015 __ mul_d(result, left, right);
2016 break;
2017 case Token::DIV:
2018 __ div_d(result, left, right);
2019 break;
2020 case Token::MOD: {
2021 // Save a0-a3 on the stack.
2022 RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
2023 __ MultiPush(saved_regs);
2025 __ PrepareCallCFunction(0, 2, scratch0());
2026 __ MovToFloatParameters(left, right);
2027 __ CallCFunction(
2028 ExternalReference::mod_two_doubles_operation(isolate()),
2029 0, 2);
2030 // Move the result into the double result register.
2031 __ MovFromFloatResult(result);
2033 // Restore the saved registers.
2034 __ MultiPop(saved_regs);
2035 break;
2036 }
2037 default:
2038 UNREACHABLE();
2039 break;
2040 }
2041 }
2044 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2045 DCHECK(ToRegister(instr->context()).is(cp));
2046 DCHECK(ToRegister(instr->left()).is(a1));
2047 DCHECK(ToRegister(instr->right()).is(a0));
2048 DCHECK(ToRegister(instr->result()).is(v0));
2050 Handle<Code> code =
2051 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2052 CallCode(code, RelocInfo::CODE_TARGET, instr);
2053 // Other architectures use a nop here, to signal that there is no inlined
2054 // patchable code. MIPS does not need the nop, since our marker
2055 // instruction (andi zero_reg) will never be used in normal code.
2056 }
2059 template<class InstrType>
2060 void LCodeGen::EmitBranch(InstrType instr,
2061 Condition condition,
2062 Register src1,
2063 const Operand& src2) {
2064 int left_block = instr->TrueDestination(chunk_);
2065 int right_block = instr->FalseDestination(chunk_);
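2066 // Pick the cheapest branch shape: a plain goto when both targets agree, a single negated branch when the true block falls through, or two branches otherwise.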
2067 int next_block = GetNextEmittedBlock();
2068 if (right_block == left_block || condition == al) {
2069 EmitGoto(left_block);
2070 } else if (left_block == next_block) {
2071 __ Branch(chunk_->GetAssemblyLabel(right_block),
2072 NegateCondition(condition), src1, src2);
2073 } else if (right_block == next_block) {
2074 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2075 } else {
2076 __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
2077 __ Branch(chunk_->GetAssemblyLabel(right_block));
2078 }
2079 }
2082 template<class InstrType>
2083 void LCodeGen::EmitBranchF(InstrType instr,
2084 Condition condition,
2085 FPURegister src1,
2086 FPURegister src2) {
2087 int right_block = instr->FalseDestination(chunk_);
2088 int left_block = instr->TrueDestination(chunk_);
2090 int next_block = GetNextEmittedBlock();
2091 if (right_block == left_block) {
2092 EmitGoto(left_block);
2093 } else if (left_block == next_block) {
2094 __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
2095 NegateFpuCondition(condition), src1, src2);
2096 } else if (right_block == next_block) {
2097 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2098 condition, src1, src2);
2099 } else {
2100 __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
2101 condition, src1, src2);
2102 __ Branch(chunk_->GetAssemblyLabel(right_block));
2103 }
2104 }
2107 template<class InstrType>
2108 void LCodeGen::EmitFalseBranch(InstrType instr,
2109 Condition condition,
2110 Register src1,
2111 const Operand& src2) {
2112 int false_block = instr->FalseDestination(chunk_);
2113 __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
2114 }
2117 template<class InstrType>
2118 void LCodeGen::EmitFalseBranchF(InstrType instr,
2119 Condition condition,
2120 FPURegister src1,
2121 FPURegister src2) {
2122 int false_block = instr->FalseDestination(chunk_);
2123 __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
2124 condition, src1, src2);
2125 }
2128 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
2129 __ stop("LDebugBreak");
2130 }
2133 void LCodeGen::DoBranch(LBranch* instr) {
2134 Representation r = instr->hydrogen()->value()->representation();
2135 if (r.IsInteger32() || r.IsSmi()) {
2136 DCHECK(!info()->IsStub());
2137 Register reg = ToRegister(instr->value());
2138 EmitBranch(instr, ne, reg, Operand(zero_reg));
2139 } else if (r.IsDouble()) {
2140 DCHECK(!info()->IsStub());
2141 DoubleRegister reg = ToDoubleRegister(instr->value());
2142 // Test the double value. Zero and NaN are false.
2143 EmitBranchF(instr, ogl, reg, kDoubleRegZero);
2144 } else {
2145 DCHECK(r.IsTagged());
2146 Register reg = ToRegister(instr->value());
2147 HType type = instr->hydrogen()->value()->type();
2148 if (type.IsBoolean()) {
2149 DCHECK(!info()->IsStub());
2150 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2151 EmitBranch(instr, eq, reg, Operand(at));
2152 } else if (type.IsSmi()) {
2153 DCHECK(!info()->IsStub());
2154 EmitBranch(instr, ne, reg, Operand(zero_reg));
2155 } else if (type.IsJSArray()) {
2156 DCHECK(!info()->IsStub());
2157 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2158 } else if (type.IsHeapNumber()) {
2159 DCHECK(!info()->IsStub());
2160 DoubleRegister dbl_scratch = double_scratch0();
2161 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2162 // Test the double value. Zero and NaN are false.
2163 EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
2164 } else if (type.IsString()) {
2165 DCHECK(!info()->IsStub());
2166 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2167 EmitBranch(instr, ne, at, Operand(zero_reg));
2168 } else {
2169 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2170 // Avoid deopts in the case where we've never executed this path before.
2171 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
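2172 // Only the value classes observed at this site get inline checks below; anything outside the observed set falls through to the deopt at the end.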
2173 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2174 // undefined -> false.
2175 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2176 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2177 }
2178 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2179 // Boolean -> its value.
2180 __ LoadRoot(at, Heap::kTrueValueRootIndex);
2181 __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
2182 __ LoadRoot(at, Heap::kFalseValueRootIndex);
2183 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2184 }
2185 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2186 // 'null' -> false.
2187 __ LoadRoot(at, Heap::kNullValueRootIndex);
2188 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2189 }
2191 if (expected.Contains(ToBooleanStub::SMI)) {
2192 // Smis: 0 -> false, all other -> true.
2193 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2194 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2195 } else if (expected.NeedsMap()) {
2196 // If we need a map later and have a Smi -> deopt.
2197 __ SmiTst(reg, at);
2198 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
2199 }
2201 const Register map = scratch0();
2202 if (expected.NeedsMap()) {
2203 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2204 if (expected.CanBeUndetectable()) {
2205 // Undetectable -> false.
2206 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2207 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2208 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2209 }
2210 }
2212 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2213 // spec object -> true.
2214 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2215 __ Branch(instr->TrueLabel(chunk_),
2216 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2217 }
2219 if (expected.Contains(ToBooleanStub::STRING)) {
2220 // String value -> false iff empty.
2221 Label not_string;
2222 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2223 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2224 __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
2225 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2226 __ Branch(instr->FalseLabel(chunk_));
2227 __ bind(&not_string);
2228 }
2230 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2231 // Symbol value -> true.
2232 const Register scratch = scratch1();
2233 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2234 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
2235 }
2237 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2238 // heap number -> false iff +0, -0, or NaN.
2239 DoubleRegister dbl_scratch = double_scratch0();
2240 Label not_heap_number;
2241 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
2242 __ Branch(&not_heap_number, ne, map, Operand(at));
2243 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2244 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2245 ne, dbl_scratch, kDoubleRegZero);
2246 // Falls through if dbl_scratch == 0.
2247 __ Branch(instr->FalseLabel(chunk_));
2248 __ bind(&not_heap_number);
2249 }
2251 if (!expected.IsGeneric()) {
2252 // We've seen something for the first time -> deopt.
2253 // This can only happen if we are not generic already.
2254 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
2255 Operand(zero_reg));
2256 }
2257 }
2258 }
2259 }
2262 void LCodeGen::EmitGoto(int block) {
2263 if (!IsNextEmittedBlock(block)) {
2264 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2265 }
2266 }
2269 void LCodeGen::DoGoto(LGoto* instr) {
2270 EmitGoto(instr->block_id());
2271 }
2274 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
2275 Condition cond = kNoCondition;
2276 switch (op) {
2277 case Token::EQ:
2278 case Token::EQ_STRICT:
2279 cond = eq;
2280 break;
2281 case Token::NE:
2282 case Token::NE_STRICT:
2283 cond = ne;
2284 break;
2285 case Token::LT:
2286 cond = is_unsigned ? lo : lt;
2287 break;
2288 case Token::GT:
2289 cond = is_unsigned ? hi : gt;
2290 break;
2291 case Token::LTE:
2292 cond = is_unsigned ? ls : le;
2293 break;
2294 case Token::GTE:
2295 cond = is_unsigned ? hs : ge;
2296 break;
2297 case Token::IN:
2298 case Token::INSTANCEOF:
2299 default:
2300 UNREACHABLE();
2301 }
2302 return cond;
2303 }
2306 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2307 LOperand* left = instr->left();
2308 LOperand* right = instr->right();
2309 bool is_unsigned =
2310 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2311 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2312 Condition cond = TokenToCondition(instr->op(), is_unsigned);
2314 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2315 // We can statically evaluate the comparison.
2316 double left_val = ToDouble(LConstantOperand::cast(left));
2317 double right_val = ToDouble(LConstantOperand::cast(right));
2318 int next_block = EvalComparison(instr->op(), left_val, right_val) ?
2319 instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
2320 EmitGoto(next_block);
2321 } else {
2322 if (instr->is_double()) {
2323 // Compare left and right as doubles and load the
2324 // resulting flags into the normal status register.
2325 FPURegister left_reg = ToDoubleRegister(left);
2326 FPURegister right_reg = ToDoubleRegister(right);
2328 // If a NaN is involved, i.e. the result is unordered,
2329 // jump to false block label.
2330 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2331 left_reg, right_reg);
2333 EmitBranchF(instr, cond, left_reg, right_reg);
2334 } else {
2335 Register cmp_left;
2336 Operand cmp_right = Operand(0);
2338 if (right->IsConstantOperand()) {
2339 int32_t value = ToInteger32(LConstantOperand::cast(right));
2340 if (instr->hydrogen_value()->representation().IsSmi()) {
2341 cmp_left = ToRegister(left);
2342 cmp_right = Operand(Smi::FromInt(value));
2343 } else {
2344 cmp_left = ToRegister(left);
2345 cmp_right = Operand(value);
2346 }
2347 } else if (left->IsConstantOperand()) {
2348 int32_t value = ToInteger32(LConstantOperand::cast(left));
2349 if (instr->hydrogen_value()->representation().IsSmi()) {
2350 cmp_left = ToRegister(right);
2351 cmp_right = Operand(Smi::FromInt(value));
2352 } else {
2353 cmp_left = ToRegister(right);
2354 cmp_right = Operand(value);
2355 }
2356 // We commuted the operands, so commute the condition.
2357 cond = CommuteCondition(cond);
2358 } else {
2359 cmp_left = ToRegister(left);
2360 cmp_right = Operand(ToRegister(right));
2361 }
2363 EmitBranch(instr, cond, cmp_left, cmp_right);
2364 }
2365 }
2366 }
2369 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2370 Register left = ToRegister(instr->left());
2371 Register right = ToRegister(instr->right());
2373 EmitBranch(instr, eq, left, Operand(right));
2374 }
2377 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2378 if (instr->hydrogen()->representation().IsTagged()) {
2379 Register input_reg = ToRegister(instr->object());
2380 __ li(at, Operand(factory()->the_hole_value()));
2381 EmitBranch(instr, eq, input_reg, Operand(at));
2382 return;
2383 }
2385 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2386 EmitFalseBranchF(instr, eq, input_reg, input_reg);
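2387 // Non-NaN values take the false branch above (they equal themselves); among NaNs, only the hole pattern has kHoleNanUpper32 in its high word.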
2388 Register scratch = scratch0();
2389 __ FmoveHigh(scratch, input_reg);
2390 EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
2391 }
2394 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2395 Representation rep = instr->hydrogen()->value()->representation();
2396 DCHECK(!rep.IsInteger32());
2397 Register scratch = ToRegister(instr->temp());
2399 if (rep.IsDouble()) {
2400 DoubleRegister value = ToDoubleRegister(instr->value());
2401 EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2402 __ FmoveHigh(scratch, value);
2403 __ li(at, 0x80000000);
2404 } else {
2405 Register value = ToRegister(instr->value());
2406 __ CheckMap(value,
2407 scratch,
2408 Heap::kHeapNumberMapRootIndex,
2409 instr->FalseLabel(chunk()),
2410 DO_SMI_CHECK);
2411 __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2412 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2413 __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2414 __ mov(at, zero_reg);
2415 }
2416 EmitBranch(instr, eq, scratch, Operand(at));
2417 }
2420 Condition LCodeGen::EmitIsObject(Register input,
2421 Register temp1,
2422 Register temp2,
2423 Label* is_not_object,
2424 Label* is_object) {
2425 __ JumpIfSmi(input, is_not_object);
2427 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2428 __ Branch(is_object, eq, input, Operand(temp2));
2431 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2432 // Undetectable objects behave like undefined.
2433 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2434 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2435 __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2437 // Load instance type and check that it is in object type range.
2438 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2439 __ Branch(is_not_object,
2440 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2441 return le;
2442 }
2446 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2447 Register reg = ToRegister(instr->value());
2448 Register temp1 = ToRegister(instr->temp());
2449 Register temp2 = scratch0();
2451 Condition true_cond =
2452 EmitIsObject(reg, temp1, temp2,
2453 instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
2455 EmitBranch(instr, true_cond, temp2,
2456 Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2457 }
2460 Condition LCodeGen::EmitIsString(Register input,
2461 Register temp1,
2462 Label* is_not_string,
2463 SmiCheck check_needed = INLINE_SMI_CHECK) {
2464 if (check_needed == INLINE_SMI_CHECK) {
2465 __ JumpIfSmi(input, is_not_string);
2466 }
2467 __ GetObjectType(input, temp1, temp1);
2469 return lt;
2470 }
2473 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2474 Register reg = ToRegister(instr->value());
2475 Register temp1 = ToRegister(instr->temp());
2477 SmiCheck check_needed =
2478 instr->hydrogen()->value()->type().IsHeapObject()
2479 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2480 Condition true_cond =
2481 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2483 EmitBranch(instr, true_cond, temp1,
2484 Operand(FIRST_NONSTRING_TYPE));
2485 }
2488 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2489 Register input_reg = EmitLoadRegister(instr->value(), at);
2490 __ And(at, input_reg, kSmiTagMask);
2491 EmitBranch(instr, eq, at, Operand(zero_reg));
2492 }
2495 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2496 Register input = ToRegister(instr->value());
2497 Register temp = ToRegister(instr->temp());
2499 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2500 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2501 }
2502 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2503 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2504 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2505 EmitBranch(instr, ne, at, Operand(zero_reg));
2506 }
2509 static Condition ComputeCompareCondition(Token::Value op) {
2510 switch (op) {
2511 case Token::EQ_STRICT:
2512 case Token::EQ:
2513 return eq;
2514 case Token::LT:
2515 return lt;
2516 case Token::GT:
2517 return gt;
2518 case Token::LTE:
2519 return le;
2520 case Token::GTE:
2521 return ge;
2522 default:
2523 UNREACHABLE();
2524 return kNoCondition;
2525 }
2526 }
2529 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2530 DCHECK(ToRegister(instr->context()).is(cp));
2531 Token::Value op = instr->op();
2533 Handle<Code> ic =
2534 CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
2535 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2537 Condition condition = ComputeCompareCondition(op);
2539 EmitBranch(instr, condition, v0, Operand(zero_reg));
2540 }
2543 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2544 InstanceType from = instr->from();
2545 InstanceType to = instr->to();
2546 if (from == FIRST_TYPE) return to;
2547 DCHECK(from == to || to == LAST_TYPE);
2548 return from;
2549 }
2552 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2553 InstanceType from = instr->from();
2554 InstanceType to = instr->to();
2555 if (from == to) return eq;
2556 if (to == LAST_TYPE) return hs;
2557 if (from == FIRST_TYPE) return ls;
2558 UNREACHABLE();
2559 return eq;
2560 }
2563 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2564 Register scratch = scratch0();
2565 Register input = ToRegister(instr->value());
2567 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2568 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2569 }
2571 __ GetObjectType(input, scratch, scratch);
2572 EmitBranch(instr,
2573 BranchCondition(instr->hydrogen()),
2574 scratch,
2575 Operand(TestType(instr->hydrogen())));
2576 }
2579 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2580 Register input = ToRegister(instr->value());
2581 Register result = ToRegister(instr->result());
2583 __ AssertString(input);
2585 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
2586 __ IndexFromHash(result, result);
2587 }
2590 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2591 LHasCachedArrayIndexAndBranch* instr) {
2592 Register input = ToRegister(instr->value());
2593 Register scratch = scratch0();
2595 __ lw(scratch,
2596 FieldMemOperand(input, String::kHashFieldOffset));
2597 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2598 EmitBranch(instr, eq, at, Operand(zero_reg));
2599 }
2602 // Branches to a label or falls through with the answer in flags. Trashes
2603 // the temp registers, but not the input.
2604 void LCodeGen::EmitClassOfTest(Label* is_true,
2605 Label* is_false,
2606 Handle<String> class_name,
2607 Register input,
2608 Register temp,
2609 Register temp2) {
2610 DCHECK(!input.is(temp));
2611 DCHECK(!input.is(temp2));
2612 DCHECK(!temp.is(temp2));
2614 __ JumpIfSmi(input, is_false);
2616 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2617 // Assuming the following assertions, we can use the same compares to test
2618 // for both being a function type and being in the object type range.
2619 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2620 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2621 FIRST_SPEC_OBJECT_TYPE + 1);
2622 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2623 LAST_SPEC_OBJECT_TYPE - 1);
2624 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2626 __ GetObjectType(input, temp, temp2);
2627 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2628 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2629 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2630 } else {
2631 // Faster code path to avoid two compares: subtract lower bound from the
2632 // actual type and do a signed compare with the width of the type range.
2633 __ GetObjectType(input, temp, temp2);
2634 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2635 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2636 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2637 }
2639 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2640 // Check if the constructor in the map is a function.
2641 Register instance_type = scratch1();
2642 DCHECK(!instance_type.is(temp));
2643 __ GetMapConstructor(temp, temp, temp2, instance_type);
2645 // Objects with a non-function constructor have class 'Object'.
2646 if (String::Equals(class_name, isolate()->factory()->Object_string())) {
2647 __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2649 __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
2650 }
2652 // temp now contains the constructor function. Grab the
2653 // instance class name from there.
2654 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2655 __ lw(temp, FieldMemOperand(temp,
2656 SharedFunctionInfo::kInstanceClassNameOffset));
2657 // The class name we are testing against is internalized since it's a literal.
2658 // The name in the constructor is internalized because of the way the context
2659 // is booted. This routine isn't expected to work for random API-created
2660 // classes and it doesn't have to because you can't access it with natives
2661 // syntax. Since both sides are internalized it is sufficient to use an
2662 // identity comparison.
2664 // End with the address of this class_name instance in temp register.
2665 // On MIPS, the caller must do the comparison with Handle<String> class_name.
2666 }
2669 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2670 Register input = ToRegister(instr->value());
2671 Register temp = scratch0();
2672 Register temp2 = ToRegister(instr->temp());
2673 Handle<String> class_name = instr->hydrogen()->class_name();
2675 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2676 class_name, input, temp, temp2);
2678 EmitBranch(instr, eq, temp, Operand(class_name));
2679 }
2682 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2683 Register reg = ToRegister(instr->value());
2684 Register temp = ToRegister(instr->temp());
2686 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2687 EmitBranch(instr, eq, temp, Operand(instr->map()));
2688 }
2691 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2692 DCHECK(ToRegister(instr->context()).is(cp));
2693 Label true_label, done;
2694 DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0.
2695 DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1.
2696 Register result = ToRegister(instr->result());
2697 DCHECK(result.is(v0));
2699 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2700 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2702 __ Branch(&true_label, eq, result, Operand(zero_reg));
2703 __ li(result, Operand(factory()->false_value()));
2704 __ Branch(&done);
2705 __ bind(&true_label);
2706 __ li(result, Operand(factory()->true_value()));
2707 __ bind(&done);
2708 }
2711 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2712 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2713 public:
2714 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2715 LInstanceOfKnownGlobal* instr)
2716 : LDeferredCode(codegen), instr_(instr) { }
2717 void Generate() override {
2718 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2719 }
2720 LInstruction* instr() override { return instr_; }
2721 Label* map_check() { return &map_check_; }
2723 private:
2724 LInstanceOfKnownGlobal* instr_;
2725 Label map_check_;
2726 };
2728 DeferredInstanceOfKnownGlobal* deferred;
2729 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2731 Label done, false_result;
2732 Register object = ToRegister(instr->value());
2733 Register temp = ToRegister(instr->temp());
2734 Register result = ToRegister(instr->result());
2736 DCHECK(object.is(a0));
2737 DCHECK(result.is(v0));
2739 // A Smi is not instance of anything.
2740 __ JumpIfSmi(object, &false_result);
2742 // This is the inlined call site instanceof cache. The two occurrences of the
2743 // hole value will be patched to the last map/result pair generated by the
2744 // instanceof stub.
2745 Label cache_miss;
2746 Register map = temp;
2747 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
2748 {
2749 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2750 __ bind(deferred->map_check()); // Label for calculating code patching.
2751 // We use Factory::the_hole_value() on purpose instead of loading from the
2752 // root array to force relocation to be able to later patch with
2753 // the cached map.
2754 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2755 __ li(at, Operand(cell));
2756 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
2757 __ BranchShort(&cache_miss, ne, map, Operand(at));
2758 // We use Factory::the_hole_value() on purpose instead of loading from the
2759 // root array to force relocation to be able to later patch
2760 // with true or false. The distance from map check has to be constant.
2761 __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
2762 __ Branch(&done);
2763 }
2764 // The inlined call site cache did not match. Check null and string before
2765 // calling the deferred code.
2766 __ bind(&cache_miss);
2767 // Null is not instance of anything.
2768 __ LoadRoot(temp, Heap::kNullValueRootIndex);
2769 __ Branch(&false_result, eq, object, Operand(temp));
2771 // String values are not instances of anything.
2772 Condition cc = __ IsObjectStringType(object, temp, temp);
2773 __ Branch(&false_result, cc, temp, Operand(zero_reg));
2775 // Go to the deferred code.
2776 __ Branch(deferred->entry());
2778 __ bind(&false_result);
2779 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2781 // Here result has either true or false. Deferred code also produces true or
2782 // false object.
2783 __ bind(deferred->exit());
2784 __ bind(&done);
2785 }
2788 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2789 Label* map_check) {
2790 Register result = ToRegister(instr->result());
2791 DCHECK(result.is(v0));
2793 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2794 flags = static_cast<InstanceofStub::Flags>(
2795 flags | InstanceofStub::kArgsInRegisters);
2796 flags = static_cast<InstanceofStub::Flags>(
2797 flags | InstanceofStub::kCallSiteInlineCheck);
2798 flags = static_cast<InstanceofStub::Flags>(
2799 flags | InstanceofStub::kReturnTrueFalseObject);
2800 InstanceofStub stub(isolate(), flags);
2802 PushSafepointRegistersScope scope(this);
2803 LoadContextFromDeferred(instr->context());
2805 // Get the temp register reserved by the instruction. This needs to be t0 as
2806 // its slot of the pushing of safepoint registers is used to communicate the
2807 // offset to the location of the map check.
2808 Register temp = ToRegister(instr->temp());
2809 DCHECK(temp.is(t0));
2810 __ li(InstanceofStub::right(), instr->function());
2811 static const int kAdditionalDelta = 7;
2812 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2813 Label before_push_delta;
2814 __ bind(&before_push_delta);
2815 {
2816 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2817 __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
2818 __ StoreToSafepointRegisterSlot(temp, temp);
2819 }
2820 CallCodeGeneric(stub.GetCode(),
2821 RelocInfo::CODE_TARGET,
2822 instr,
2823 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2824 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2825 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2826 // Put the result value into the result register slot and
2827 // restore all registers.
2828 __ StoreToSafepointRegisterSlot(result, result);
2829 }
2832 void LCodeGen::DoCmpT(LCmpT* instr) {
2833 DCHECK(ToRegister(instr->context()).is(cp));
2834 Token::Value op = instr->op();
2836 Handle<Code> ic =
2837 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2838 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2839 // On MIPS there is no need for a "no inlined smi code" marker (nop).
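2840 // The CompareIC returns a smi in v0 whose sign encodes the ordering, so 'condition' is applied against zero below.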
2841 Condition condition = ComputeCompareCondition(op);
2842 // A minor optimization that relies on LoadRoot always emitting one
2843 // instruction.
2844 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
2845 Label done, check;
2846 __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2847 __ bind(&check);
2848 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2849 DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
2850 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2851 __ bind(&done);
2852 }
2855 void LCodeGen::DoReturn(LReturn* instr) {
2856 if (FLAG_trace && info()->IsOptimizing()) {
2857 // Push the return value on the stack as the parameter.
2858 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2859 // managed by the register allocator and tearing down the frame, it's
2860 // safe to write to the context register.
2861 __ push(v0);
2862 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2863 __ CallRuntime(Runtime::kTraceExit, 1);
2864 }
2865 if (info()->saves_caller_doubles()) {
2866 RestoreCallerDoubles();
2867 }
2868 int no_frame_start = -1;
2869 if (NeedsEagerFrame()) {
2870 __ mov(sp, fp);
2871 no_frame_start = masm_->pc_offset();
2872 __ Pop(ra, fp);
2873 }
2874 if (instr->has_constant_parameter_count()) {
2875 int parameter_count = ToInteger32(instr->constant_parameter_count());
2876 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2877 if (sp_delta != 0) {
2878 __ Addu(sp, sp, Operand(sp_delta));
2879 }
2880 } else {
2881 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2882 Register reg = ToRegister(instr->parameter_count());
2883 // The argument count parameter is a smi.
2884 __ SmiUntag(reg);
2885 __ sll(at, reg, kPointerSizeLog2);
2886 __ Addu(sp, sp, at);
2887 }
2889 __ Jump(ra);
2891 if (no_frame_start != -1) {
2892 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2893 }
2894 }
2897 template <class T>
2898 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2899 Register vector_register = ToRegister(instr->temp_vector());
2900 Register slot_register = LoadWithVectorDescriptor::SlotRegister();
2901 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2902 DCHECK(slot_register.is(a0));
2904 AllowDeferredHandleDereference vector_structure_check;
2905 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2906 __ li(vector_register, vector);
2907 // No need to allocate this register.
2908 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2909 int index = vector->GetIndex(slot);
2910 __ li(slot_register, Operand(Smi::FromInt(index)));
2911 }
2914 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2915 DCHECK(ToRegister(instr->context()).is(cp));
2916 DCHECK(ToRegister(instr->global_object())
2917 .is(LoadDescriptor::ReceiverRegister()));
2918 DCHECK(ToRegister(instr->result()).is(v0));
2920 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
2921 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
2922 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
2923 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
2924 PREMONOMORPHIC).code();
2925 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2926 }
2929 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2930 Register context = ToRegister(instr->context());
2931 Register result = ToRegister(instr->result());
2933 __ lw(result, ContextOperand(context, instr->slot_index()));
2934 if (instr->hydrogen()->RequiresHoleCheck()) {
2935 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2937 if (instr->hydrogen()->DeoptimizesOnHole()) {
2938 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
2939 } else {
2940 Label is_not_hole;
2941 __ Branch(&is_not_hole, ne, result, Operand(at));
2942 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2943 __ bind(&is_not_hole);
2944 }
2945 }
2946 }
2949 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2950 Register context = ToRegister(instr->context());
2951 Register value = ToRegister(instr->value());
2952 Register scratch = scratch0();
2953 MemOperand target = ContextOperand(context, instr->slot_index());
2955 Label skip_assignment;
2957 if (instr->hydrogen()->RequiresHoleCheck()) {
2958 __ lw(scratch, target);
2959 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2961 if (instr->hydrogen()->DeoptimizesOnHole()) {
2962 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
2963 } else {
2964 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2965 }
2966 }
2968 __ sw(value, target);
2969 if (instr->hydrogen()->NeedsWriteBarrier()) {
2970 SmiCheck check_needed =
2971 instr->hydrogen()->value()->type().IsHeapObject()
2972 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2973 __ RecordWriteContextSlot(context,
2974 target.offset(),
2975 value,
2976 scratch,
2977 GetRAState(),
2978 kSaveFPRegs,
2979 EMIT_REMEMBERED_SET,
2980 check_needed);
2981 }
2983 __ bind(&skip_assignment);
2984 }
2987 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2988 HObjectAccess access = instr->hydrogen()->access();
2989 int offset = access.offset();
2990 Register object = ToRegister(instr->object());
2992 if (access.IsExternalMemory()) {
2993 Register result = ToRegister(instr->result());
2994 MemOperand operand = MemOperand(object, offset);
2995 __ Load(result, operand, access.representation());
2996 return;
2997 }
2999 if (instr->hydrogen()->representation().IsDouble()) {
3000 DoubleRegister result = ToDoubleRegister(instr->result());
3001 __ ldc1(result, FieldMemOperand(object, offset));
3002 return;
3003 }
3005 Register result = ToRegister(instr->result());
3006 if (!access.IsInobject()) {
3007 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3008 object = result;
3009 }
3010 MemOperand operand = FieldMemOperand(object, offset);
3011 __ Load(result, operand, access.representation());
3012 }
3015 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3016 DCHECK(ToRegister(instr->context()).is(cp));
3017 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3018 DCHECK(ToRegister(instr->result()).is(v0));
3020 // Name is always in a2.
3021 __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
3022 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3023 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
3024 isolate(), NOT_CONTEXTUAL,
3025 instr->hydrogen()->initialization_state()).code();
3026 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3027 }
3030 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3031 Register scratch = scratch0();
3032 Register function = ToRegister(instr->function());
3033 Register result = ToRegister(instr->result());
3035 // Get the prototype or initial map from the function.
3036 __ lw(result,
3037 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3039 // Check that the function has a prototype or an initial map.
3040 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3041 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
3043 // If the function does not have an initial map, we're done.
3044 Label done;
3045 __ GetObjectType(result, scratch, scratch);
3046 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3048 // Get the prototype from the initial map.
3049 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3051 // All done.
3052 __ bind(&done);
3053 }
3056 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3057 Register result = ToRegister(instr->result());
3058 __ LoadRoot(result, instr->index());
3059 }
3062 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3063 Register arguments = ToRegister(instr->arguments());
3064 Register result = ToRegister(instr->result());
3065 // There are two words between the frame pointer and the last argument.
3066 // Subtracting the index from the length accounts for one of them; add one more for the other.
3067 if (instr->length()->IsConstantOperand()) {
3068 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3069 if (instr->index()->IsConstantOperand()) {
3070 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3071 int index = (const_length - const_index) + 1;
3072 __ lw(result, MemOperand(arguments, index * kPointerSize));
3073 } else {
3074 Register index = ToRegister(instr->index());
3075 __ li(at, Operand(const_length + 1));
3076 __ Subu(result, at, index);
3077 __ sll(at, result, kPointerSizeLog2);
3078 __ Addu(at, arguments, at);
3079 __ lw(result, MemOperand(at));
3080 }
3081 } else if (instr->index()->IsConstantOperand()) {
3082 Register length = ToRegister(instr->length());
3083 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3084 int loc = const_index - 1;
3085 if (loc != 0) {
3086 __ Subu(result, length, Operand(loc));
3087 __ sll(at, result, kPointerSizeLog2);
3088 __ Addu(at, arguments, at);
3089 __ lw(result, MemOperand(at));
3090 } else {
3091 __ sll(at, length, kPointerSizeLog2);
3092 __ Addu(at, arguments, at);
3093 __ lw(result, MemOperand(at));
3094 }
3095 } else {
3096 Register length = ToRegister(instr->length());
3097 Register index = ToRegister(instr->index());
3098 __ Subu(result, length, index);
3099 __ Addu(result, result, 1);
3100 __ sll(at, result, kPointerSizeLog2);
3101 __ Addu(at, arguments, at);
3102 __ lw(result, MemOperand(at));
3103 }
3104 }
3107 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3108 Register external_pointer = ToRegister(instr->elements());
3109 Register key = no_reg;
3110 ElementsKind elements_kind = instr->elements_kind();
3111 bool key_is_constant = instr->key()->IsConstantOperand();
3112 int constant_key = 0;
3113 if (key_is_constant) {
3114 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3115 if (constant_key & 0xF0000000) {
3116 Abort(kArrayIndexConstantValueTooBig);
3117 }
3118 } else {
3119 key = ToRegister(instr->key());
3120 }
3121 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3122 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3123 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3124 int base_offset = instr->base_offset();
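3125 // Float kinds go through the FPU (with a widening cvt_d_s for 32-bit floats); all integer kinds load into a general-purpose register.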
3126 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3127 elements_kind == FLOAT32_ELEMENTS ||
3128 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3129 elements_kind == FLOAT64_ELEMENTS) {
3130 FPURegister result = ToDoubleRegister(instr->result());
3131 if (key_is_constant) {
3132 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
3133 } else {
3134 __ sll(scratch0(), key, shift_size);
3135 __ Addu(scratch0(), scratch0(), external_pointer);
3136 }
3137 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3138 elements_kind == FLOAT32_ELEMENTS) {
3139 __ lwc1(result, MemOperand(scratch0(), base_offset));
3140 __ cvt_d_s(result, result);
3141 } else { // i.e. a 64-bit double elements kind.
3142 __ ldc1(result, MemOperand(scratch0(), base_offset));
3143 }
3144 } else {
3145 Register result = ToRegister(instr->result());
3146 MemOperand mem_operand = PrepareKeyedOperand(
3147 key, external_pointer, key_is_constant, constant_key,
3148 element_size_shift, shift_size, base_offset);
3149 switch (elements_kind) {
3150 case EXTERNAL_INT8_ELEMENTS:
3151 case INT8_ELEMENTS:
3152 __ lb(result, mem_operand);
3153 break;
3154 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3155 case EXTERNAL_UINT8_ELEMENTS:
3156 case UINT8_ELEMENTS:
3157 case UINT8_CLAMPED_ELEMENTS:
3158 __ lbu(result, mem_operand);
3159 break;
3160 case EXTERNAL_INT16_ELEMENTS:
3161 case INT16_ELEMENTS:
3162 __ lh(result, mem_operand);
3163 break;
3164 case EXTERNAL_UINT16_ELEMENTS:
3165 case UINT16_ELEMENTS:
3166 __ lhu(result, mem_operand);
3167 break;
3168 case EXTERNAL_INT32_ELEMENTS:
3169 case INT32_ELEMENTS:
3170 __ lw(result, mem_operand);
3171 break;
3172 case EXTERNAL_UINT32_ELEMENTS:
3173 case UINT32_ELEMENTS:
3174 __ lw(result, mem_operand);
3175 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3176 DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
3177 result, Operand(0x80000000));
3178 }
3179 break;
3180 case FLOAT32_ELEMENTS:
3181 case FLOAT64_ELEMENTS:
3182 case EXTERNAL_FLOAT32_ELEMENTS:
3183 case EXTERNAL_FLOAT64_ELEMENTS:
3184 case FAST_DOUBLE_ELEMENTS:
3185 case FAST_ELEMENTS:
3186 case FAST_SMI_ELEMENTS:
3187 case FAST_HOLEY_DOUBLE_ELEMENTS:
3188 case FAST_HOLEY_ELEMENTS:
3189 case FAST_HOLEY_SMI_ELEMENTS:
3190 case DICTIONARY_ELEMENTS:
3191 case SLOPPY_ARGUMENTS_ELEMENTS:
3192 UNREACHABLE();
3193 break;
3194 }
3195 }
3196 }
3199 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3200 Register elements = ToRegister(instr->elements());
3201 bool key_is_constant = instr->key()->IsConstantOperand();
3202 Register key = no_reg;
3203 DoubleRegister result = ToDoubleRegister(instr->result());
3204 Register scratch = scratch0();
3206 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3208 int base_offset = instr->base_offset();
3209 if (key_is_constant) {
3210 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3211 if (constant_key & 0xF0000000) {
3212 Abort(kArrayIndexConstantValueTooBig);
3213 }
3214 base_offset += constant_key * kDoubleSize;
3215 }
3216 __ Addu(scratch, elements, Operand(base_offset));
3218 if (!key_is_constant) {
3219 key = ToRegister(instr->key());
3220 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3221 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3222 __ sll(at, key, shift_size);
3223 __ Addu(scratch, scratch, at);
3224 }
3226 __ ldc1(result, MemOperand(scratch));
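3227 // A hole in a holey double array is a NaN whose upper word is kHoleNanUpper32; the check below reloads just that word.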
3228 if (instr->hydrogen()->RequiresHoleCheck()) {
3229 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
3230 DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
3231 Operand(kHoleNanUpper32));
3232 }
3233 }
3236 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3237 Register elements = ToRegister(instr->elements());
3238 Register result = ToRegister(instr->result());
3239 Register scratch = scratch0();
3240 Register store_base = scratch;
3241 int offset = instr->base_offset();
3243 if (instr->key()->IsConstantOperand()) {
3244 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3245 offset += ToInteger32(const_operand) * kPointerSize;
3246 store_base = elements;
3247 } else {
3248 Register key = ToRegister(instr->key());
3249 // Even though the HLoadKeyed instruction forces the input
3250 // representation for the key to be an integer, the input gets replaced
3251 // during bound check elimination with the index argument to the bounds
3252 // check, which can be tagged, so that case must be handled here, too.
3253 if (instr->hydrogen()->key()->representation().IsSmi()) {
3254 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
3255 __ addu(scratch, elements, scratch);
3256 } else {
3257 __ sll(scratch, key, kPointerSizeLog2);
3258 __ addu(scratch, elements, scratch);
3259 }
3260 }
3261 __ lw(result, MemOperand(store_base, offset));
3263 // Check for the hole value.
3264 if (instr->hydrogen()->RequiresHoleCheck()) {
3265 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3266 __ SmiTst(result, scratch);
3267 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
3268 Operand(zero_reg));
3269 } else {
3270 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3271 DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
3272 }
3273 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3274 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3275 Label done;
3276 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3277 __ Branch(&done, ne, result, Operand(scratch));
3278 if (info()->IsStub()) {
3279 // A stub can safely convert the hole to undefined only if the array
3280 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3281 // it needs to bail out.
3282 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3283 __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
3284 DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
3285 Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
3286 }
3287 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3288 __ bind(&done);
3289 }
3290 }
3293 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3294 if (instr->is_typed_elements()) {
3295 DoLoadKeyedExternalArray(instr);
3296 } else if (instr->hydrogen()->representation().IsDouble()) {
3297 DoLoadKeyedFixedDoubleArray(instr);
3298 } else {
3299 DoLoadKeyedFixedArray(instr);
3300 }
3301 }
3304 MemOperand LCodeGen::PrepareKeyedOperand(Register key,
3305 Register base,
3306 bool key_is_constant,
3307 int constant_key,
3308 int element_size,
3309 int shift_size,
3310 int base_offset) {
3311 if (key_is_constant) {
3312 return MemOperand(base, (constant_key << element_size) + base_offset);
3313 }
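3314 // Non-constant key: scale it into scratch0(); a shift_size of -1 means the key is a smi that must be shifted right by the tag size instead.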
3315 if (base_offset == 0) {
3316 if (shift_size >= 0) {
3317 __ sll(scratch0(), key, shift_size);
3318 __ Addu(scratch0(), base, scratch0());
3319 return MemOperand(scratch0());
3320 } else {
3321 DCHECK_EQ(-1, shift_size);
3322 __ srl(scratch0(), key, 1);
3323 __ Addu(scratch0(), base, scratch0());
3324 return MemOperand(scratch0());
3325 }
3326 }
3328 if (shift_size >= 0) {
3329 __ sll(scratch0(), key, shift_size);
3330 __ Addu(scratch0(), base, scratch0());
3331 return MemOperand(scratch0(), base_offset);
3332 } else {
3333 DCHECK_EQ(-1, shift_size);
3334 __ sra(scratch0(), key, 1);
3335 __ Addu(scratch0(), base, scratch0());
3336 return MemOperand(scratch0(), base_offset);
3337 }
3338 }
3341 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3342 DCHECK(ToRegister(instr->context()).is(cp));
3343 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3344 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3346 if (instr->hydrogen()->HasVectorAndSlot()) {
3347 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3348 }
3350 Handle<Code> ic =
3351 CodeFactory::KeyedLoadICInOptimizedCode(
3352 isolate(), instr->hydrogen()->initialization_state()).code();
3353 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3354 }
3357 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3358 Register scratch = scratch0();
3359 Register temp = scratch1();
3360 Register result = ToRegister(instr->result());
3362 if (instr->hydrogen()->from_inlined()) {
3363 __ Subu(result, sp, 2 * kPointerSize);
3364 } else {
3365 // Check if the calling frame is an arguments adaptor frame.
3366 Label done, adapted;
3367 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3368 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3369 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
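3370 // After the Xor, temp is zero exactly when the caller frame is an arguments adaptor; the Movn/Movz pair below then selects the right frame pointer without branching.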
3371 // Result is the frame pointer for the frame if not adapted and for the real
3372 // frame below the adaptor frame if adapted.
3373 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3374 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3375 }
3376 }
3379 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3380 Register elem = ToRegister(instr->elements());
3381 Register result = ToRegister(instr->result());
3383 Label done;
3385 // If there is no arguments adaptor frame, the number of arguments is fixed.
3386 __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
3387 __ Branch(&done, eq, fp, Operand(elem));
3389 // Arguments adaptor frame present. Get argument length from there.
3390 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3391 __ lw(result,
3392 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3393 __ SmiUntag(result);
3395 // Argument length is in result register.
3396 __ bind(&done);
3397 }
3400 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3401 Register receiver = ToRegister(instr->receiver());
3402 Register function = ToRegister(instr->function());
3403 Register result = ToRegister(instr->result());
3404 Register scratch = scratch0();
3406 // If the receiver is null or undefined, we have to pass the global
3407 // object as a receiver to normal functions. Values have to be
3408 // passed unchanged to builtins and strict-mode functions.
3409 Label global_object, result_in_receiver;
3411 if (!instr->hydrogen()->known_function()) {
3412 // Do not transform the receiver to object for strict mode
3413 // functions.
3414 __ lw(scratch,
3415 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3416 __ lw(scratch,
3417 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3419 // Do not transform the receiver to object for builtins.
3420 int32_t strict_mode_function_mask =
3421 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
3422 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
3423 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3424 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
3425 }
3427 // Normal function. Replace undefined or null with global receiver.
3428 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3429 __ Branch(&global_object, eq, receiver, Operand(scratch));
3430 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3431 __ Branch(&global_object, eq, receiver, Operand(scratch));
3433 // Deoptimize if the receiver is not a JS object.
3434 __ SmiTst(receiver, scratch);
3435 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
3437 __ GetObjectType(receiver, scratch, scratch);
3438 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
3439 Operand(FIRST_SPEC_OBJECT_TYPE));
3441 __ Branch(&result_in_receiver);
3442 __ bind(&global_object);
3443 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3444 __ lw(result,
3445 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3446 __ lw(result,
3447 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3449 if (result.is(receiver)) {
3450 __ bind(&result_in_receiver);
3451 } else {
3452 Label result_ok;
3453 __ Branch(&result_ok);
3454 __ bind(&result_in_receiver);
3455 __ mov(result, receiver);
3456 __ bind(&result_ok);
3457 }
3458 }
3461 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3462 Register receiver = ToRegister(instr->receiver());
3463 Register function = ToRegister(instr->function());
3464 Register length = ToRegister(instr->length());
3465 Register elements = ToRegister(instr->elements());
3466 Register scratch = scratch0();
3467 DCHECK(receiver.is(a0)); // Used for parameter count.
3468 DCHECK(function.is(a1)); // Required by InvokeFunction.
3469 DCHECK(ToRegister(instr->result()).is(v0));
3471 // Copy the arguments to this function possibly from the
3472 // adaptor frame below it.
3473 const uint32_t kArgumentsLimit = 1 * KB;
3474 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
3475 Operand(kArgumentsLimit));
3477 // Push the receiver and use the register to keep the original
3478 // number of arguments.
3479 __ push(receiver);
3480 __ Move(receiver, length);
3481 // The arguments are at a one pointer size offset from elements.
3482 __ Addu(elements, elements, Operand(1 * kPointerSize));
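3483 // Each sll below sits in a branch delay slot, so the next element offset is computed whether or not the branch is taken.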
3484 // Loop through the arguments pushing them onto the execution
3485 // stack.
3486 Label invoke, loop;
3487 // length is a small non-negative integer, due to the test above.
3488 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3489 __ sll(scratch, length, 2);
3490 __ bind(&loop);
3491 __ Addu(scratch, elements, scratch);
3492 __ lw(scratch, MemOperand(scratch));
3493 __ push(scratch);
3494 __ Subu(length, length, Operand(1));
3495 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3496 __ sll(scratch, length, 2);
3498 __ bind(&invoke);
3499 DCHECK(instr->HasPointerMap());
3500 LPointerMap* pointers = instr->pointer_map();
3501 SafepointGenerator safepoint_generator(
3502 this, pointers, Safepoint::kLazyDeopt);
3503 // The number of arguments is stored in receiver which is a0, as expected
3504 // by InvokeFunction.
3505 ParameterCount actual(receiver);
3506 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3507 }
3510 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3511 LOperand* argument = instr->value();
3512 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3513 Abort(kDoPushArgumentNotImplementedForDoubleType);
3514 } else {
3515 Register argument_reg = EmitLoadRegister(argument, at);
3516 __ push(argument_reg);
3517 }
3518 }
3521 void LCodeGen::DoDrop(LDrop* instr) {
3522 __ Drop(instr->count());
3523 }
3526 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3527 Register result = ToRegister(instr->result());
3528 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3529 }
3532 void LCodeGen::DoContext(LContext* instr) {
3533 // If there is a non-return use, the context must be moved to a register.
3534 Register result = ToRegister(instr->result());
3535 if (info()->IsOptimizing()) {
3536 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3537 } else {
3538 // If there is no frame, the context must be in cp.
3539 DCHECK(result.is(cp));
3540 }
3541 }
3544 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3545 DCHECK(ToRegister(instr->context()).is(cp));
3546 __ li(scratch0(), instr->hydrogen()->pairs());
3547 __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
3548 // The context is the first argument.
3549 __ Push(cp, scratch0(), scratch1());
3550 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3551 }
3554 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3555 int formal_parameter_count, int arity,
3556 LInstruction* instr) {
3557 bool dont_adapt_arguments =
3558 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3559 bool can_invoke_directly =
3560 dont_adapt_arguments || formal_parameter_count == arity;
3562 Register function_reg = a1;
3563 LPointerMap* pointers = instr->pointer_map();
3565 if (can_invoke_directly) {
3566 // Change context.
3567 __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3569 // Set a0 to the arguments count if adaptation is not needed. Assumes that
3570 // a0 is available to write to at this point.
3571 if (dont_adapt_arguments) {
3572 __ li(a0, Operand(arity));
3573 }
3575 // Invoke function.
3576 __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3577 __ Call(at);
3579 // Set up deoptimization.
3580 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3581 } else {
3582 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3583 ParameterCount count(arity);
3584 ParameterCount expected(formal_parameter_count);
3585 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3586 }
3587 }
3590 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3591 DCHECK(instr->context() != NULL);
3592 DCHECK(ToRegister(instr->context()).is(cp));
3593 Register input = ToRegister(instr->value());
3594 Register result = ToRegister(instr->result());
3595 Register scratch = scratch0();
3597 // Deoptimize if not a heap number.
3598 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3599 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3600 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
3602 Label done;
3603 Register exponent = scratch0();
3604 scratch = no_reg;
3605 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3606 // Check the sign of the argument. If the argument is positive, just
3607 // return it.
3608 __ Move(result, input);
3609 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3610 __ Branch(&done, eq, at, Operand(zero_reg));
3612 // Input is negative. Reverse its sign.
3613 // Preserve the value of all registers.
3614 {
3615 PushSafepointRegistersScope scope(this);
3617 // Registers were saved at the safepoint, so we can use
3618 // many scratch registers.
3619 Register tmp1 = input.is(a1) ? a0 : a1;
3620 Register tmp2 = input.is(a2) ? a0 : a2;
3621 Register tmp3 = input.is(a3) ? a0 : a3;
3622 Register tmp4 = input.is(t0) ? a0 : t0;
3624 // exponent: floating point exponent value.
3626 Label allocated, slow;
3627 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3628 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3629 __ Branch(&allocated);
3631 // Slow case: Call the runtime system to do the number allocation.
3634 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3636 // Set the pointer to the new heap number in tmp.
3639 // Restore input_reg after call to runtime.
3640 __ LoadFromSafepointRegisterSlot(input, input);
3641 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3643 __ bind(&allocated);
3644 // exponent: floating point exponent value.
3645 // tmp1: allocated heap number.
3646 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3647 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3648 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3649 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3651 __ StoreToSafepointRegisterSlot(tmp1, result);
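// Worked example (not generated code): the heap number -2.5 has the word
// pair exponent = 0xC0040000, mantissa = 0x00000000. Clearing
// HeapNumber::kSignMask (0x80000000) in the exponent word gives 0x40040000,
// i.e. +2.5; the mantissa word is copied over unchanged.
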
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
  __ bind(&done);
}

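// The overflow check matters for exactly one input: kMinInt (0x80000000).
// subu(zero_reg, 0x80000000) wraps back around to 0x80000000, which is
// still negative, so the lt test above deopts instead of returning a wrong
// absolute value.
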
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}

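// Note on the tagged case above: smis are untagged and handled by the same
// integer path as Integer32 inputs; only non-smi heap numbers take the
// out-of-line deferred path, which re-enters at deferred->exit().
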
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ Mfhc1(scratch1, input);
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                 Operand(zero_reg));
    __ bind(&done);
  }
}

void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ Mfhc1(result, input);
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in [-0.5, 0[ and the result should be -0.
  __ Mfhc1(result, double_scratch0());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'.
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'.
    // Negating it results in 'ge'.
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
               Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ Mfhc1(scratch, input);
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
                 Operand(zero_reg));
  }
  __ bind(&done);
}

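// Two cases worth tracing through the code above:
//  - input 0.49: its biased exponent is kExponentBias - 2, so the branch to
//    skip1 is not taken and the result is 0 (with an optional -0 check).
//  - input -0.5: -0.5 + 0.5 is +0 under round-to-nearest, so the sign of
//    double_scratch0() differs from the saved sign and the result must be
//    -0; with kBailoutOnMinusZero set this deopts, otherwise it returns 0.
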
void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ cvt_s_d(result.low(), input);
  __ cvt_d_s(result, result.low());
}

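// Math.fround rounds through float32: e.g. fround(1.1) becomes
// 1.100000023841858, the nearest single-precision value widened back to a
// double by the cvt_s_d/cvt_d_s pair above.
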
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}

void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  DCHECK(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, static_cast<double>(-V8_INFINITY));
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}

void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(f2));
  DCHECK(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!t3.is(tagged_exponent));
    __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}

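// The register contract above follows from the DCHECKs: base in f2, a
// double exponent in f4 (or a tagged exponent in the descriptor's GP
// register), result in f0. Only the tagged case can observe a non-number
// exponent, which is why it is the only clause with a deopt.
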
void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                   0, 1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ Clz(result, input);
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}

void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Jump(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ Call(target);
    }
    generator.AfterCall();
  }
}

void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  if (instr->hydrogen()->pass_argument_count()) {
    __ li(a0, Operand(instr->arity()));
  }

  // Change context.
  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

  // Load the code entry address and call it.
  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ Call(at);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(a3));
    DCHECK(vector_register.is(a2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ li(vector_register, vector);
    __ li(slot_register, Operand(Smi::FromInt(index)));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}

void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  // No cell in a2 for construct type feedback in optimized code.
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}

void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(a1));
  DCHECK(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ li(a2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to switch to the holey kind here;
      // look at the first argument.
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}

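// With exactly one argument the call may be growing (new Array(n)) or
// degenerate (new Array(0)); only a nonzero length produces holes, so the
// code above peeks at the argument on the stack and switches to the holey
// single-argument stub unless the length is zero.
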
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Addu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}

void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Addu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Addu(result, base, offset);
  }
}

void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    __ Store(value, operand, representation);
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          instr->hydrogen()->SmiCheckForWriteBarrier(),
                          instr->hydrogen()->PointersToHereCheckForValue());
    }
  }
}

void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      StoreIC::initialize_stub(isolate(), instr->language_mode(),
                               instr->hydrogen()->initialization_state());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand(0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    operand = ToOperand(instr->index());
    reg = ToRegister(instr->length());
    cc = CommuteCondition(cc);
  } else {
    reg = ToRegister(instr->index());
    operand = ToOperand(instr->length());
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(cc), reg, operand);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
  }
}

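// A single unsigned comparison covers both bounds: a negative index
// reinterpreted as unsigned is huge, so "index hs length" (or hi when
// equality is allowed) catches index < 0 as well as index >= length.
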
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Addu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ sll(address, key, shift_size);
      __ Addu(address, external_pointer, address);
    }

    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        base_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        __ sb(value, mem_operand);
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        __ sh(value, mem_operand);
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        __ sw(value, mem_operand);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int base_offset = instr->base_offset();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Addu(scratch, elements,
            Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ Addu(scratch, elements, Operand(base_offset));
    __ sll(at, ToRegister(instr->key()), shift_size);
    __ Addu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load canonical NaN if the comparison above set the overflow.
    __ bind(&is_nan);
    __ LoadRoot(at, Heap::kNanValueRootIndex);
    __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
    __ sdc1(double_scratch, MemOperand(scratch, 0));
    __ Branch(&done);
  }

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, 0));
  __ bind(&done);
}

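// Canonicalization is required because holey double arrays reserve one
// particular NaN bit pattern as the hole marker; storing an arbitrary NaN
// payload unchanged could later be misread as the hole, so every NaN is
// rewritten to the canonical NaN value before the store.
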
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
  }
  __ sw(value, MemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}

void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double and fast.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
      isolate(), instr->language_mode(),
      instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = v0;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
              Operand(constant_key));
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(constant_capacity));
  } else {
    __ Branch(deferred->entry(), ge, ToRegister(key),
              Operand(ToRegister(current_capacity)));
  }

  if (instr->elements()->IsRegister()) {
    __ mov(result, ToRegister(instr->elements()));
  } else {
    __ lw(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = v0;
  __ mov(result, zero_reg);

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ mov(result, ToRegister(instr->object()));
    } else {
      __ lw(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
    } else {
      __ mov(a3, ToRegister(key));
      __ SmiTag(a3);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ mov(a0, result);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ SmiTst(result, at);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
}

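// The stub returns either the (possibly reallocated) elements backing store
// or a smi sentinel when growth failed; the smi test above deopts in the
// failure case rather than continuing with dictionary-mode elements that
// the optimized code cannot handle.
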
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    DCHECK(object_reg.is(a0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ li(a1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}

void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  DeoptimizeIf(al, instr);
  __ bind(&no_memento_found);
}

void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(a1));
  DCHECK(ToRegister(instr->right()).is(a0));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  DCHECK(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}

void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
}

void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
                                       UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}

void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp1,
                                     LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ Subu(v0, v0, kHeapObjectTag);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
  __ Addu(dst, dst, kHeapObjectTag);
}

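// Recovering the input after a tagging overflow, traced for the aliased
// (dst.is(src)) case above: for src = 0x40000000 (1 << 30),
// SmiTagCheckOverflow leaves dst = 0x80000000. SmiUntag arithmetic-shifts
// that back to 0xC0000000, and the Xor with 0x80000000 restores the
// original 0x40000000, which is then converted to a double and boxed.
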
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
  __ Addu(reg, reg, kHeapObjectTag);
}

void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Subu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}

void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ And(at, input, Operand(0xc0000000));
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}

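// Uint32 inputs overflow the smi range whenever bit 30 or 31 is set, hence
// the 0xc0000000 mask: e.g. 0x40000000 (2^30) exceeds Smi::kMaxValue
// (2^30 - 1) even though it is a positive int32. For the signed case,
// SmiTagCheckOverflow roughly XORs the doubled value with the original, so
// the scratch register is negative exactly when the tag shift changed the
// sign bit, which the lt deopt catches.
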
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}

void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
                   Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ Mfhc1(scratch, result_reg);
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
                   Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
                   Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}

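// Both +0 and -0 have an all-zero low word, so the low-word test in the
// minus-zero path above only filters out values that cannot be a zero at
// all; -0 itself is identified by a high word equal to
// HeapNumber::kSignMask (0x80000000).
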
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
    __ mov(scratch2, input_reg);  // In delay slot.
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    DCHECK(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
                 scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
                 Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ Mfhc1(scratch1, double_scratch);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}

void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}

void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
                 Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ Mfhc1(scratch1, double_input);
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
}

void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input), at);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
}

void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
    } else {
      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
                     Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
                   Operand(tag));
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(cell));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
  }
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ SmiTst(scratch0(), at);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
               Operand(zero_reg));
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
  }

  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}


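// Illustrative note (added): the clamp operations above implement the
// Uint8ClampedArray conversion, mapping every input into [0, 255]; e.g. a
// smi of 300 clamps to 255 and -7 clamps to 0. Doubles are assumed here to
// be rounded to the nearest representable integer by ClampDoubleToUint8.

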
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ FmoveHigh(result_reg, value_reg);
  } else {
    __ FmoveLow(result_reg, value_reg);
  }
}


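// Illustrative example (added): for the double 1.0, whose IEEE-754 bit
// pattern is 0x3FF0000000000000, FmoveHigh yields 0x3FF00000 and FmoveLow
// yields 0x00000000.

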
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
  __ Move(result_reg, lo_reg, hi_reg);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ jmp(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


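// Worked example (added, assuming kPointerSize == 4 and kHeapObjectTag == 1):
// for a constant size of 16 bytes, scratch starts at 15 and the filler loop
// above stores the one-pointer filler map at untagged offsets 12, 8, 4 and 0
// before scratch goes negative, covering every word of the new object.

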
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ Push(Smi::FromInt(size));
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


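// Note (added): the second runtime argument packs the allocation flags into
// a smi via the AllocateDoubleAlignFlag and AllocateTargetSpace bit fields,
// so Runtime::kAllocateInTargetSpace can recover both the alignment
// requirement and the target space from a single tagged value.

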
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(a0));
  DCHECK(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ li(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


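// Worked example (added): if size were 5 * kPointerSize, the unrolled loop
// above would copy word pairs at offsets 0/4 and 8/12, and the trailing
// (size % (2 * kPointerSize) != 0) check would copy the final word at
// offset 16.

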
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(a3));
  DCHECK(ToRegister(instr->result()).is(v0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ li(v0, Operand(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  &cmp1,
                                                  &cmp2);

  DCHECK(cmp1.is_valid());
  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register* cmp1,
                                 Operand* cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    *cmp1 = input;
    *cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take
    // the other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    *cmp1 = scratch;
    *cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    *cmp1 = at;
    *cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    *cmp1 = input;
    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the BitField can be loaded in delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    *cmp1 = at;
    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


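// Note (added): on MIPS the instruction following a branch executes
// unconditionally (the branch delay slot); USE_DELAY_SLOT asks the macro
// assembler to place the next emitted instruction in that slot. In the
// "boolean" case above, for example, the LoadRoot of the false value
// executes whether or not the branch to true_label is taken, which is
// harmless because the comparison result is only consumed on fall-through.

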
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}


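// Note (added): the walk above reads the caller's frame pointer, skips one
// arguments-adaptor frame if its context slot holds the ARGUMENTS_ADAPTOR
// marker, and leaves the frame's marker slot in temp1 for the comparison
// against StackFrame::CONSTRUCT in DoIsConstructCallAndBranch above.

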
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


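// Worked example (added, with Assembler::kInstrSize == 4 on MIPS): if the
// previous lazy-deopt point was at pc offset 96, space_needed is 16 and the
// current pc offset is 104, then padding_size is 8 and two nops are emitted
// before last_lazy_deopt_pc_ is updated.

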
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (the needed return address), even though the implementation of LAZY and
  // EAGER is now identical. When LAZY is eventually completely folded into
  // EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
               Operand(zero_reg));
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
               Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ mov(cp, zero_reg);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

  __ And(scratch, index, Operand(Smi::FromInt(1)));
  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
  __ sra(index, index, 1);

  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


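// Worked example (added, based on the encoding used above): the incoming
// index is a smi whose low payload bit flags a mutable-double field, which
// is handled in deferred code. After the sra the register holds
// smi(field_index): a non-negative field_index loads in-object at
// field_index * kPointerSize past JSObject::kHeaderSize, while
// field_index == -(k + 1) loads element k of the out-of-object properties
// array.

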
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ li(at, scope_info);
  __ Push(at, ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __

}  // namespace internal
}  // namespace v8