// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"
namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
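
// Usage sketch (illustrative, not from this file): a call site constructs
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
// and passes it as the CallWrapper so that AfterCall() records the safepoint
// immediately after the emitted call instruction.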

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
            GenerateJumpTable() && GenerateSafepointTable();
  if (FLAG_enable_embedded_constant_pool && !rc) {
    masm()->AbortConstantPoolBuilding();
  }
  return rc;
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
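
// Layout sketch (illustrative): if the doubles with allocation indices 1 and
// 4 are the only ones allocated, SaveCallerDoubles stores the first at
// MemOperand(sp, 0) and the second at MemOperand(sp, kDoubleSize), and the
// loop above reloads them from the same offsets.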


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r4: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (is_sloppy(info_->language_mode()) && info_->MayUseThis() &&
        !info_->is_native() && info_->scope()->has_this_declaration()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadP(r5, MemOperand(sp, receiver_offset));
      __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
      __ bne(&ok);

      __ LoadP(r5, GlobalObjectOperand());
      __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));

      __ StoreP(r5, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }
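
  // Semantics note (illustrative): in sloppy mode a call like
  // f.call(undefined) must observe the global proxy as |this|, so an
  // undefined receiver slot is overwritten with the global proxy before the
  // function body runs.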

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.
    prologue_offset += Instruction::kInstrSize;
    __ addi(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(prologue_offset);
    } else {
      __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ subi(sp, sp, Operand(slots * kPointerSize));
    if (FLAG_debug_code) {
      __ Push(r3, r4);
      __ li(r0, Operand(slots));
      __ mtctr(r0);
      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
      __ mov(r4, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StorePU(r4, MemOperand(r3, -kPointerSize));
      __ bdnz(&loop);
      __ Pop(r3, r4);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r4.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r4);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r3 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mr(cp, r3);
    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r3, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ StoreP(r3, target, r0);
        // Update the write barrier. This clobbers r6 and r3.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r3, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subi(sp, sp, Operand(slots * kPointerSize));
}
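
// Sizing example (illustrative): if the optimized frame needs 12 slots and
// the unoptimized frame being subsumed already provides 5, only
// 7 * kPointerSize bytes are reserved here.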


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushFixedFrame(scratch0());
        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopFixedFrame(ip);
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }
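
  // Range note (illustrative): the check above budgets 7 instruction slots
  // per table entry on top of the code emitted so far, so e.g. 1000 pending
  // entries add 7000 instructions to the span that must stay within the
  // signed 24-bit (is_int24) branch range.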

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));
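
      // Example (illustrative): if this entry's address is base + 0x30,
      // entry_offset receives 0x30 here and the absolute target is rebuilt
      // below at call_deopt_entry as base + 0x30.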

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ b(&needs_frame, SetLK);
      } else {
        __ b(&call_deopt_entry, SetLK);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ add(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
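
// Offset example (illustrative): index -1 maps to MemOperand(sp, 0) and
// index -2 to MemOperand(sp, kPointerSize), per the formula above.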


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
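
// Counting example (illustrative): for an environment chain describing f
// inlined into g (two JS_FUNCTION frames) plus one arguments-adaptor frame,
// frame_count ends up as 3 while jsframe_count is 2.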


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    CRegister alt_cr = cr6;
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    DCHECK(!alt_cr.is(cr));
    __ Push(r4, scratch);
    __ mov(scratch, Operand(count));
    __ lwz(r4, MemOperand(scratch));
    __ subi(r4, r4, Operand(1));
    __ cmpi(r4, Operand::Zero(), alt_cr);
    __ bne(&no_deopt, alt_cr);
    __ li(r4, Operand(FLAG_deopt_every_n_times));
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label, cr);
  }
}
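
// Stress-deopt sketch (illustrative): with --deopt-every-n-times=N, the code
// above keeps a countdown in stress_deopt_count; each eligible deopt site
// decrements it, and when it reaches zero the counter is reset to N and the
// code calls the deopt entry unconditionally.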


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmpwi(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ neg(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ neg(dividend, dividend, LeaveOE, SetRC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ li(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ li(dividend, Operand::Zero());
  }
  __ bind(&done);
}
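
// Worked example (illustrative): -5 % 4 with shift == 2: the dividend is
// negated to 5, ExtractBitRange keeps 5 & 3 == 1, and the final neg yields
// -1, matching JavaScript's -5 % 4 == -1 (the result takes the dividend's
// sign).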


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ mullw(result, result, ip);
  __ sub(result, dividend, result, LeaveOE, SetRC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, cr0);
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
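
// The sequence above computes dividend - (dividend / divisor) * divisor;
// e.g. (illustrative) 7 % 3: TruncatingDiv produces 2 and 7 - 2 * 3 == 1.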


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
  Label done;

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1; divw will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (can_overflow) {
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result_reg, r0, result_reg, cr0);
        __ boverflow(&done, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ li(result_reg, Operand::Zero());
        __ b(&done);
        __ bind(&no_overflow_possible);
      }
    }
  }

  __ mullw(scratch, right_reg, scratch);
  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, cr0);
    __ cmpwi(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}
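
// Overflow-path note (illustrative): kMinInt % -1 sets the overflow bit on
// divw, but the JavaScript result is -0; hence the deopt above when -0
// matters, and the materialized zero result when it does not.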


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ neg(result, dividend);
    return;
  }
  if (shift == 0) {
    __ mr(result, dividend);
  } else {
    if (shift == 1) {
      __ srwi(result, dividend, Operand(31));
    } else {
      __ srawi(result, dividend, 31);
      __ srwi(result, result, Operand(32 - shift));
    }
    __ add(result, dividend, result);
    __ srawi(result, result, shift);
  }
  if (divisor < 0) __ neg(result, result);
}
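
// Rounding example (illustrative) for shift == 2, dividend == -7: srawi(31)
// gives -1, srwi(30) turns that into 3 (the divisor minus one), the add
// yields -4, and srawi(2) produces -1, the truncated quotient of -7 / 4.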


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ mullw(scratch, result, ip);
    __ cmpw(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    Register scratch = scratch0();
    __ mullw(scratch, divisor, result);
    __ cmpw(dividend, scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ srawi(result, dividend, shift);
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
  if (divisor == -1 && can_overflow) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#else
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    oe = SetOE;
  }
#endif

  __ neg(result, dividend, oe, SetRC);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

  // If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
  if (!can_overflow) {
#endif
    __ ShiftRightArithImm(result, result, shift);
    return;
#if !V8_TARGET_ARCH_PPC64
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow, done;
  __ boverflow(&overflow, cr0);
  __ srawi(result, result, shift);
  __ b(&done);
  __ bind(&overflow);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmpwi(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ b(&done);
  __ bind(&needs_adjustment);
  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ subi(result, result, Operand(1));
  __ bind(&done);
}
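
// Adjustment example (illustrative): floor(-7 / 2): a negative dividend with
// a positive divisor takes the needs_adjustment path, so temp = -7 + 1 = -6,
// TruncatingDiv gives -3, and the final subi produces -4 == floor(-3.5).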


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
#if V8_TARGET_ARCH_PPC64
  __ xor_(scratch, dividend, divisor);
  __ cmpwi(scratch, Operand::Zero());
  __ bge(&done);
#else
  __ xor_(scratch, dividend, divisor, SetRC);
  __ bge(&done, cr0);
#endif

  // If there is no remainder then we are done.
  __ mullw(scratch, divisor, result);
  __ cmpw(dividend, scratch);
  __ beq(&done);

  // We performed a truncating division. Correct the result.
  __ subi(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmadd(result, multiplier, multiplicand, addend);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmsub(result, multiplier, multiplicand, minuend);
}
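
// Precision note: fmadd/fmsub are fused multiply-add instructions on PPC;
// the intermediate product is not rounded before the addend is applied, so
// the result can differ in the last bit from separate fmul and fadd/fsub
// instructions.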


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If the constant is negative and left is zero, the result should be -0.
      __ cmpi(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ li(r0, Operand::Zero());  // clear xer
            __ mtxer(r0);
            __ neg(result, left, SetOE, SetRC);
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
          } else {
            __ neg(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
          }
#endif
        } else {
          __ neg(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ cmpi(left, Operand::Zero());
#if V8_TARGET_ARCH_PPC64
          } else {
            __ cmpwi(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
        }
        __ li(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
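
        // Branch-free abs (illustrative): for constant == -6, mask == -1 and
        // constant_abs == (-6 + -1) ^ -1 == 6; for a non-negative constant,
        // mask == 0 and the value is unchanged.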

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftImm(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ add(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ sub(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ Mul(result, left, ip);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ Mul(result, result, scratch);
      } else {
        __ Mul(result, left, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mulhw(scratch, result, right);
        __ mullw(result, result, right);
      } else {
        __ mulhw(scratch, left, right);
        __ mullw(result, left, right);
      }
      __ TestIfInt32(scratch, result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_PPC64
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ xor_(r0, left, right, SetRC);
        __ bge(&done, cr0);
#if V8_TARGET_ARCH_PPC64
      } else {
        __ xor_(r0, left, right);
        __ cmpwi(r0, Operand::Zero());
        __ bge(&done);
      }
#endif
      // Bail out if the result is minus zero.
      __ cmpi(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}
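
// Overflow-check note (sketch): on 32-bit targets the mulhw/mullw pair above
// forms the full 64-bit product; it fits in an int32 exactly when the high
// word equals the sign-extension of the low word, which is the property
// TestIfInt32 checks before the kOverflow deopt.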


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andi(result, left, right);
        break;
      case Token::BIT_OR:
        __ ori(result, left, right);
        break;
      case Token::BIT_XOR:
        __ xori(result, left, right);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ And(result, left, right);
        break;
      case Token::BIT_OR:
        __ Or(result, left, right);
        break;
      case Token::BIT_XOR:
        if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
          __ notx(result, left);
        } else {
          __ Xor(result, left, right);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ subfic(scratch, scratch, Operand(32));
        __ rotlw(result, left, scratch);
        break;
      case Token::SAR:
        __ sraw(result, left, scratch);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ srw(result, left, scratch, SetRC);
#if V8_TARGET_ARCH_PPC64
          __ extsw(result, result, SetRC);
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
        } else {
          __ srw(result, left, scratch);
        }
        break;
      case Token::SHL:
        __ slw(result, left, scratch);
#if V8_TARGET_ARCH_PPC64
        __ extsw(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rotrwi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ srawi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srwi(result, left, Operand(shift_count));
        } else {
          if (instr->can_deopt()) {
            __ cmpwi(left, Operand::Zero());
            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ sldi(result, left, Operand(shift_count));
#else
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ slwi(result, left, Operand(shift_count - 1));
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
          } else {
            __ slwi(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_PPC64
            __ extsw(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
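
// SHR deopt note (illustrative): an unsigned shift by zero such as
// (x >>> 0) with x == -1 yields 4294967295, which is not representable as a
// signed 32-bit value, so the lt checks above deoptimize with kNegativeValue
// instead of producing a wrong result.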


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
#else
  const bool isInteger = false;
#endif
  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, -(ToOperand(right).immediate()), r0);
    } else {
      __ sub(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
                                scratch0(), r0);
    } else {
      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

  Operand right_operand = ToOperand(right);
  if (is_int16(right_operand.immediate())) {
    __ subfic(ToRegister(result), ToRegister(left), right_operand);
  } else {
    __ mov(r0, right_operand);
    __ sub(ToRegister(result), r0, ToRegister(left));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ mov(ip, Operand(lo));
    __ mov(scratch0(), Operand(hi));
    __ MovInt64ToDouble(result, scratch0(), ip);
    return;
  }
#endif
  double v = instr->value();
  __ LoadDoubleLiteral(result, v, scratch0());
}
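
// Bit-pattern note (sketch): the mask test above matches any double whose
// exponent field is all ones and whose quiet-NaN bit is clear, i.e.
// signaling NaNs (infinities also match but load correctly either way), and
// routes them through the integer move to dodge the x87 sNaN -> qNaN
// conversion in simulator builds.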


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  DCHECK(object.is(result));
  DCHECK(object.is(r3));
  DCHECK(!scratch.is(scratch0()));
  DCHECK(!scratch.is(object));

  if (index->value() == 0) {
    __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    Label runtime, done;
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ LoadP(scratch, MemOperand(scratch));
      __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ bne(&runtime);
      __ LoadP(result,
               FieldMemOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ b(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ LoadSmiLiteral(r4, index);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
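
// Caching note (sketch): index 0 is the primitive time value stored in the
// JSDate object itself; the other fields (year, month, ...) are cached and
// only trusted while the object's cache stamp matches the isolate-wide
// date_cache_stamp checked above, otherwise the C function recomputes them.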


MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ add(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
    __ add(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}
1941 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1942 String::Encoding encoding = instr->hydrogen()->encoding();
1943 Register string = ToRegister(instr->string());
1944 Register result = ToRegister(instr->result());
1946 if (FLAG_debug_code) {
1947 Register scratch = scratch0();
1948 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1949 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1951 __ andi(scratch, scratch,
1952 Operand(kStringRepresentationMask | kStringEncodingMask));
1953 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1954 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
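// Compare against the representation|encoding bits expected for a
// sequential string of the given encoding.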
1955 __ cmpi(scratch,
1956 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
1957 : two_byte_seq_type));
1958 __ Check(eq, kUnexpectedStringType);
1959 }
1961 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1962 if (encoding == String::ONE_BYTE_ENCODING) {
1963 __ lbz(result, operand);
1964 } else {
1965 __ lhz(result, operand);
1966 }
1970 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
1971 String::Encoding encoding = instr->hydrogen()->encoding();
1972 Register string = ToRegister(instr->string());
1973 Register value = ToRegister(instr->value());
1975 if (FLAG_debug_code) {
1976 Register index = ToRegister(instr->index());
1977 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1978 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1979 static const uint32_t encoding_mask =
1980 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
1981 ? one_byte_seq_type
1982 : two_byte_seq_type;
1983 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
1984 }
1986 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1987 if (encoding == String::ONE_BYTE_ENCODING) {
1988 __ stb(value, operand);
1989 } else {
1990 __ sth(value, operand);
1991 }
1995 void LCodeGen::DoAddI(LAddI* instr) {
1996 LOperand* right = instr->right();
1997 Register left = ToRegister(instr->left());
1998 Register result = ToRegister(instr->result());
1999 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2000 #if V8_TARGET_ARCH_PPC64
2001 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2002 instr->hydrogen()->representation().IsExternal());
2003 #else
2004 const bool isInteger = false;
2005 #endif
2007 if (!can_overflow || isInteger) {
2008 if (right->IsConstantOperand()) {
2009 __ Add(result, left, ToOperand(right).immediate(), r0);
2010 } else {
2011 __ add(result, left, EmitLoadRegister(right, ip));
2012 }
2013 #if V8_TARGET_ARCH_PPC64
2014 if (can_overflow) {
2015 __ TestIfInt32(result, r0);
2016 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
2017 }
2018 #endif
2019 } else {
2020 if (right->IsConstantOperand()) {
2021 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
2022 scratch0(), r0);
2023 } else {
2024 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
2025 scratch0(), r0);
2026 }
2027 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
2032 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2033 LOperand* left = instr->left();
2034 LOperand* right = instr->right();
2035 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2036 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
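// For min, keep the left operand when left <= right; for max, when
// left >= right.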
2037 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2038 Register left_reg = ToRegister(left);
2039 Register right_reg = EmitLoadRegister(right, ip);
2040 Register result_reg = ToRegister(instr->result());
2041 Label return_left, done;
2042 #if V8_TARGET_ARCH_PPC64
2043 if (instr->hydrogen_value()->representation().IsSmi()) {
2044 #endif
2045 __ cmp(left_reg, right_reg);
2046 #if V8_TARGET_ARCH_PPC64
2047 } else {
2048 __ cmpw(left_reg, right_reg);
2049 }
2050 #endif
2051 if (CpuFeatures::IsSupported(ISELECT)) {
2052 __ isel(cond, result_reg, left_reg, right_reg);
2053 } else {
2054 __ b(cond, &return_left);
2055 __ Move(result_reg, right_reg);
2056 __ b(&done);
2057 __ bind(&return_left);
2058 __ Move(result_reg, left_reg);
2059 __ bind(&done);
2060 }
2062 DCHECK(instr->hydrogen()->representation().IsDouble());
2063 DoubleRegister left_reg = ToDoubleRegister(left);
2064 DoubleRegister right_reg = ToDoubleRegister(right);
2065 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2066 Label check_nan_left, check_zero, return_left, return_right, done;
2067 __ fcmpu(left_reg, right_reg);
2068 __ bunordered(&check_nan_left);
2069 __ beq(&check_zero);
2070 __ b(cond, &return_left);
2071 __ b(&return_right);
2073 __ bind(&check_zero);
2074 __ fcmpu(left_reg, kDoubleRegZero);
2075 __ bne(&return_left); // left == right != 0.
2077 // At this point, both left and right are either 0 or -0.
2078 // N.B. The following works because +0 + -0 == +0
2079 if (operation == HMathMinMax::kMathMin) {
2080 // For min we want logical-or of sign bit: -(-L + -R)
2081 __ fneg(left_reg, left_reg);
2082 __ fsub(result_reg, left_reg, right_reg);
2083 __ fneg(result_reg, result_reg);
2084 } else {
2085 // For max we want logical-and of sign bit: (L + R)
2086 __ fadd(result_reg, left_reg, right_reg);
2087 }
2088 __ b(&done);
2090 __ bind(&check_nan_left);
2091 __ fcmpu(left_reg, left_reg);
2092 __ bunordered(&return_left); // left == NaN.
2094 __ bind(&return_right);
2095 if (!right_reg.is(result_reg)) {
2096 __ fmr(result_reg, right_reg);
2097 }
2098 __ b(&done);
2100 __ bind(&return_left);
2101 if (!left_reg.is(result_reg)) {
2102 __ fmr(result_reg, left_reg);
2103 }
2104 __ bind(&done);
2105 }
2109 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2110 DoubleRegister left = ToDoubleRegister(instr->left());
2111 DoubleRegister right = ToDoubleRegister(instr->right());
2112 DoubleRegister result = ToDoubleRegister(instr->result());
2113 switch (instr->op()) {
2114 case Token::ADD:
2115 __ fadd(result, left, right);
2116 break;
2117 case Token::SUB:
2118 __ fsub(result, left, right);
2119 break;
2120 case Token::MUL:
2121 __ fmul(result, left, right);
2122 break;
2123 case Token::DIV:
2124 __ fdiv(result, left, right);
2125 break;
2126 case Token::MOD: {
2127 __ PrepareCallCFunction(0, 2, scratch0());
2128 __ MovToFloatParameters(left, right);
2129 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2130 0, 2);
2131 // Move the result into the double result register.
2132 __ MovFromFloatResult(result);
2133 break;
2134 }
2135 default:
2136 UNREACHABLE();
2137 break;
2138 }
2142 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2143 DCHECK(ToRegister(instr->context()).is(cp));
2144 DCHECK(ToRegister(instr->left()).is(r4));
2145 DCHECK(ToRegister(instr->right()).is(r3));
2146 DCHECK(ToRegister(instr->result()).is(r3));
2148 Handle<Code> code =
2149 CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
2150 CallCode(code, RelocInfo::CODE_TARGET, instr);
2154 template <class InstrType>
2155 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2156 int left_block = instr->TrueDestination(chunk_);
2157 int right_block = instr->FalseDestination(chunk_);
2159 int next_block = GetNextEmittedBlock();
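// Pick the cheapest branch shape: fall through whenever a destination is
// the block emitted next.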
2161 if (right_block == left_block || cond == al) {
2162 EmitGoto(left_block);
2163 } else if (left_block == next_block) {
2164 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2165 } else if (right_block == next_block) {
2166 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2168 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2169 __ b(chunk_->GetAssemblyLabel(right_block));
2174 template <class InstrType>
2175 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2176 int false_block = instr->FalseDestination(chunk_);
2177 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2181 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2184 void LCodeGen::DoBranch(LBranch* instr) {
2185 Representation r = instr->hydrogen()->value()->representation();
2186 DoubleRegister dbl_scratch = double_scratch0();
2187 const uint32_t crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2188 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
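// After mfcr these bits select CR7's EQ and FU (unordered) fields; either
// one set means the double compared equal to 0.0 or was a NaN, i.e. falsy.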
2190 if (r.IsInteger32()) {
2191 DCHECK(!info()->IsStub());
2192 Register reg = ToRegister(instr->value());
2193 __ cmpwi(reg, Operand::Zero());
2194 EmitBranch(instr, ne);
2195 } else if (r.IsSmi()) {
2196 DCHECK(!info()->IsStub());
2197 Register reg = ToRegister(instr->value());
2198 __ cmpi(reg, Operand::Zero());
2199 EmitBranch(instr, ne);
2200 } else if (r.IsDouble()) {
2201 DCHECK(!info()->IsStub());
2202 DoubleRegister reg = ToDoubleRegister(instr->value());
2203 // Test the double value. Zero and NaN are false.
2204 __ fcmpu(reg, kDoubleRegZero, cr7);
2205 __ mfcr(r0);
2206 __ andi(r0, r0, Operand(crZOrNaNBits));
2207 EmitBranch(instr, eq, cr0);
2209 DCHECK(r.IsTagged());
2210 Register reg = ToRegister(instr->value());
2211 HType type = instr->hydrogen()->value()->type();
2212 if (type.IsBoolean()) {
2213 DCHECK(!info()->IsStub());
2214 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2215 EmitBranch(instr, eq);
2216 } else if (type.IsSmi()) {
2217 DCHECK(!info()->IsStub());
2218 __ cmpi(reg, Operand::Zero());
2219 EmitBranch(instr, ne);
2220 } else if (type.IsJSArray()) {
2221 DCHECK(!info()->IsStub());
2222 EmitBranch(instr, al);
2223 } else if (type.IsHeapNumber()) {
2224 DCHECK(!info()->IsStub());
2225 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2226 // Test the double value. Zero and NaN are false.
2227 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2228 __ mfcr(r0);
2229 __ andi(r0, r0, Operand(crZOrNaNBits));
2230 EmitBranch(instr, eq, cr0);
2231 } else if (type.IsString()) {
2232 DCHECK(!info()->IsStub());
2233 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2234 __ cmpi(ip, Operand::Zero());
2235 EmitBranch(instr, ne);
2237 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2238 // Avoid deopts in the case where we've never executed this path before.
2239 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2241 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2242 // undefined -> false.
2243 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2244 __ beq(instr->FalseLabel(chunk_));
2246 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2247 // Boolean -> its value.
2248 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2249 __ beq(instr->TrueLabel(chunk_));
2250 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2251 __ beq(instr->FalseLabel(chunk_));
2253 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2255 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2256 __ beq(instr->FalseLabel(chunk_));
2259 if (expected.Contains(ToBooleanStub::SMI)) {
2260 // Smis: 0 -> false, all other -> true.
2261 __ cmpi(reg, Operand::Zero());
2262 __ beq(instr->FalseLabel(chunk_));
2263 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2264 } else if (expected.NeedsMap()) {
2265 // If we need a map later and have a Smi -> deopt.
2266 __ TestIfSmi(reg, r0);
2267 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
2270 const Register map = scratch0();
2271 if (expected.NeedsMap()) {
2272 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2274 if (expected.CanBeUndetectable()) {
2275 // Undetectable -> false.
2276 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2277 __ TestBit(ip, Map::kIsUndetectable, r0);
2278 __ bne(instr->FalseLabel(chunk_), cr0);
2279 }
2280 }
2282 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2283 // spec object -> true.
2284 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2285 __ bge(instr->TrueLabel(chunk_));
2286 }
2288 if (expected.Contains(ToBooleanStub::STRING)) {
2289 // String value -> false iff empty.
2290 Label not_string;
2291 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2292 __ bge(&not_string);
2293 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2294 __ cmpi(ip, Operand::Zero());
2295 __ bne(instr->TrueLabel(chunk_));
2296 __ b(instr->FalseLabel(chunk_));
2297 __ bind(&not_string);
2298 }
2300 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2301 // Symbol value -> true.
2302 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2303 __ beq(instr->TrueLabel(chunk_));
2304 }
2306 if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
2307 // SIMD value -> true.
2308 Label not_simd;
2309 __ CompareInstanceType(map, ip, FIRST_SIMD_VALUE_TYPE);
2310 __ blt(&not_simd);
2311 __ CompareInstanceType(map, ip, LAST_SIMD_VALUE_TYPE);
2312 __ ble(instr->TrueLabel(chunk_));
2313 __ bind(&not_simd);
2314 }
2316 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2317 // heap number -> false iff +0, -0, or NaN.
2318 Label not_heap_number;
2319 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2320 __ bne(&not_heap_number);
2321 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2322 // Test the double value. Zero and NaN are false.
2323 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2324 __ mfcr(r0);
2325 __ andi(r0, r0, Operand(crZOrNaNBits));
2326 __ bne(instr->FalseLabel(chunk_), cr0);
2327 __ b(instr->TrueLabel(chunk_));
2328 __ bind(&not_heap_number);
2329 }
2331 if (!expected.IsGeneric()) {
2332 // We've seen something for the first time -> deopt.
2333 // This can only happen if we are not generic already.
2334 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2341 void LCodeGen::EmitGoto(int block) {
2342 if (!IsNextEmittedBlock(block)) {
2343 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2348 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2351 Condition LCodeGen::TokenToCondition(Token::Value op) {
2352 Condition cond = kNoCondition;
2353 switch (op) {
2354 case Token::EQ:
2355 case Token::EQ_STRICT:
2356 cond = eq;
2357 break;
2358 case Token::NE:
2359 case Token::NE_STRICT:
2360 cond = ne;
2361 break;
2362 case Token::LT:
2363 cond = lt;
2364 break;
2365 case Token::GT:
2366 cond = gt;
2367 break;
2368 case Token::LTE:
2369 cond = le;
2370 break;
2371 case Token::GTE:
2372 cond = ge;
2373 break;
2374 case Token::IN:
2375 case Token::INSTANCEOF:
2376 default:
2377 UNREACHABLE();
2378 }
2379 return cond;
2380 }
2383 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2384 LOperand* left = instr->left();
2385 LOperand* right = instr->right();
2386 bool is_unsigned =
2387 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2388 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2389 Condition cond = TokenToCondition(instr->op());
2391 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2392 // We can statically evaluate the comparison.
2393 double left_val = ToDouble(LConstantOperand::cast(left));
2394 double right_val = ToDouble(LConstantOperand::cast(right));
2395 int next_block = EvalComparison(instr->op(), left_val, right_val)
2396 ? instr->TrueDestination(chunk_)
2397 : instr->FalseDestination(chunk_);
2398 EmitGoto(next_block);
2400 if (instr->is_double()) {
2401 // Compare left and right operands as doubles and load the
2402 // resulting flags into the normal status register.
2403 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2404 // If a NaN is involved, i.e. the result is unordered,
2405 // jump to false block label.
2406 __ bunordered(instr->FalseLabel(chunk_));
2408 if (right->IsConstantOperand()) {
2409 int32_t value = ToInteger32(LConstantOperand::cast(right));
2410 if (instr->hydrogen_value()->representation().IsSmi()) {
2411 if (is_unsigned) {
2412 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2413 } else {
2414 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2415 }
2416 } else {
2417 if (is_unsigned) {
2418 __ Cmplwi(ToRegister(left), Operand(value), r0);
2419 } else {
2420 __ Cmpwi(ToRegister(left), Operand(value), r0);
2421 }
2422 }
2423 } else if (left->IsConstantOperand()) {
2424 int32_t value = ToInteger32(LConstantOperand::cast(left));
2425 if (instr->hydrogen_value()->representation().IsSmi()) {
2426 if (is_unsigned) {
2427 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2428 } else {
2429 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2430 }
2431 } else {
2432 if (is_unsigned) {
2433 __ Cmplwi(ToRegister(right), Operand(value), r0);
2434 } else {
2435 __ Cmpwi(ToRegister(right), Operand(value), r0);
2436 }
2437 }
2438 // We commuted the operands, so commute the condition.
2439 cond = CommuteCondition(cond);
2440 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2441 if (is_unsigned) {
2442 __ cmpl(ToRegister(left), ToRegister(right));
2443 } else {
2444 __ cmp(ToRegister(left), ToRegister(right));
2445 }
2446 } else {
2447 if (is_unsigned) {
2448 __ cmplw(ToRegister(left), ToRegister(right));
2449 } else {
2450 __ cmpw(ToRegister(left), ToRegister(right));
2451 }
2452 }
2454 EmitBranch(instr, cond);
2459 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2460 Register left = ToRegister(instr->left());
2461 Register right = ToRegister(instr->right());
2463 __ cmp(left, right);
2464 EmitBranch(instr, eq);
2468 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2469 if (instr->hydrogen()->representation().IsTagged()) {
2470 Register input_reg = ToRegister(instr->object());
2471 __ mov(ip, Operand(factory()->the_hole_value()));
2472 __ cmp(input_reg, ip);
2473 EmitBranch(instr, eq);
2477 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2478 __ fcmpu(input_reg, input_reg);
2479 EmitFalseBranch(instr, ordered);
2481 Register scratch = scratch0();
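// Only the hole NaN carries kHoleNanUpper32 in its high word, so comparing
// the raw upper bits distinguishes it from every other NaN.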
2482 __ MovDoubleHighToInt(scratch, input_reg);
2483 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2484 EmitBranch(instr, eq);
2488 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2489 Representation rep = instr->hydrogen()->value()->representation();
2490 DCHECK(!rep.IsInteger32());
2491 Register scratch = ToRegister(instr->temp());
2493 if (rep.IsDouble()) {
2494 DoubleRegister value = ToDoubleRegister(instr->value());
2495 __ fcmpu(value, kDoubleRegZero);
2496 EmitFalseBranch(instr, ne);
2497 #if V8_TARGET_ARCH_PPC64
2498 __ MovDoubleToInt64(scratch, value);
2500 __ MovDoubleHighToInt(scratch, value);
2502 __ cmpi(scratch, Operand::Zero());
2503 EmitBranch(instr, lt);
2505 Register value = ToRegister(instr->value());
2506 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2507 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2508 #if V8_TARGET_ARCH_PPC64
2509 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2510 __ li(ip, Operand(1));
2511 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
2512 __ cmp(scratch, ip);
2514 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2515 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2517 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2518 __ cmp(scratch, r0);
2520 __ cmpi(ip, Operand::Zero());
2523 EmitBranch(instr, eq);
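// Checks whether input is a string. Branches to is_not_string for smis when
// the inline smi check is requested, and returns lt, which holds when the
// instance type is below FIRST_NONSTRING_TYPE.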
2528 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2529 Label* is_not_string,
2530 SmiCheck check_needed = INLINE_SMI_CHECK) {
2531 if (check_needed == INLINE_SMI_CHECK) {
2532 __ JumpIfSmi(input, is_not_string);
2534 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2535 return lt;
2536 }
2540 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2541 Register reg = ToRegister(instr->value());
2542 Register temp1 = ToRegister(instr->temp());
2544 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2545 ? OMIT_SMI_CHECK
2546 : INLINE_SMI_CHECK;
2547 Condition true_cond =
2548 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2550 EmitBranch(instr, true_cond);
2554 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2555 Register input_reg = EmitLoadRegister(instr->value(), ip);
2556 __ TestIfSmi(input_reg, r0);
2557 EmitBranch(instr, eq, cr0);
2561 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2562 Register input = ToRegister(instr->value());
2563 Register temp = ToRegister(instr->temp());
2565 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2566 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2568 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2569 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2570 __ TestBit(temp, Map::kIsUndetectable, r0);
2571 EmitBranch(instr, ne, cr0);
2575 static Condition ComputeCompareCondition(Token::Value op) {
2576 switch (op) {
2577 case Token::EQ_STRICT:
2578 case Token::EQ:
2579 return eq;
2580 case Token::LT:
2581 return lt;
2582 case Token::GT:
2583 return gt;
2584 case Token::LTE:
2585 return le;
2586 case Token::GTE:
2587 return ge;
2588 default:
2589 UNREACHABLE();
2590 return kNoCondition;
2591 }
2592 }
2595 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2596 DCHECK(ToRegister(instr->context()).is(cp));
2597 Token::Value op = instr->op();
2599 Handle<Code> ic =
2600 CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
2601 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2602 // This instruction also signals that no inlined smi code was generated.
2603 __ cmpi(r3, Operand::Zero());
2605 Condition condition = ComputeCompareCondition(op);
2607 EmitBranch(instr, condition);
2611 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2612 InstanceType from = instr->from();
2613 InstanceType to = instr->to();
2614 if (from == FIRST_TYPE) return to;
2615 DCHECK(from == to || to == LAST_TYPE);
2616 return from;
2617 }
2620 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2621 InstanceType from = instr->from();
2622 InstanceType to = instr->to();
2623 if (from == to) return eq;
2624 if (to == LAST_TYPE) return ge;
2625 if (from == FIRST_TYPE) return le;
2626 UNREACHABLE();
2627 return eq;
2628 }
2631 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2632 Register scratch = scratch0();
2633 Register input = ToRegister(instr->value());
2635 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2636 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2639 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2640 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2644 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2645 Register input = ToRegister(instr->value());
2646 Register result = ToRegister(instr->result());
2648 __ AssertString(input);
2650 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2651 __ IndexFromHash(result, result);
2655 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2656 LHasCachedArrayIndexAndBranch* instr) {
2657 Register input = ToRegister(instr->value());
2658 Register scratch = scratch0();
2660 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2661 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2662 __ and_(r0, scratch, r0, SetRC);
2663 EmitBranch(instr, eq, cr0);
2667 // Branches to a label or falls through with the answer in flags. Trashes
2668 // the temp registers, but not the input.
2669 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2670 Handle<String> class_name, Register input,
2671 Register temp, Register temp2) {
2672 DCHECK(!input.is(temp));
2673 DCHECK(!input.is(temp2));
2674 DCHECK(!temp.is(temp2));
2676 __ JumpIfSmi(input, is_false);
2678 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2679 // Assuming the following assertions, we can use the same compares to test
2680 // for both being a function type and being in the object type range.
2681 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2682 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2683 FIRST_SPEC_OBJECT_TYPE + 1);
2684 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2685 LAST_SPEC_OBJECT_TYPE - 1);
2686 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2687 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2688 __ blt(is_false);
2689 __ beq(is_true);
2690 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2691 __ beq(is_true);
2692 } else {
2693 // Faster code path to avoid two compares: subtract lower bound from the
2694 // actual type and do a signed compare with the width of the type range.
2695 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2696 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2697 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2698 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2699 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2700 __ bgt(is_false);
2701 }
2703 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2704 // Check if the constructor in the map is a function.
2705 Register instance_type = ip;
2706 __ GetMapConstructor(temp, temp, temp2, instance_type);
2708 // Objects with a non-function constructor have class 'Object'.
2709 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
2710 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2711 __ bne(is_true);
2712 } else {
2713 __ bne(is_false);
2714 }
2716 // temp now contains the constructor function. Grab the
2717 // instance class name from there.
2718 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2719 __ LoadP(temp,
2720 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2721 // The class name we are testing against is internalized since it's a literal.
2722 // The name in the constructor is internalized because of the way the context
2723 // is booted. This routine isn't expected to work for random API-created
2724 // classes and it doesn't have to because you can't access it with natives
2725 // syntax. Since both sides are internalized it is sufficient to use an
2726 // identity comparison.
2727 __ Cmpi(temp, Operand(class_name), r0);
2728 // End with the answer in flags.
2732 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2733 Register input = ToRegister(instr->value());
2734 Register temp = scratch0();
2735 Register temp2 = ToRegister(instr->temp());
2736 Handle<String> class_name = instr->hydrogen()->class_name();
2738 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2739 class_name, input, temp, temp2);
2741 EmitBranch(instr, eq);
2745 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2746 Register reg = ToRegister(instr->value());
2747 Register temp = ToRegister(instr->temp());
2749 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2750 __ Cmpi(temp, Operand(instr->map()), r0);
2751 EmitBranch(instr, eq);
2755 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2756 DCHECK(ToRegister(instr->context()).is(cp));
2757 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2758 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2760 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2761 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2763 if (CpuFeatures::IsSupported(ISELECT)) {
2764 __ mov(r4, Operand(factory()->true_value()));
2765 __ mov(r5, Operand(factory()->false_value()));
2766 __ cmpi(r3, Operand::Zero());
2767 __ isel(eq, r3, r4, r5);
2768 } else {
2769 Label equal, done;
2770 __ cmpi(r3, Operand::Zero());
2771 __ beq(&equal);
2772 __ mov(r3, Operand(factory()->false_value()));
2773 __ b(&done);
2775 __ bind(&equal);
2776 __ mov(r3, Operand(factory()->true_value()));
2777 __ bind(&done);
2778 }
2782 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2783 class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
2785 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2786 LInstanceOfKnownGlobal* instr)
2787 : LDeferredCode(codegen), instr_(instr) {}
2788 void Generate() override {
2789 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
2792 LInstruction* instr() override { return instr_; }
2793 Label* map_check() { return &map_check_; }
2794 Label* load_bool() { return &load_bool_; }
2797 LInstanceOfKnownGlobal* instr_;
2802 DeferredInstanceOfKnownGlobal* deferred;
2803 deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
2805 Label done, false_result;
2806 Register object = ToRegister(instr->value());
2807 Register temp = ToRegister(instr->temp());
2808 Register result = ToRegister(instr->result());
2810 // A Smi is not instance of anything.
2811 __ JumpIfSmi(object, &false_result);
2813 // This is the inlined call site instanceof cache. The two occurrences of the
2814 // hole value will be patched to the last map/result pair generated by the
2815 // stub.
2816 Label cache_miss;
2817 Register map = temp;
2818 __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2820 // Block trampoline emission to ensure the positions of instructions are
2821 // as expected by the patcher. See InstanceofStub::Generate().
2822 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2823 __ bind(deferred->map_check()); // Label for calculating code patching.
2824 // We use Factory::the_hole_value() on purpose instead of loading from the
2825 // root array to force relocation to be able to later patch with
2826 // the cached map.
2827 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2828 __ mov(ip, Operand(cell));
2829 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
2830 __ cmp(map, ip);
2831 __ bc_short(ne, &cache_miss);
2832 __ bind(deferred->load_bool()); // Label for calculating code patching.
2833 // We use Factory::the_hole_value() on purpose instead of loading from the
2834 // root array to force relocation to be able to later patch
2835 // with true or false.
2836 __ mov(result, Operand(factory()->the_hole_value()));
2840 // The inlined call site cache did not match. Check null and string before
2841 // calling the deferred code.
2842 __ bind(&cache_miss);
2843 // Null is not instance of anything.
2844 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2845 __ cmp(object, ip);
2846 __ beq(&false_result);
2848 // String values are not instances of anything.
2849 Condition is_string = masm_->IsObjectStringType(object, temp);
2850 __ b(is_string, &false_result, cr0);
2852 // Go to the deferred code.
2853 __ b(deferred->entry());
2855 __ bind(&false_result);
2856 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2858 // Here result has either true or false. Deferred code also produces true or
2859 // false.
2860 __ bind(deferred->exit());
2865 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2866 Label* map_check,
2867 Label* bool_load) {
2868 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2869 flags = static_cast<InstanceofStub::Flags>(flags |
2870 InstanceofStub::kArgsInRegisters);
2871 flags = static_cast<InstanceofStub::Flags>(
2872 flags | InstanceofStub::kCallSiteInlineCheck);
2873 flags = static_cast<InstanceofStub::Flags>(
2874 flags | InstanceofStub::kReturnTrueFalseObject);
2875 InstanceofStub stub(isolate(), flags);
2877 PushSafepointRegistersScope scope(this);
2878 LoadContextFromDeferred(instr->context());
2880 __ Move(InstanceofStub::right(), instr->function());
2882 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2883 Handle<Code> code = stub.GetCode();
2884 // Include instructions below in delta: bitwise_mov32 + li + call
2885 int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
2886 // The labels must be already bound since the code has predictable size up
2887 // to the call instruction.
2888 DCHECK(map_check->is_bound());
2889 DCHECK(bool_load->is_bound());
2890 int map_check_delta =
2891 masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
2892 int bool_load_delta =
2893 masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
2894 // r8 is the delta from our callee's lr to the location of the map check.
2895 __ bitwise_mov32(r8, map_check_delta + additional_delta);
2896 // r9 is the delta from map check to bool load.
2897 __ li(r9, Operand(map_check_delta - bool_load_delta));
2898 CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
2899 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2900 DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
2901 masm_->InstructionsGeneratedSince(map_check));
2903 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2904 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2905 // Put the result value (r3) into the result register slot and
2906 // restore all registers.
2907 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2911 void LCodeGen::DoCmpT(LCmpT* instr) {
2912 DCHECK(ToRegister(instr->context()).is(cp));
2913 Token::Value op = instr->op();
2916 CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
2917 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2918 // This instruction also signals that no inlined smi code was generated.
2919 __ cmpi(r3, Operand::Zero());
2921 Condition condition = ComputeCompareCondition(op);
2922 if (CpuFeatures::IsSupported(ISELECT)) {
2923 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2924 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2925 __ isel(condition, ToRegister(instr->result()), r4, r5);
2927 Label true_value, done;
2929 __ b(condition, &true_value);
2931 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2932 __ b(&done);
2934 __ bind(&true_value);
2935 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2936 __ bind(&done);
2937 }
2942 void LCodeGen::DoReturn(LReturn* instr) {
2943 if (FLAG_trace && info()->IsOptimizing()) {
2944 // Push the return value on the stack as the parameter.
2945 // Runtime::TraceExit returns its parameter in r3. Since we're leaving the
2946 // code managed by the register allocator and tearing down the frame, it's
2947 // safe to write to the context register.
2948 __ push(r3);
2949 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2950 __ CallRuntime(Runtime::kTraceExit, 1);
2952 if (info()->saves_caller_doubles()) {
2953 RestoreCallerDoubles();
2955 int no_frame_start = -1;
2956 if (instr->has_constant_parameter_count()) {
2957 int parameter_count = ToInteger32(instr->constant_parameter_count());
2958 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2959 if (NeedsEagerFrame()) {
2960 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2961 } else if (sp_delta != 0) {
2962 __ addi(sp, sp, Operand(sp_delta));
2965 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2966 Register reg = ToRegister(instr->parameter_count());
2967 // The argument count parameter is a smi.
2968 if (NeedsEagerFrame()) {
2969 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2971 __ SmiToPtrArrayOffset(r0, reg);
2972 __ add(sp, sp, r0);
2973 }
2975 __ blr();
2977 if (no_frame_start != -1) {
2978 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2983 template <class T>
2984 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2985 Register vector_register = ToRegister(instr->temp_vector());
2986 Register slot_register = LoadDescriptor::SlotRegister();
2987 DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
2988 DCHECK(slot_register.is(r3));
2990 AllowDeferredHandleDereference vector_structure_check;
2991 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
2992 __ Move(vector_register, vector);
2993 // No need to allocate this register.
2994 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
2995 int index = vector->GetIndex(slot);
2996 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
3000 template <class T>
3001 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
3002 Register vector_register = ToRegister(instr->temp_vector());
3003 Register slot_register = ToRegister(instr->temp_slot());
3005 AllowDeferredHandleDereference vector_structure_check;
3006 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3007 __ Move(vector_register, vector);
3008 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
3009 int index = vector->GetIndex(slot);
3010 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
3014 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3015 DCHECK(ToRegister(instr->context()).is(cp));
3016 DCHECK(ToRegister(instr->global_object())
3017 .is(LoadDescriptor::ReceiverRegister()));
3018 DCHECK(ToRegister(instr->result()).is(r3));
3020 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3021 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3022 Handle<Code> ic =
3023 CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
3024 SLOPPY, PREMONOMORPHIC).code();
3025 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3029 void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
3030 DCHECK(ToRegister(instr->context()).is(cp));
3031 DCHECK(ToRegister(instr->result()).is(r3));
3033 int const slot = instr->slot_index();
3034 int const depth = instr->depth();
3035 if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
3036 __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
3037 Handle<Code> stub =
3038 CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
3039 CallCode(stub, RelocInfo::CODE_TARGET, instr);
3041 __ Push(Smi::FromInt(slot));
3042 __ CallRuntime(Runtime::kLoadGlobalViaContext, 1);
3047 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3048 Register context = ToRegister(instr->context());
3049 Register result = ToRegister(instr->result());
3050 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3051 if (instr->hydrogen()->RequiresHoleCheck()) {
3052 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3053 if (instr->hydrogen()->DeoptimizesOnHole()) {
3054 __ cmp(result, ip);
3055 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3056 } else {
3057 if (CpuFeatures::IsSupported(ISELECT)) {
3058 Register scratch = scratch0();
3059 __ mov(scratch, Operand(factory()->undefined_value()));
3060 __ cmp(result, ip);
3061 __ isel(eq, result, scratch, result);
3062 } else {
3063 Label skip;
3064 __ cmp(result, ip);
3065 __ bne(&skip);
3066 __ mov(result, Operand(factory()->undefined_value()));
3067 __ bind(&skip);
3068 }
3069 }
3074 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3075 Register context = ToRegister(instr->context());
3076 Register value = ToRegister(instr->value());
3077 Register scratch = scratch0();
3078 MemOperand target = ContextOperand(context, instr->slot_index());
3080 Label skip_assignment;
3082 if (instr->hydrogen()->RequiresHoleCheck()) {
3083 __ LoadP(scratch, target);
3084 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3085 __ cmp(scratch, ip);
3086 if (instr->hydrogen()->DeoptimizesOnHole()) {
3087 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3089 __ bne(&skip_assignment);
3093 __ StoreP(value, target, r0);
3094 if (instr->hydrogen()->NeedsWriteBarrier()) {
3095 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
3096 ? OMIT_SMI_CHECK
3097 : INLINE_SMI_CHECK;
3098 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
3099 GetLinkRegisterState(), kSaveFPRegs,
3100 EMIT_REMEMBERED_SET, check_needed);
3103 __ bind(&skip_assignment);
3107 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3108 HObjectAccess access = instr->hydrogen()->access();
3109 int offset = access.offset();
3110 Register object = ToRegister(instr->object());
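// Three paths below: raw external memory, an in-object double field, and a
// tagged field living either in-object or in the properties array.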
3112 if (access.IsExternalMemory()) {
3113 Register result = ToRegister(instr->result());
3114 MemOperand operand = MemOperand(object, offset);
3115 __ LoadRepresentation(result, operand, access.representation(), r0);
3116 return;
3117 }
3119 if (instr->hydrogen()->representation().IsDouble()) {
3120 DCHECK(access.IsInobject());
3121 DoubleRegister result = ToDoubleRegister(instr->result());
3122 __ lfd(result, FieldMemOperand(object, offset));
3123 return;
3124 }
3126 Register result = ToRegister(instr->result());
3127 if (!access.IsInobject()) {
3128 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3129 object = result;
3130 }
3132 Representation representation = access.representation();
3134 #if V8_TARGET_ARCH_PPC64
3135 // 64-bit Smi optimization
3136 if (representation.IsSmi() &&
3137 instr->hydrogen()->representation().IsInteger32()) {
3138 // Read int value directly from upper half of the smi.
3139 offset = SmiWordOffset(offset);
3140 representation = Representation::Integer32();
3141 }
3142 #endif
3144 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
3145 r0);
3149 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3150 DCHECK(ToRegister(instr->context()).is(cp));
3151 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3152 DCHECK(ToRegister(instr->result()).is(r3));
3154 // Name is always in r5.
3155 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3156 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3157 Handle<Code> ic =
3158 CodeFactory::LoadICInOptimizedCode(
3159 isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
3160 instr->hydrogen()->initialization_state()).code();
3161 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3165 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3166 Register scratch = scratch0();
3167 Register function = ToRegister(instr->function());
3168 Register result = ToRegister(instr->result());
3170 // Get the prototype or initial map from the function.
3171 __ LoadP(result,
3172 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3174 // Check that the function has a prototype or an initial map.
3175 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3176 __ cmp(result, ip);
3177 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3179 // If the function does not have an initial map, we're done.
3180 if (CpuFeatures::IsSupported(ISELECT)) {
3181 // Get the prototype from the initial map (optimistic).
3182 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
3183 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3184 __ isel(eq, result, ip, result);
3185 } else {
3186 Label done;
3187 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3188 __ bne(&done);
3190 // Get the prototype from the initial map.
3191 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3192 __ bind(&done);
3193 }
3199 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3200 Register result = ToRegister(instr->result());
3201 __ LoadRoot(result, instr->index());
3205 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3206 Register arguments = ToRegister(instr->arguments());
3207 Register result = ToRegister(instr->result());
3208 // There are two words between the frame pointer and the last argument.
3209 // Subtracting the index from the length accounts for one of them; adding
3210 // one accounts for the other.
3210 if (instr->length()->IsConstantOperand()) {
3211 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3212 if (instr->index()->IsConstantOperand()) {
3213 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3214 int index = (const_length - const_index) + 1;
3215 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
3217 Register index = ToRegister(instr->index());
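// subfic computes (const_length + 1) - index with a single
// subtract-from-immediate instruction.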
3218 __ subfic(result, index, Operand(const_length + 1));
3219 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3220 __ LoadPX(result, MemOperand(arguments, result));
3222 } else if (instr->index()->IsConstantOperand()) {
3223 Register length = ToRegister(instr->length());
3224 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3225 int loc = const_index - 1;
3226 if (loc != 0) {
3227 __ subi(result, length, Operand(loc));
3228 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3229 __ LoadPX(result, MemOperand(arguments, result));
3230 } else {
3231 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
3232 __ LoadPX(result, MemOperand(arguments, result));
3233 }
3235 Register length = ToRegister(instr->length());
3236 Register index = ToRegister(instr->index());
3237 __ sub(result, length, index);
3238 __ addi(result, result, Operand(1));
3239 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3240 __ LoadPX(result, MemOperand(arguments, result));
3245 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3246 Register external_pointer = ToRegister(instr->elements());
3247 Register key = no_reg;
3248 ElementsKind elements_kind = instr->elements_kind();
3249 bool key_is_constant = instr->key()->IsConstantOperand();
3250 int constant_key = 0;
3251 if (key_is_constant) {
3252 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3253 if (constant_key & 0xF0000000) {
3254 Abort(kArrayIndexConstantValueTooBig);
3257 key = ToRegister(instr->key());
3259 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3260 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
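// For smi keys, IndexToArrayOffset folds the smi-tag stripping into the
// shift amount.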
3261 int base_offset = instr->base_offset();
3263 if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
3264 DoubleRegister result = ToDoubleRegister(instr->result());
3265 if (key_is_constant) {
3266 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
3267 r0);
3268 } else {
3269 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3270 __ add(scratch0(), external_pointer, r0);
3272 if (elements_kind == FLOAT32_ELEMENTS) {
3273 __ lfs(result, MemOperand(scratch0(), base_offset));
3274 } else { // i.e. elements_kind == FLOAT64_ELEMENTS
3275 __ lfd(result, MemOperand(scratch0(), base_offset));
3278 Register result = ToRegister(instr->result());
3279 MemOperand mem_operand =
3280 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
3281 constant_key, element_size_shift, base_offset);
3282 switch (elements_kind) {
3283 case INT8_ELEMENTS:
3284 if (key_is_constant) {
3285 __ LoadByte(result, mem_operand, r0);
3286 } else {
3287 __ lbzx(result, mem_operand);
3288 }
3289 __ extsb(result, result);
3290 break;
3291 case UINT8_ELEMENTS:
3292 case UINT8_CLAMPED_ELEMENTS:
3293 if (key_is_constant) {
3294 __ LoadByte(result, mem_operand, r0);
3295 } else {
3296 __ lbzx(result, mem_operand);
3297 }
3298 break;
3299 case INT16_ELEMENTS:
3300 if (key_is_constant) {
3301 __ LoadHalfWordArith(result, mem_operand, r0);
3302 } else {
3303 __ lhax(result, mem_operand);
3304 }
3305 break;
3306 case UINT16_ELEMENTS:
3307 if (key_is_constant) {
3308 __ LoadHalfWord(result, mem_operand, r0);
3309 } else {
3310 __ lhzx(result, mem_operand);
3311 }
3312 break;
3313 case INT32_ELEMENTS:
3314 if (key_is_constant) {
3315 __ LoadWordArith(result, mem_operand, r0);
3316 } else {
3317 __ lwax(result, mem_operand);
3318 }
3319 break;
3320 case UINT32_ELEMENTS:
3321 if (key_is_constant) {
3322 __ LoadWord(result, mem_operand, r0);
3323 } else {
3324 __ lwzx(result, mem_operand);
3325 }
3326 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3327 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3328 __ cmplw(result, r0);
3329 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
3330 }
3331 break;
3332 case FLOAT32_ELEMENTS:
3333 case FLOAT64_ELEMENTS:
3334 case FAST_HOLEY_DOUBLE_ELEMENTS:
3335 case FAST_HOLEY_ELEMENTS:
3336 case FAST_HOLEY_SMI_ELEMENTS:
3337 case FAST_DOUBLE_ELEMENTS:
3338 case FAST_ELEMENTS:
3339 case FAST_SMI_ELEMENTS:
3340 case DICTIONARY_ELEMENTS:
3341 case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
3342 case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
3343 UNREACHABLE();
3344 break;
3345 }
3350 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3351 Register elements = ToRegister(instr->elements());
3352 bool key_is_constant = instr->key()->IsConstantOperand();
3353 Register key = no_reg;
3354 DoubleRegister result = ToDoubleRegister(instr->result());
3355 Register scratch = scratch0();
3357 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3358 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3359 int constant_key = 0;
3360 if (key_is_constant) {
3361 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3362 if (constant_key & 0xF0000000) {
3363 Abort(kArrayIndexConstantValueTooBig);
3366 key = ToRegister(instr->key());
3369 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
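// Any constant part of the key has been folded into base_offset above.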
3370 if (!key_is_constant) {
3371 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3372 __ add(scratch, elements, r0);
3373 elements = scratch;
3374 }
3375 if (!is_int16(base_offset)) {
3376 __ Add(scratch, elements, base_offset, r0);
3377 base_offset = 0;
3378 elements = scratch;
3379 }
3380 __ lfd(result, MemOperand(elements, base_offset));
3382 if (instr->hydrogen()->RequiresHoleCheck()) {
3383 if (is_int16(base_offset + Register::kExponentOffset)) {
3384 __ lwz(scratch,
3385 MemOperand(elements, base_offset + Register::kExponentOffset));
3386 } else {
3387 __ addi(scratch, elements, Operand(base_offset));
3388 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3389 }
3390 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3391 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3396 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3397 HLoadKeyed* hinstr = instr->hydrogen();
3398 Register elements = ToRegister(instr->elements());
3399 Register result = ToRegister(instr->result());
3400 Register scratch = scratch0();
3401 Register store_base = scratch;
3402 int offset = instr->base_offset();
3404 if (instr->key()->IsConstantOperand()) {
3405 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3406 offset += ToInteger32(const_operand) * kPointerSize;
3407 store_base = elements;
3409 Register key = ToRegister(instr->key());
3410 // Even though the HLoadKeyed instruction forces the input
3411 // representation for the key to be an integer, the input gets replaced
3412 // during bound check elimination with the index argument to the bounds
3413 // check, which can be tagged, so that case must be handled here, too.
3414 if (hinstr->key()->representation().IsSmi()) {
3415 __ SmiToPtrArrayOffset(r0, key);
3416 } else {
3417 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3418 }
3419 __ add(scratch, elements, r0);
3422 bool requires_hole_check = hinstr->RequiresHoleCheck();
3423 Representation representation = hinstr->representation();
3425 #if V8_TARGET_ARCH_PPC64
3426 // 64-bit Smi optimization
3427 if (representation.IsInteger32() &&
3428 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3429 DCHECK(!requires_hole_check);
3430 // Read int value directly from upper half of the smi.
3431 offset = SmiWordOffset(offset);
3432 }
3433 #endif
3435 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3436 r0);
3438 // Check for the hole value.
3439 if (requires_hole_check) {
3440 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3441 __ TestIfSmi(result, r0);
3442 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
3444 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3445 __ cmp(result, scratch);
3446 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3448 } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
3449 DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
3450 Label done;
3451 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3452 __ cmp(result, scratch);
3453 __ bne(&done);
3454 if (info()->IsStub()) {
3455 // A stub can safely convert the hole to undefined only if the array
3456 // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
3457 // it needs to bail out.
3458 __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
3459 __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
3460 __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
3461 DeoptimizeIf(ne, instr, Deoptimizer::kHole);
3462 }
3463 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
3464 __ bind(&done);
3465 }
3469 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3470 if (instr->is_fixed_typed_array()) {
3471 DoLoadKeyedExternalArray(instr);
3472 } else if (instr->hydrogen()->representation().IsDouble()) {
3473 DoLoadKeyedFixedDoubleArray(instr);
3475 DoLoadKeyedFixedArray(instr);
3480 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3481 bool key_is_constant, bool key_is_smi,
3483 int element_size_shift,
3485 Register scratch = scratch0();
3487 if (key_is_constant) {
3488 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3491 bool needs_shift =
3492 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3494 if (!(base_offset || needs_shift)) {
3495 return MemOperand(base, key);
3498 if (needs_shift) {
3499 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3500 key = scratch;
3501 }
3503 if (base_offset) {
3504 __ Add(scratch, key, base_offset, r0);
3505 }
3507 return MemOperand(base, scratch);
3511 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3512 DCHECK(ToRegister(instr->context()).is(cp));
3513 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3514 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3516 if (instr->hydrogen()->HasVectorAndSlot()) {
3517 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3520 Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
3521 isolate(), instr->hydrogen()->language_mode(),
3522 instr->hydrogen()->initialization_state()).code();
3523 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3527 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3528 Register scratch = scratch0();
3529 Register result = ToRegister(instr->result());
3531 if (instr->hydrogen()->from_inlined()) {
3532 __ subi(result, sp, Operand(2 * kPointerSize));
3534 // Check if the calling frame is an arguments adaptor frame.
3535 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3536 __ LoadP(result,
3537 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3538 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3540 // Result is the frame pointer for the frame if not adapted and for the real
3541 // frame below the adaptor frame if adapted.
3542 if (CpuFeatures::IsSupported(ISELECT)) {
3543 __ isel(eq, result, scratch, fp);
3544 } else {
3545 Label done, adapted;
3546 __ beq(&adapted);
3547 __ mr(result, fp);
3548 __ b(&done);
3550 __ bind(&adapted);
3551 __ mr(result, scratch);
3552 __ bind(&done);
3553 }
3558 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3559 Register elem = ToRegister(instr->elements());
3560 Register result = ToRegister(instr->result());
3562 Label done;
3564 // If no arguments adaptor frame the number of arguments is fixed.
3565 __ cmp(fp, elem);
3566 __ mov(result, Operand(scope()->num_parameters()));
3567 __ beq(&done);
3569 // Arguments adaptor frame present. Get argument length from there.
3570 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3571 __ LoadP(result,
3572 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3573 __ SmiUntag(result);
3575 // Argument length is in result register.
3576 __ bind(&done);
3580 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3581 Register receiver = ToRegister(instr->receiver());
3582 Register function = ToRegister(instr->function());
3583 Register result = ToRegister(instr->result());
3584 Register scratch = scratch0();
3586 // If the receiver is null or undefined, we have to pass the global
3587 // object as a receiver to normal functions. Values have to be
3588 // passed unchanged to builtins and strict-mode functions.
3589 Label global_object, result_in_receiver;
3591 if (!instr->hydrogen()->known_function()) {
3592 // Do not transform the receiver to object for strict mode
3593 // functions.
3594 __ LoadP(scratch,
3595 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3596 __ lwz(scratch,
3597 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3598 __ TestBit(scratch,
3599 #if V8_TARGET_ARCH_PPC64
3600 SharedFunctionInfo::kStrictModeFunction,
3601 #else
3602 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
3603 #endif
3604 r0);
3605 __ bne(&result_in_receiver, cr0);
3607 // Do not transform the receiver to object for builtins.
3608 __ TestBit(scratch,
3609 #if V8_TARGET_ARCH_PPC64
3610 SharedFunctionInfo::kNative,
3611 #else
3612 SharedFunctionInfo::kNative + kSmiTagSize,
3613 #endif
3614 r0);
3615 __ bne(&result_in_receiver, cr0);
3616 }
3618 // Normal function. Replace undefined or null with global receiver.
3619 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3620 __ cmp(receiver, scratch);
3621 __ beq(&global_object);
3622 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3623 __ cmp(receiver, scratch);
3624 __ beq(&global_object);
3626 // Deoptimize if the receiver is not a JS object.
3627 __ TestIfSmi(receiver, r0);
3628 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
3629 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3630 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3632 __ b(&result_in_receiver);
3633 __ bind(&global_object);
3634 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3635 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3636 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3637 if (result.is(receiver)) {
3638 __ bind(&result_in_receiver);
3639 } else {
3640 Label result_ok;
3641 __ b(&result_ok);
3642 __ bind(&result_in_receiver);
3643 __ mr(result, receiver);
3644 __ bind(&result_ok);
3649 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3650 Register receiver = ToRegister(instr->receiver());
3651 Register function = ToRegister(instr->function());
3652 Register length = ToRegister(instr->length());
3653 Register elements = ToRegister(instr->elements());
3654 Register scratch = scratch0();
3655 DCHECK(receiver.is(r3)); // Used for parameter count.
3656 DCHECK(function.is(r4)); // Required by InvokeFunction.
3657 DCHECK(ToRegister(instr->result()).is(r3));
3659 // Copy the arguments to this function possibly from the
3660 // adaptor frame below it.
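// Bound the number of pushed arguments so the copy loop below cannot
// overflow the stack.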
3661 const uint32_t kArgumentsLimit = 1 * KB;
3662 __ cmpli(length, Operand(kArgumentsLimit));
3663 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
3665 // Push the receiver and use the register to keep the original
3666 // number of arguments.
3667 __ push(receiver);
3668 __ mr(receiver, length);
3669 // The arguments are at a one pointer size offset from elements.
3670 __ addi(elements, elements, Operand(1 * kPointerSize));
3672 // Loop through the arguments pushing them onto the execution
3673 // stack.
3674 Label invoke, loop;
3675 // length is a small non-negative integer, due to the test above.
3676 __ cmpi(length, Operand::Zero());
3677 __ beq(&invoke);
3678 __ mtctr(length);
3679 __ bind(&loop);
3680 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3681 __ LoadPX(scratch, MemOperand(elements, r0));
3682 __ push(scratch);
3683 __ addi(length, length, Operand(-1));
3684 __ bdnz(&loop);
3686 __ bind(&invoke);
3687 DCHECK(instr->HasPointerMap());
3688 LPointerMap* pointers = instr->pointer_map();
3689 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3690 // The number of arguments is stored in receiver which is r3, as expected
3691 // by InvokeFunction.
3692 ParameterCount actual(receiver);
3693 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3697 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3698 LOperand* argument = instr->value();
3699 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3700 Abort(kDoPushArgumentNotImplementedForDoubleType);
3702 Register argument_reg = EmitLoadRegister(argument, ip);
3703 __ push(argument_reg);
3708 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3711 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3712 Register result = ToRegister(instr->result());
3713 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3717 void LCodeGen::DoContext(LContext* instr) {
3718 // If there is a non-return use, the context must be moved to a register.
3719 Register result = ToRegister(instr->result());
3720 if (info()->IsOptimizing()) {
3721 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3723 // If there is no frame, the context must be in cp.
3724 DCHECK(result.is(cp));
3729 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3730 DCHECK(ToRegister(instr->context()).is(cp));
3731 __ Move(scratch0(), instr->hydrogen()->pairs());
3732 __ push(scratch0());
3733 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3734 __ push(scratch0());
3735 CallRuntime(Runtime::kDeclareGlobals, 2, instr);
3739 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3740 int formal_parameter_count, int arity,
3741 LInstruction* instr) {
3742 bool dont_adapt_arguments =
3743 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3744 bool can_invoke_directly =
3745 dont_adapt_arguments || formal_parameter_count == arity;
3747 Register function_reg = r4;
3749 LPointerMap* pointers = instr->pointer_map();
3751 if (can_invoke_directly) {
3752 // Change context.
3753 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3755 // Set r3 to the arguments count if adaptation is not needed. Assumes that
3756 // r3 is available to write to at this point.
3757 if (dont_adapt_arguments) {
3758 __ mov(r3, Operand(arity));
3761 bool is_self_call = function.is_identical_to(info()->closure());
3763 // Invoke the function.
3764 if (is_self_call) {
3765 __ CallSelf();
3766 } else {
3767 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3768 __ CallJSEntry(ip);
3769 }
3771 // Set up deoptimization.
3772 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3774 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3775 ParameterCount count(arity);
3776 ParameterCount expected(formal_parameter_count);
3777 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ cmpwi(exponent, Operand::Zero());
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ bge(&done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r4) ? r3 : r4;
    Register tmp2 = input.is(r5) ? r3 : r5;
    Register tmp3 = input.is(r6) ? r3 : r6;
    Register tmp4 = input.is(r7) ? r3 : r7;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r3)) __ mr(tmp1, r3);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
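    // |x| of a boxed double is obtained by clearing the IEEE 754 sign bit in
    // the word holding sign and exponent; host-side sketch (not emitted code):
    //   hi_word &= 0x7FFFFFFF;  // +x and -x differ only in this bit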
    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);
  __ li(r0, Operand::Zero());  // clear xer
  __ mtxer(r0);
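  // Two's-complement negation overflows for exactly one input, the minimum
  // representable value, whose absolute value does not fit. XER is cleared
  // first so that neg's SetOE records a fresh overflow bit, which the
  // DeoptimizeIf(overflow, ...) below then tests via cr0.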
  __ neg(result, result, SetOE, SetRC);
  // Deoptimize on overflow.
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
  __ bind(&done);
}


#if V8_TARGET_ARCH_PPC64
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpwi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);

  // Deoptimize on overflow.
  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
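  // lis materializes kMinInt (0x80000000) in r0: the only int32 whose
  // negation is not representable, so that case deopts instead of negating.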
  __ cmpw(input, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ neg(result, result);
  __ bind(&done);
}
#endif


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ fabs(result, input);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    EmitInteger32MathAbs(instr);
  } else if (r.IsSmi()) {
#else
  } else if (r.IsSmiOrInteger32()) {
#endif
    EmitMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Register scratch = ip;
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmpi(result, Operand::Zero());
    __ bne(&done);
    __ cmpwi(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DoubleRegister input_plus_dot_five = double_scratch1;
  Register scratch1 = scratch0();
  Register scratch2 = ip;
  DoubleRegister dot_five = double_scratch0();
  Label convert, done;

  __ LoadDoubleLiteral(dot_five, 0.5, r0);
  __ fabs(double_scratch1, input);
  __ fcmpu(double_scratch1, dot_five);
  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ bgt(&convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch1, input);
#else
    __ MovDoubleHighToInt(scratch1, input);
#endif
    __ cmpi(scratch1, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ fcmpu(input, dot_five);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ li(result, Operand(1));
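    // Power ISA: isel with rA == r0 reads the first operand as the constant
    // 0, so this computes result = (input < 0.5) ? 0 : 1 without a branch.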
    __ isel(lt, result, r0, result);
    __ b(&done);
  } else {
    Label return_zero;
    __ bne(&return_zero);
    __ li(result, Operand(1));  // +0.5.
    __ b(&done);
    // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
    // flag kBailoutOnMinusZero.
    __ bind(&return_zero);
    __ li(result, Operand::Zero());
    __ b(&done);
  }

  __ bind(&convert);
  __ fadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
                   double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  DoubleRegister output_reg = ToDoubleRegister(instr->result());
  __ frsp(output_reg, input_reg);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ fsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = double_scratch0();
  Label skip, done;

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
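  // A plain fsqrt would also return -0 for an input of -0. In effect
  // (sketch): pow_half(x) == (x == -inf) ? +inf : sqrt(x + (+0.0)), where
  // adding +0 turns -0 into +0 before the square root.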
  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
  __ fcmpu(input, temp);
  __ bne(&skip);
  __ fneg(result, temp);
  __ b(&done);

  // Add +0 to convert -0 to +0.
  __ bind(&skip);
  __ fadd(result, input, kDoubleRegZero);
  __ fsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d1));
  DCHECK(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r10.is(tagged_exponent));
    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r10, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
                                double_scratch2, temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
                   1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
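  // cntlzw counts the leading zero bits of the low 32-bit word, which is
  // exactly Math.clz32: e.g. clz32(1) == 31 and clz32(0) == 32.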
  __ cntlzw_(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r3));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ JumpToJSEntry(ip);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ CallJSEntry(ip);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r3, Operand(instr->arity()));
  }

  // Change context.
  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
    __ CallJSEntry(ip);
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(r6));
    DCHECK(vector_register.is(r5));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  // No cell in r5 for construct type feedback in optimized code.
  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ Move(r5, instr->hydrogen()->site());
  } else {
    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // A packed site may have to switch to a holey variant:
      // inspect the length argument on the stack.
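      // E.g. `new Array(5)` starts out with five holes, so a packed-kind
      // site must use the holey stub unless the requested length is 0.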
      __ LoadP(r8, MemOperand(sp, 0));
      __ cmpi(r8, Operand::Zero());
      __ beq(&packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ b(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ addi(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Add(result, base, ToInteger32(offset), r0);
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }

  __ AssertNotSmi(object);

#if V8_TARGET_ARCH_PPC64
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
    if (hinstr->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register record_dest = object;
  Register record_value = no_reg;
  Register record_scratch = scratch;
#if V8_TARGET_ARCH_PPC64
  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    if (hinstr->NeedsWriteBarrier()) {
      record_value = ToRegister(instr->value());
    }
  } else {
    if (representation.IsSmi() &&
        hinstr->value()->representation().IsInteger32()) {
      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      // 64-bit Smi optimization
      // Store int value directly to upper half of the smi.
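      // On 64-bit targets a smi is the int32 payload in the upper word of
      // the tagged value (the lower word is zero), so writing the 32-bit
      // payload at SmiWordOffset(offset) stores Smi::FromInt(value) while
      // touching only half of the field.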
      offset = SmiWordOffset(offset);
      representation = Representation::Integer32();
    }
#endif
    if (access.IsInobject()) {
      Register value = ToRegister(instr->value());
      MemOperand operand = FieldMemOperand(object, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_value = value;
    } else {
      Register value = ToRegister(instr->value());
      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
      MemOperand operand = FieldMemOperand(scratch, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_dest = scratch;
      record_value = value;
      record_scratch = object;
    }
#if V8_TARGET_ARCH_PPC64
  }
#endif

  if (hinstr->NeedsWriteBarrier()) {
    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
                        GetLinkRegisterState(), kSaveFPRegs,
                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->value())
             .is(StoreGlobalViaContextDescriptor::ValueRegister()));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
  if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
    __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode()).code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ push(StoreGlobalViaContextDescriptor::ValueRegister());
    __ CallRuntime(is_strict(instr->language_mode())
                       ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   2);
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Representation representation = instr->hydrogen()->length()->representation();
  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
  DCHECK(representation.IsSmiOrInteger32());

  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
  if (instr->length()->IsConstantOperand()) {
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
    } else {
      __ Cmplwi(index, Operand(length), r0);
    }
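    // This compares index against length, the reverse of the
    // register/register case below (length against index), so the
    // out-of-bounds condition must be commuted: e.g. "length <= index"
    // becomes "index >= length".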
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
    } else {
      __ Cmplwi(length, Operand(index), r0);
    }
  } else {
    Register index = ToRegister(instr->index());
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ cmpl(length, index);
    } else {
      __ cmplw(length, index);
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset();

  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DoubleRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Add(address, external_pointer, constant_key << element_size_shift,
               r0);
      } else {
        address = external_pointer;
      }
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(address, external_pointer, r0);
    }
    if (elements_kind == FLOAT32_ELEMENTS) {
      __ frsp(double_scratch0(), value);
      __ stfs(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ stfd(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset);
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ StoreByte(value, mem_operand, r0);
        } else {
          __ stbx(value, mem_operand);
        }
        break;
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ StoreHalfWord(value, mem_operand, r0);
        } else {
          __ sthx(value, mem_operand);
        }
        break;
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ StoreWord(value, mem_operand, r0);
        } else {
          __ stwx(value, mem_operand);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, scratch);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }

  if (instr->NeedsCanonicalization()) {
    // Turn potential sNaN value into qNaN.
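    // FixedDoubleArray encodes "the hole" as one specific NaN bit pattern,
    // so NaNs being stored are first canonicalized to a quiet NaN, keeping
    // genuine NaN values distinguishable from holes.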
    __ CanonicalizeNaN(double_scratch, value);
    __ stfd(double_scratch, MemOperand(elements, base_offset));
  } else {
    __ stfd(value, MemOperand(elements, base_offset));
  }
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, scratch);
  }

  Representation representation = hinstr->value()->representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
                         r0);

  if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Add(key, store_base, offset, r0);
    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
                   EMIT_REMEMBERED_SET, check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by backing store: typed array, fast double array, or fast
  // smi/object array.
  if (instr->is_fixed_typed_array()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = r3;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ b(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
    __ ble(deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
    __ bge(deferred->entry());
  } else {
    __ cmpw(ToRegister(key), ToRegister(current_capacity));
    __ bge(deferred->entry());
  }

  if (instr->elements()->IsRegister()) {
    __ Move(result, ToRegister(instr->elements()));
  } else {
    __ LoadP(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = r3;
  __ li(result, Operand::Zero());

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ LoadP(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
    } else {
      __ SmiTag(r6, ToRegister(key));
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ TestIfSmi(result, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(from_map), r0);
  __ bne(&not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
              r0);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
                         GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r3));
    PushSafepointRegistersScope scope(this);
    __ Move(r4, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));
  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new (zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(
      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
      ToRegister(instr->result()), deferred->entry());
  __ bind(deferred->exit());
}
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r3);
  __ SmiUntag(r3);
  __ StoreToSafepointRegisterSlot(r3, result);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new (zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
  __ bgt(deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
  __ add(result, result, r0);
  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ beq(deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
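  // On 64-bit targets the smi tag shift is 32 bits, so any int32 value fits
  // and tagging cannot overflow. With 31-bit smis the 1-bit tag shift
  // overflows for values outside [-2^30, 2^30), and the deferred path boxes
  // the value in a heap number instead.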
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(dst, src);
#else
  __ SmiTagCheckOverflow(dst, src, r0);
  __ BranchOnOverflow(deferred->entry());
#endif
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
  __ bgt(deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
                                     LOperand* temp1, LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
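    // (32-bit tagging path.) dst holds the wrapped value src << 1. Untagging
    // shifts it back, leaving every bit correct except the sign bit, which
    // now equals bit 30 of the original; since overflow means bits 30 and 31
    // disagreed, flipping the sign bit with xoris recovers the original.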
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
    }
    __ ConvertIntToDouble(src, dbl_scratch);
  } else {
    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ li(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ b(deferred->entry());
  }
  __ bind(deferred->exit());
  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ li(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(instr->pointer_map(), 0,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
#if !V8_TARGET_ARCH_PPC64
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_PPC64
  }
#endif
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ andi(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  DCHECK(!result_reg.is(double_scratch0()));

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

    // Heap number map check.
    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, ip);
    if (can_convert_undefined_to_nan) {
      __ bne(&convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // Load heap number.
    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch, result_reg);
      // Rotate left by one for simple compare.
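      // -0.0 is the only double whose bit pattern is 0x8000000000000000;
      // rotated left by one bit it becomes exactly 1, so a single integer
      // compare against 1 detects it.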
      __ rldicl(scratch, scratch, 1, 0);
      __ cmpi(scratch, Operand(1));
#else
      __ MovDoubleToInt64(scratch, ip, result_reg);
      __ cmpi(ip, Operand::Zero());
      __ bne(&done);
      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, ip);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ b(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ ConvertIntToDouble(scratch, result_reg);
  __ bind(&done);
}
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // Heap number map check.
  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, ip);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ bne(&no_heap_number);
    __ mr(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_bools);
    __ li(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_false);
    __ li(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(input_reg, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ li(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ lfd(double_scratch2,
           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Preserve the heap number pointer in scratch2 for the minus-zero
      // check below.
      __ mr(scratch2, input_reg);
    }
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmpi(input_reg, Operand::Zero());
      __ bne(&done);
      __ lwz(scratch1,
             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
                                           Register::kExponentOffset));
      __ cmpwi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);

    // Branch to deferred code if the input is a HeapObject.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
                              ? NUMBER_CANDIDATE_IS_SMI
                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ TestIfSmi(ToRegister(input), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ TestIfSmi(ToRegister(input), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  }
}


void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpli(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpli(scratch, Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ andi(r0, scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   cr0);
    } else {
      __ andi(scratch, scratch, Operand(mask));
      __ cmpi(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ Cmpi(reg, Operand(object), r0);
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}


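// Note: the deferred migration path calls Runtime::kTryMigrateInstance; a
// smi result signals failure, in which case the map check deoptimizes with
// kInstanceMigrationFailed rather than retrying.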
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ li(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, temp);
  }
  __ TestIfSmi(temp, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}


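// Note: a stability check emits no code at all; it only records a
// compile-time dependency so this code is thrown away if any of the maps
// is later transitioned away from. Only the non-stable case compares the
// map word at runtime, optionally attempting migration first.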
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new (zone()) DeferredCheckMaps(this, instr, object);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ beq(&success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}


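// Note: the three clamp helpers below implement Uint8ClampedArray
// semantics: values are clamped to [0, 255], with NaN mapping to zero
// (handled inside ClampDoubleToUint8).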
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
  __ beq(&heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ li(result_reg, Operand::Zero());
  __ b(&done);

  // Heap number.
  __ bind(&heap_number);
  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());

  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ MovDoubleHighToInt(result_reg, value_reg);
  } else {
    __ MovDoubleLowToInt(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
#if V8_TARGET_ARCH_PPC64
  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
#else
  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
#endif
}


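// Note: DoAllocate emits the inline bump-pointer fast path. If inline
// allocation fails, or a constant size exceeds the regular heap-page
// limit, control transfers to the deferred slow path below, which calls
// into the runtime.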
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ b(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
    } else {
      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ subi(scratch, scratch, Operand(kPointerSize));
    __ StorePX(scratch2, MemOperand(result, scratch));
    __ cmpi(scratch, Operand::Zero());
    __ bgt(&loop);
  }
}


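// Note: the deferred path passes the allocation size to the runtime as a
// smi. On 32-bit targets a size above Smi::kMaxValue is unencodable, but
// it should also be unreachable, so the code simply aborts.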
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_PPC64
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_PPC64
    } else {
      // We should never get here at runtime => abort.
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  __ push(r3);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r10 = literals array.
  // r4 = regexp literal.
  // r3 = regexp literal clone.
  // r5 and r7-r9 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r10, instr->hydrogen()->literals());
  __ LoadP(r4, FieldMemOperand(r10, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r4, ip);
  __ bne(&materialized);

  // Create the regexp literal using a runtime function.
  // Result will be in r3.
  __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
  __ mov(r8, Operand(instr->hydrogen()->pattern()));
  __ mov(r7, Operand(instr->hydrogen()->flags()));
  __ Push(r10, r9, r8, r7);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mr(r4, r3);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
  __ b(&allocated);

  __ bind(&runtime_allocate);
  __ LoadSmiLiteral(r3, Smi::FromInt(size));
  __ Push(r4, r3);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r4);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    __ mov(r4, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r5, r4);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


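// Note: typeof never throws and always produces a string, so DoTypeof can
// answer "number" for smis directly and only calls the TypeofStub for
// heap objects.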
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r6));
  DCHECK(ToRegister(instr->result()).is(r3));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r3, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


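// Note: EmitTypeofIs emits the comparison for a typeof x == "name" test
// and returns the condition under which the true branch is taken, or
// kNoCondition when the literal matches no type and control simply jumps
// to the false label.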
Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ bge(false_label);
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ beq(true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ beq(true_label);
    __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
    // Check for undetectable objects => false.
    __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->float32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FLOAT32X4_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->int32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, INT32X4_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->bool32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, BOOL32X4_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->int16x8_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, INT16X8_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->bool16x8_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, BOOL16X8_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->int8x16_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, INT8X16_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->bool8x16_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, BOOL8X16_TYPE);
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
}


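// Note: lazy deoptimization works by patching a call sequence over the
// code that follows a call site, so EnsureSpaceForLazyDeopt pads with nops
// until the previous lazy-bailout point is at least patch_size() bytes
// behind the current position.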
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ bge(&done);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ TestIfSmi(r3, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = r8;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r3);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r4, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


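// Note: DoForInCacheArray fetches the enum cache from the map's descriptor
// array; an EnumLength of zero means there are no own enumerable
// properties, so the empty fixed array suffices as the cache.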
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmpi(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ li(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, result);
}


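// Note (assumed encoding): the field index below is a smi carrying an
// extra flag bit. The low bit of the smi value marks a mutable heap-number
// field, which must be boxed by the deferred runtime call above; after
// shifting that bit out, a non-negative index selects an in-object field
// and a negative index an entry in the out-of-object properties array.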
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry(), cr0);
  __ ShiftRightArithImm(index, index, 1);

  __ cmpi(index, Operand::Zero());
  __ blt(&out_of_object);

  __ SmiToPtrArrayOffset(r0, index);
  __ add(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out-of-object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ sub(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __
}  // namespace internal
}  // namespace v8