// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
            GenerateJumpTable() && GenerateSafepointTable();
  if (FLAG_enable_embedded_constant_pool && !rc) {
    masm()->AbortConstantPoolBuilding();
  }
  return rc;
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r4: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (is_sloppy(info_->language_mode()) && info_->MayUseThis() &&
        !info_->is_native() && info_->scope()->has_this_declaration()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadP(r5, MemOperand(sp, receiver_offset));
      __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
      __ bne(&ok);

      __ LoadP(r5, GlobalObjectOperand());
      __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));

      __ StoreP(r5, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.
    prologue_offset += Instruction::kInstrSize;
    __ addi(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(prologue_offset);
    } else {
      __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ subi(sp, sp, Operand(slots * kPointerSize));
    if (FLAG_debug_code) {
      __ Push(r3, r4);
      __ li(r0, Operand(slots));
      __ mtctr(r0);
      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
      __ mov(r4, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StorePU(r4, MemOperand(r3, -kPointerSize));
      __ bdnz(&loop);
      __ Pop(r3, r4);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r4.
    DCHECK(!info()->scope()->is_script_scope());
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r4);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r3 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ mr(cp, r3);
    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r3, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ StoreP(r3, target, r0);
        // Update the write barrier. This clobbers r6 and r3.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r3, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if
  // there are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subi(sp, sp, Operand(slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushFixedFrame(scratch0());
        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopFixedFrame(ip);
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ b(&needs_frame, SetLK);
      } else {
        __ b(&call_deopt_entry, SetLK);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ add(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

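// Illustrative sketch (not from the original source): the reachability bound
// checked at the top of GenerateJumpTable, in plain C++ form. It
// conservatively prices every jump table entry at 7 instruction slots (a
// worst-case mov of the 32-bit entry offset plus the branch) and asks whether
// a branch at pc offset 0 could still reach the end of the table within a
// 24-bit signed word displacement. Helper name is hypothetical.
static inline bool SketchJumpTableReachable(int pc_offset_in_bytes,
                                            int jump_table_length) {
  const int kWorstCaseSlotsPerEntry = 7;
  return is_int24(pc_offset_in_bytes / Assembler::kInstrSize +
                  jump_table_length * kWorstCaseSlotsPerEntry);
}
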
bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

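// Worked example (not from the original source): with no eager frame, index
// -1 maps to offset 0 from sp, index -2 to kPointerSize, index -3 to
// 2 * kPointerSize, and so on; parameter slots use negative indices here.
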
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    CRegister alt_cr = cr6;
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    DCHECK(!alt_cr.is(cr));
    __ Push(r4, scratch);
    __ mov(scratch, Operand(count));
    __ lwz(r4, MemOperand(scratch));
    __ subi(r4, r4, Operand(1));
    __ cmpi(r4, Operand::Zero(), alt_cr);
    __ bne(&no_deopt, alt_cr);
    __ li(r4, Operand(FLAG_deopt_every_n_times));
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label, cr);
  }
}

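// Illustrative sketch (not from the original source) of the stress-deopt
// counter emitted above when FLAG_deopt_every_n_times is set. In hypothetical
// C++ form, each DeoptimizeIf site behaves like:
//   if (--stress_deopt_count == 0) {
//     stress_deopt_count = FLAG_deopt_every_n_times;  // rearm the counter
//     CallDeoptEntry();                               // unconditional deopt
//   }
// The cr6 comparison and the Push/Pop of r4 and the scratch register exist
// only so the counter update does not clobber live values or the caller's
// condition register.
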
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}

void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}

void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK_EQ(0, deoptimization_literals_.length());
  for (auto function : chunk()->inlined_functions()) {
    DefineDeoptimizationLiteral(function);
  }
  inlined_function_count_ = deoptimization_literals_.length();
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmpwi(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ neg(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ neg(dividend, dividend, LeaveOE, SetRC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ li(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ li(dividend, Operand::Zero());
  }
  __ bind(&done);
}

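// Illustrative sketch (not from the original source): the branch-free
// alternative described in the comment above computes a truncating remainder
// by biasing, shifting, multiplying back (which simplifies to an 'and' for
// powers of two), and subtracting. Hypothetical helper, assuming
// 0 < shift < 32; kept here only to document the trade-off.
static inline int32_t SketchBranchFreeModByPowerOf2(int32_t n, int shift) {
  // 2^shift - 1 when n is negative, 0 otherwise; biases the arithmetic
  // shift below so it rounds toward zero like integer '%'.
  int32_t bias =
      static_cast<int32_t>(static_cast<uint32_t>(n >> 31) >> (32 - shift));
  int32_t quotient = (n + bias) >> shift;
  return n - (quotient << shift);
}
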
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ mullw(result, result, ip);
  __ sub(result, dividend, result, LeaveOE, SetRC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, cr0);
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

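// Illustrative sketch (not from the original source): the mov/mullw/sub
// sequence above computes n % d as n - TruncatingDiv(n, |d|) * |d|, where
// TruncatingDiv is the magic-number multiply. A hypothetical C++ equivalent,
// assuming d != 0 and no kMinInt / -1 case:
static inline int32_t SketchModByConst(int32_t n, int32_t d) {
  int32_t abs_d = d < 0 ? -d : d;
  int32_t quotient = n / abs_d;  // emitted as TruncatingDiv
  return n - quotient * abs_d;   // emitted as mullw + sub
}
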
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
  Label done;

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, divw will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (can_overflow) {
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result_reg, r0, result_reg, cr0);
        __ boverflow(&done, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ li(result_reg, Operand::Zero());
        __ b(&done);
        __ bind(&no_overflow_possible);
      }
    }
  }

  __ mullw(scratch, right_reg, scratch);
  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, cr0);
    __ cmpwi(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ neg(result, dividend);
    return;
  }
  if (shift == 0) {
    __ mr(result, dividend);
  } else {
    if (shift == 1) {
      __ srwi(result, dividend, Operand(31));
    } else {
      __ srawi(result, dividend, 31);
      __ srwi(result, result, Operand(32 - shift));
    }
    __ add(result, dividend, result);
    __ srawi(result, result, shift);
  }
  if (divisor < 0) __ neg(result, result);
}

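// Illustrative sketch (not from the original source): the srawi/srwi/add
// sequence above is the classic biased arithmetic shift. An arithmetic right
// shift alone rounds toward -infinity; adding 2^shift - 1 to negative
// dividends first makes it round toward zero. Hypothetical helper, assuming
// 0 < shift < 32:
static inline int32_t SketchTruncatingDivByPowerOf2(int32_t n, int shift) {
  uint32_t bias = static_cast<uint32_t>(n >> 31) >> (32 - shift);
  return (n + static_cast<int32_t>(bias)) >> shift;
}
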
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ mullw(scratch, result, ip);
    __ cmpw(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

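// Illustrative sketch (not from the original source): when some use of the
// division observes the exact value, the mullw/cmpw pair above re-multiplies
// the quotient and deoptimizes on any remainder, i.e. in hypothetical C++:
//   if (quotient * divisor != dividend) Deoptimize(kLostPrecision);
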
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    Register scratch = scratch0();
    __ mullw(scratch, divisor, result);
    __ cmpw(dividend, scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ srawi(result, dividend, shift);
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
  if (divisor == -1 && can_overflow) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#else
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    oe = SetOE;
  }
#endif

  __ neg(result, dividend, oe, SetRC);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

  // If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
  if (!can_overflow) {
#endif
    __ ShiftRightArithImm(result, result, shift);
    return;
#if !V8_TARGET_ARCH_PPC64
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow, done;
  __ boverflow(&overflow, cr0);
  __ srawi(result, result, shift);
  __ b(&done);
  __ bind(&overflow);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}

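// Illustrative sketch (not from the original source): for a negative
// power-of-two divisor the code above uses the identity
//   floor(n / -(2^shift)) == (-n) >> shift
// because an arithmetic right shift already rounds toward -infinity. The
// identity fails only when negating n overflows (n == kMinInt), which is
// exactly what the OE/cr0 overflow checks above guard against.
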
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmpwi(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ b(&done);
  __ bind(&needs_adjustment);
  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ subi(result, result, Operand(1));
  __ bind(&done);
}

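// Illustrative sketch (not from the original source) of the adjust-around-
// truncation trick used above. Hypothetical helper, assuming d != 0 and no
// kMinInt overflow:
static inline int32_t SketchFlooringDivByConst(int32_t n, int32_t d) {
  if (n == 0 || (n < 0) == (d < 0)) {
    return n / d;  // truncating and flooring division agree
  }
  // Signs differ and n != 0: nudge the dividend one step toward zero,
  // divide, then subtract one. This equals floor(n / d) exactly.
  return (n + (d > 0 ? 1 : -1)) / d - 1;
}
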
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  Label done;
  Register scratch = scratch0();
// If both operands have the same sign then we are done.
#if V8_TARGET_ARCH_PPC64
  __ xor_(scratch, dividend, divisor);
  __ cmpwi(scratch, Operand::Zero());
  __ bge(&done);
#else
  __ xor_(scratch, dividend, divisor, SetRC);
  __ bge(&done, cr0);
#endif

  // If there is no remainder then we are done.
  __ mullw(scratch, divisor, result);
  __ cmpw(dividend, scratch);
  __ beq(&done);

  // We performed a truncating division. Correct the result.
  __ subi(result, result, Operand(1));
  __ bind(&done);
}

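// Illustrative sketch (not from the original source): the tail above turns
// divw's truncating quotient into a flooring one, in hypothetical C++:
//   if (((dividend ^ divisor) < 0) && (dividend % divisor != 0)) result -= 1;
// The xor encodes "operands have opposite signs" in a single sign-bit test.
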
void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmadd(result, multiplier, multiplicand, addend);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmsub(result, multiplier, multiplicand, minuend);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      __ cmpi(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ li(r0, Operand::Zero());  // clear xer
            __ mtxer(r0);
            __ neg(result, left, SetOE, SetRC);
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
          } else {
            __ neg(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
          }
#endif
        } else {
          __ neg(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ cmpi(left, Operand::Zero());
#if V8_TARGET_ARCH_PPC64
          } else {
            __ cmpwi(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
        }
        __ li(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftImm(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ add(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ sub(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ Mul(result, left, ip);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ Mul(result, result, scratch);
      } else {
        __ Mul(result, left, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mulhw(scratch, result, right);
        __ mullw(result, result, right);
      } else {
        __ mulhw(scratch, left, right);
        __ mullw(result, left, right);
      }
      __ TestIfInt32(scratch, result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_PPC64
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ xor_(r0, left, right, SetRC);
        __ bge(&done, cr0);
#if V8_TARGET_ARCH_PPC64
      } else {
        __ xor_(r0, left, right);
        __ cmpwi(r0, Operand::Zero());
        __ bge(&done);
      }
#endif
      // Bail out if the result is minus zero.
      __ cmpi(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}

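// Illustrative sketch (not from the original source): the constant-multiply
// strength reduction above starts from the branch-free absolute value
//   mask = c >> 31;  abs = (c + mask) ^ mask;  // mask is 0 or -1
// and then rewrites c == +/-2^k as a shift, c == +/-(2^k + 1) as a shift plus
// an add, and c == +/-(2^k - 1) as a shift followed by a subtract, negating
// the product at the end when c is negative.
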
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andi(result, left, right);
        break;
      case Token::BIT_OR:
        __ ori(result, left, right);
        break;
      case Token::BIT_XOR:
        __ xori(result, left, right);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ And(result, left, right);
        break;
      case Token::BIT_OR:
        __ Or(result, left, right);
        break;
      case Token::BIT_XOR:
        if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
          __ notx(result, left);
        } else {
          __ Xor(result, left, right);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ subfic(scratch, scratch, Operand(32));
        __ rotlw(result, left, scratch);
        break;
      case Token::SAR:
        __ sraw(result, left, scratch);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ srw(result, left, scratch, SetRC);
#if V8_TARGET_ARCH_PPC64
          __ extsw(result, result, SetRC);
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
        } else {
          __ srw(result, left, scratch);
        }
        break;
      case Token::SHL:
        __ slw(result, left, scratch);
#if V8_TARGET_ARCH_PPC64
        __ extsw(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rotrwi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ srawi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srwi(result, left, Operand(shift_count));
        } else {
          if (instr->can_deopt()) {
            __ cmpwi(left, Operand::Zero());
            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ sldi(result, left, Operand(shift_count));
#else
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ slwi(result, left, Operand(shift_count - 1));
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
          } else {
            __ slwi(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_PPC64
            __ extsw(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

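// Illustrative sketch (not from the original source): PPC has no
// rotate-right instruction, so the variable ROR above is emitted as
//   rotate_right(a, b) == rotate_left(a, 32 - b)
// with subfic computing 32 - b. rlwnm only uses the low five bits of the
// rotate count, so the b == 0 case (a rotate by 32) degenerates correctly
// to a rotate by zero.
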
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
#else
  const bool isInteger = false;
#endif
  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, -(ToOperand(right).immediate()), r0);
    } else {
      __ sub(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
                                scratch0(), r0);
    } else {
      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}

void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

  Operand right_operand = ToOperand(right);
  if (is_int16(right_operand.immediate())) {
    __ subfic(ToRegister(result), ToRegister(left), right_operand);
  } else {
    __ mov(r0, right_operand);
    __ sub(ToRegister(result), r0, ToRegister(left));
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ mov(ip, Operand(lo));
    __ mov(scratch0(), Operand(hi));
    __ MovInt64ToDouble(result, scratch0(), ip);
    return;
  }
#endif
  double v = instr->value();
  __ LoadDoubleLiteral(result, v, scratch0());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}

void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  DCHECK(object.is(result));
  DCHECK(object.is(r3));
  DCHECK(!scratch.is(scratch0()));
  DCHECK(!scratch.is(object));

  if (index->value() == 0) {
    __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    Label runtime, done;
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ LoadP(scratch, MemOperand(scratch));
      __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ bne(&runtime);
      __ LoadP(result,
               FieldMemOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ b(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ LoadSmiLiteral(r4, index);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}

MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ add(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
    __ add(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}

void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ andi(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpi(scratch,
            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
                                                          : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbz(result, operand);
  } else {
    __ lhz(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type
            : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ stb(value, operand);
  } else {
    __ sth(value, operand);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
                           instr->hydrogen()->representation().IsExternal());
#else
  const bool isInteger = false;
#endif

  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, ToOperand(right).immediate(), r0);
    } else {
      __ add(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
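    // The add above was done in 64-bit registers, so a 32-bit overflow
    // cannot wrap; it is detected after the fact by checking that the
    // result still fits in a sign-extended int32.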
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
                                scratch0(), r0);
    } else {
      __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, ip);
    Register result_reg = ToRegister(instr->result());
    Label return_left, done;
#if V8_TARGET_ARCH_PPC64
    if (instr->hydrogen_value()->representation().IsSmi()) {
#endif
      __ cmp(left_reg, right_reg);
#if V8_TARGET_ARCH_PPC64
    } else {
      __ cmpw(left_reg, right_reg);
    }
#endif
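    // ISEL-capable processors can pick min/max with a single branchless
    // conditional select; other cores use the compare-and-branch sequence
    // below.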
    if (CpuFeatures::IsSupported(ISELECT)) {
      __ isel(cond, result_reg, left_reg, right_reg);
    } else {
      __ b(cond, &return_left);
      __ Move(result_reg, right_reg);
      __ b(&done);
      __ bind(&return_left);
      __ Move(result_reg, left_reg);
      __ bind(&done);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister left_reg = ToDoubleRegister(left);
    DoubleRegister right_reg = ToDoubleRegister(right);
    DoubleRegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ fcmpu(left_reg, right_reg);
    __ bunordered(&check_nan_left);
    __ beq(&check_zero);
    __ b(cond, &return_left);
    __ b(&return_right);

    __ bind(&check_zero);
    __ fcmpu(left_reg, kDoubleRegZero);
    __ bne(&return_left);  // left == right != 0.

    // At this point, both left and right are either 0 or -0.
    // N.B. The following works because +0 + -0 == +0
    if (operation == HMathMinMax::kMathMin) {
      // For min we want logical-or of sign bit: -(-L + -R)
      __ fneg(left_reg, left_reg);
      __ fsub(result_reg, left_reg, right_reg);
      __ fneg(result_reg, result_reg);
    } else {
      // For max we want logical-and of sign bit: (L + R)
      __ fadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    __ bind(&check_nan_left);
    __ fcmpu(left_reg, left_reg);
    __ bunordered(&return_left);  // left == NaN.

    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ fmr(result_reg, right_reg);
    }
    __ b(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ fmr(result_reg, left_reg);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ fadd(result, left, right);
      break;
    case Token::SUB:
      __ fsub(result, left, right);
      break;
    case Token::MUL:
      __ fmul(result, left, right);
      break;
    case Token::DIV:
      __ fdiv(result, left, right);
      break;
    case Token::MOD: {
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));
  DCHECK(ToRegister(instr->result()).is(r3));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


template <class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
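
  // If one successor is the block emitted next, fall through to it and emit
  // at most one conditional branch; otherwise both a conditional and an
  // unconditional branch are needed.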
  if (right_block == left_block || cond == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
  } else if (right_block == next_block) {
    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
  } else {
    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  DoubleRegister dbl_scratch = double_scratch0();
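  // After an fcmpu against +0.0 in cr7, CR_EQ is set when the value is zero
  // and CR_FU when the comparison is unordered (NaN). This mask selects those
  // two bits so that a single mfcr/andi. sequence makes cr0 "eq" exactly when
  // the value is neither zero nor NaN, i.e. truthy.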
  const uint32_t crZOrNaNBits =
      (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
       1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmpwi(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmpi(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ fcmpu(reg, kDoubleRegZero, cr7);
    __ mfcr(r0);
    __ andi(r0, r0, Operand(crZOrNaNBits));
    EmitBranch(instr, eq, cr0);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ cmpi(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
      __ mfcr(r0);
      __ andi(r0, r0, Operand(crZOrNaNBits));
      EmitBranch(instr, eq, cr0);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmpi(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ beq(instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmpi(reg, Operand::Zero());
        __ beq(instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ TestIfSmi(reg, r0);
        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ TestBit(ip, Map::kIsUndetectable, r0);
          __ bne(instr->FalseLabel(chunk_), cr0);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ bge(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ bge(&not_string);
        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmpi(ip, Operand::Zero());
        __ bne(instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
        // SIMD value -> true.
        __ CompareInstanceType(map, ip, FLOAT32X4_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ bne(&not_heap_number);
        __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        // Test the double value. Zero and NaN are false.
        __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
        __ mfcr(r0);
        __ andi(r0, r0, Operand(crZOrNaNBits));
        __ bne(instr->FalseLabel(chunk_), cr0);
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }


Condition LCodeGen::TokenToCondition(Token::Value op) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = lt;
      break;
    case Token::GT:
      cond = gt;
      break;
    case Token::LTE:
      cond = le;
      break;
    case Token::GTE:
      cond = ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ bunordered(instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ Cmplwi(ToRegister(left), Operand(value), r0);
          } else {
            __ Cmpwi(ToRegister(left), Operand(value), r0);
          }
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ Cmplwi(ToRegister(right), Operand(value), r0);
          } else {
            __ Cmpwi(ToRegister(right), Operand(value), r0);
          }
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (is_unsigned) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmp(ToRegister(left), ToRegister(right));
        }
      } else {
        if (is_unsigned) {
          __ cmplw(ToRegister(left), ToRegister(right));
        } else {
          __ cmpw(ToRegister(left), ToRegister(right));
        }
      }
    }
    EmitBranch(instr, cond);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ cmp(left, right);
  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  __ fcmpu(input_reg, input_reg);
  EmitFalseBranch(instr, ordered);
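
  // The hole is encoded as a NaN with a distinguished bit pattern; comparing
  // its upper 32 bits against kHoleNanUpper32 is sufficient to tell it apart
  // from every other NaN.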
  Register scratch = scratch0();
  __ MovDoubleHighToInt(scratch, input_reg);
  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
  EmitBranch(instr, eq);
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ fcmpu(value, kDoubleRegZero);
    EmitFalseBranch(instr, ne);
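    // The value compares equal to zero, so it is either +0 or -0; the two
    // differ only in the sign bit, which is tested on the raw bits below.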
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch, value);
#else
    __ MovDoubleHighToInt(scratch, value);
#endif
    __ cmpi(scratch, Operand::Zero());
    EmitBranch(instr, lt);
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()), DO_SMI_CHECK);
#if V8_TARGET_ARCH_PPC64
    __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ li(ip, Operand(1));
    __ rotrdi(ip, ip, 1);  // ip = 0x80000000_00000000
    __ cmp(scratch, ip);
#else
    __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    Label skip;
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmp(scratch, r0);
    __ bne(&skip);
    __ cmpi(ip, Operand::Zero());
    __ bind(&skip);
#endif
    EmitBranch(instr, eq);
  }
}


Condition LCodeGen::EmitIsObject(Register input, Register temp1,
                                 Label* is_not_object, Label* is_object) {
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ beq(is_object);

  // Load map.
  __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ TestBit(temp2, Map::kIsUndetectable, r0);
  __ bne(is_not_object, cr0);

  // Load instance type and check that it is in object type range.
  __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ blt(is_not_object);
  __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
                                     instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input, Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
                              ? OMIT_SMI_CHECK
                              : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ TestIfSmi(input_reg, r0);
  EmitBranch(instr, eq, cr0);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ TestBit(temp, Map::kIsUndetectable, r0);
  EmitBranch(instr, ne, cr0);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals that no smi code was inlined.
  __ cmpi(r3, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return ge;
  if (from == FIRST_TYPE) return le;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
  __ and_(r0, scratch, r0, SetRC);
  EmitBranch(instr, eq, cr0);
}


// Branches to a label or falls through with the answer in flags. Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
                               Handle<String> class_name, Register input,
                               Register temp, Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ blt(is_false);
    __ beq(is_true);
    __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ beq(is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ bgt(is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  Register instance_type = ip;
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
  if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
    __ bne(is_true);
  } else {
    __ bne(is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ LoadP(temp,
           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized, since it comes
  // from a literal. The name in the constructor is internalized because of
  // the way the context is booted. This routine isn't expected to work for
  // random API-created classes, and it doesn't have to, because they can't
  // be accessed with natives syntax. Since both sides are internalized, an
  // identity comparison suffices.
  __ Cmpi(temp, Operand(class_name), r0);
  // End with the answer in flags.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ Cmpi(temp, Operand(instr->map()), r0);
  EmitBranch(instr, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r3));   // Object is in r3.
  DCHECK(ToRegister(instr->right()).is(r4));  // Function is in r4.

  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  if (CpuFeatures::IsSupported(ISELECT)) {
    __ mov(r4, Operand(factory()->true_value()));
    __ mov(r5, Operand(factory()->false_value()));
    __ cmpi(r3, Operand::Zero());
    __ isel(eq, r3, r4, r5);
  } else {
    Label equal, done;
    __ cmpi(r3, Operand::Zero());
    __ beq(&equal);

    __ mov(r3, Operand(factory()->false_value()));
    __ b(&done);

    __ bind(&equal);
    __ mov(r3, Operand(factory()->true_value()));
    __ bind(&done);
  }
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
                                                 &load_bool_);
    }
    LInstruction* instr() override { return instr_; }
    Label* map_check() { return &map_check_; }
    Label* load_bool() { return &load_bool_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
    Label load_bool_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block trampoline emission to ensure the positions of instructions are
    // as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation, so that we can later patch with the
    // cached map.
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(cell));
    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(map, ip);
    __ bc_short(ne, &cache_miss);
    __ bind(deferred->load_bool());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation, so that we can later patch with true or
    // false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, ip);
  __ beq(&false_result);

  // A string value is not an instance of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result, cr0);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. The deferred code also produces
  // true or false.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check,
                                               Label* bool_load) {
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(flags |
                                             InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(isolate(), flags);

  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());

  __ Move(InstanceofStub::right(), instr->function());
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    Handle<Code> code = stub.GetCode();
    // Include instructions below in delta: bitwise_mov32 + li + call
    int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
    // The labels must already be bound, since the code has a predictable size
    // up to the call instruction.
    DCHECK(map_check->is_bound());
    DCHECK(bool_load->is_bound());
    int map_check_delta =
        masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
    int bool_load_delta =
        masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
    // r8 is the delta from our callee's lr to the location of the map check.
    __ bitwise_mov32(r8, map_check_delta + additional_delta);
    // r9 is the delta from map check to bool load.
    __ li(r9, Operand(map_check_delta - bool_load_delta));
    CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
              masm_->InstructionsGeneratedSince(map_check));
  }
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value (r3) into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals that no smi code was inlined.
  __ cmpi(r3, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
    __ LoadRoot(r5, Heap::kFalseValueRootIndex);
    __ isel(condition, ToRegister(instr->result()), r4, r5);
  } else {
    Label true_value, done;

    __ b(condition, &true_value);

    __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
    __ b(&done);

    __ bind(&true_value);
    __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);

    __ bind(&done);
  }
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r3. We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(r3);
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
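    // The "+ 1" accounts for the receiver slot that is popped along with the
    // parameters.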
    if (NeedsEagerFrame()) {
      no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
    } else if (sp_delta != 0) {
      __ addi(sp, sp, Operand(sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    if (NeedsEagerFrame()) {
      no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
    }
    __ SmiToPtrArrayOffset(r0, reg);
    __ add(sp, sp, r0);
  }

  __ blr();

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(r3));

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Move(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}


template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ Move(vector_register, vector);
  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
                                         SLOPPY, PREMONOMORPHIC).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadGlobalViaContext(LLoadGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
  if (depth <= LoadGlobalViaContextStub::kMaximumDepth) {
    __ mov(LoadGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    __ mov(LoadGlobalViaContextDescriptor::NameRegister(),
           Operand(instr->name()));
    Handle<Code> stub =
        CodeFactory::LoadGlobalViaContext(isolate(), depth).code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ Push(instr->name());
    __ CallRuntime(Runtime::kLoadGlobalViaContext, 2);
  }
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ LoadP(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        Register scratch = scratch0();
        __ mov(scratch, Operand(factory()->undefined_value()));
        __ isel(eq, result, scratch, result);
      } else {
        Label skip;
        __ bne(&skip);
        __ mov(result, Operand(factory()->undefined_value()));
        __ bind(&skip);
      }
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadP(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
    } else {
      __ bne(&skip_assignment);
    }
  }

  __ StoreP(value, target, r0);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
                              GetLinkRegisterState(), kSaveFPRegs,
                              EMIT_REMEMBERED_SET, check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ LoadRepresentation(result, operand, access.representation(), r0);
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ lfd(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read the int value directly from the upper half of the smi.
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
#endif

  __ LoadRepresentation(result, FieldMemOperand(object, offset),
                        representation, r0);
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(r3));

  // Name is always in r5.
  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(
          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
          instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ LoadP(result,
           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr, Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  if (CpuFeatures::IsSupported(ISELECT)) {
    // Get the prototype from the initial map (optimistic).
    __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
    __ isel(eq, result, ip, result);
  } else {
    Label done;
    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
    __ bne(&done);

    // Get the prototype from the initial map.
    __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));

    __ bind(&done);
  }
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  // There are two words between the frame pointer and the last argument.
  // Subtracting from length accounts for only one of them; add one more.
  if (instr->length()->IsConstantOperand()) {
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    if (instr->index()->IsConstantOperand()) {
      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
      int index = (const_length - const_index) + 1;
      __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
    } else {
      Register index = ToRegister(instr->index());
      __ subfic(result, index, Operand(const_length + 1));
      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    }
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister(instr->length());
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = const_index - 1;
    if (loc != 0) {
      __ subi(result, length, Operand(loc));
      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    } else {
      __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
      __ LoadPX(result, MemOperand(arguments, result));
    }
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    __ sub(result, length, index);
    __ addi(result, result, Operand(1));
    __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
    __ LoadPX(result, MemOperand(arguments, result));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    if (key_is_constant) {
      __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
             r0);
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(scratch0(), external_pointer, r0);
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ lfs(result, MemOperand(scratch0(), base_offset));
    } else {  // i.e. one of the float64 kinds.
      __ lfd(result, MemOperand(scratch0(), base_offset));
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset);
    switch (elements_kind) {
      case EXTERNAL_INT8_ELEMENTS:
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ LoadByte(result, mem_operand, r0);
        } else {
          __ lbzx(result, mem_operand);
        }
        __ extsb(result, result);
        break;
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        if (key_is_constant) {
          __ LoadByte(result, mem_operand, r0);
        } else {
          __ lbzx(result, mem_operand);
        }
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case INT16_ELEMENTS:
        if (key_is_constant) {
          __ LoadHalfWordArith(result, mem_operand, r0);
        } else {
          __ lhax(result, mem_operand);
        }
        break;
      case EXTERNAL_UINT16_ELEMENTS:
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ LoadHalfWord(result, mem_operand, r0);
        } else {
          __ lhzx(result, mem_operand);
        }
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case INT32_ELEMENTS:
        if (key_is_constant) {
          __ LoadWordArith(result, mem_operand, r0);
        } else {
          __ lwax(result, mem_operand);
        }
        break;
      case EXTERNAL_UINT32_ELEMENTS:
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ LoadWord(result, mem_operand, r0);
        } else {
          __ lwzx(result, mem_operand);
        }
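        // A uint32 value with the most significant bit set cannot be
        // represented as an int32, so unless the result is explicitly used
        // as a uint32 we must deoptimize.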
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
          __ cmplw(result, r0);
          DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }

  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, r0);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }
  __ lfd(result, MemOperand(elements, base_offset));
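
  // The hole is a NaN with a distinguished upper word, so loading just the
  // exponent word of the element is enough to detect it.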
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (is_int16(base_offset + Register::kExponentOffset)) {
      __ lwz(scratch,
             MemOperand(elements, base_offset + Register::kExponentOffset));
    } else {
      __ addi(scratch, elements, Operand(base_offset));
      __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
    }
    __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  HLoadKeyed* hinstr = instr->hydrogen();
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(r0, key);
    } else {
      __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, r0);
  }

  bool requires_hole_check = hinstr->RequiresHoleCheck();
  Representation representation = hinstr->representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsInteger32() &&
      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
    DCHECK(!requires_hole_check);
    // Read the int value directly from the upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
                        r0);

  // Check for the hole value.
  if (requires_hole_check) {
    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
      __ TestIfSmi(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
    __ cmp(result, scratch);
    __ bne(&done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid.
      // Otherwise it needs to bail out.
      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
    }
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ bind(&done);
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_typed_elements()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
                                         bool key_is_constant, bool key_is_smi,
                                         int constant_key,
                                         int element_size_shift,
                                         int base_offset) {
  Register scratch = scratch0();

  if (key_is_constant) {
    return MemOperand(base, (constant_key << element_size_shift) + base_offset);
  }

  bool needs_shift =
      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
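
  // A smi key is already shifted left by kSmiTagSize + kSmiShiftSize; when
  // that matches the element size shift, the key needs no further scaling.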
  if (!(base_offset || needs_shift)) {
    return MemOperand(base, key);
  }

  if (needs_shift) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    key = scratch;
  }

  if (base_offset) {
    __ Add(scratch, key, base_offset, r0);
  }

  return MemOperand(base, scratch);
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
                        isolate(), instr->hydrogen()->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ subi(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ LoadP(result,
             MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    if (CpuFeatures::IsSupported(ISELECT)) {
      __ isel(eq, result, scratch, fp);
    } else {
      Label done, adapted;
      __ beq(&adapted);
      __ mr(result, fp);
      __ b(&done);

      __ bind(&adapted);
      __ mr(result, scratch);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame is present the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ beq(&done);

  // An arguments adaptor frame is present. Get the argument length from there.
  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ LoadP(result,
           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, result_in_receiver;

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode functions.
    __ LoadP(scratch,
             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ lwz(scratch,
           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
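    // On 32-bit targets the compiler-hints word is smi-tagged, so the bit
    // indices below are offset by kSmiTagSize there; on PPC64 the hints are
    // tested untagged.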
    __ TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
               SharedFunctionInfo::kStrictModeFunction,
#else
               SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
#endif
               r0);
    __ bne(&result_in_receiver, cr0);

    // Do not transform the receiver to object for builtins.
    __ TestBit(scratch,
#if V8_TARGET_ARCH_PPC64
               SharedFunctionInfo::kNative,
#else
               SharedFunctionInfo::kNative + kSmiTagSize,
#endif
               r0);
    __ bne(&result_in_receiver, cr0);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ beq(&global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ beq(&global_object);

  // Deoptimize if the receiver is not a JS object.
  __ TestIfSmi(receiver, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);

  __ b(&result_in_receiver);
  __ bind(&global_object);
  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
  __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
  if (result.is(receiver)) {
    __ bind(&result_in_receiver);
  } else {
    Label result_ok;
    __ b(&result_ok);
    __ bind(&result_in_receiver);
    __ mr(result, receiver);
    __ bind(&result_ok);
  }
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DCHECK(receiver.is(r3));  // Used for parameter count.
  DCHECK(function.is(r4));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(r3));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpli(length, Operand(kArgumentsLimit));
  DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mr(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ addi(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmpi(length, Operand::Zero());
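  // The copy loop below is driven by the count register: mtctr seeds it with
  // the argument count and bdnz decrements it and branches while non-zero.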
  __ beq(&invoke);
  __ mtctr(length);
  __ bind(&loop);
  __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
  __ LoadPX(scratch, MemOperand(elements, r0));
  __ push(scratch);
  __ addi(length, length, Operand(-1));
  __ bdnz(&loop);

  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver, which is r3, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}




void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    DCHECK(result.is(cp));
  }
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  Register function_reg = r4;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // Change context.
    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));

    // Set r3 to arguments count if adaptation is not needed. Assumes that r3
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(r3, Operand(arity));
    }

    bool is_self_call = function.is_identical_to(info()->closure());
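
    // Invoking a function that is identical to the closure being compiled
    // can use CallSelf(), a direct call to the code object under
    // construction, instead of loading the code entry from the JSFunction.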
    // Invoke function.
    if (is_self_call) {
      __ CallSelf();
    } else {
      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
      __ CallJSEntry(ip);
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  DCHECK(instr->context() != NULL);
  DCHECK(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ cmpwi(exponent, Operand::Zero());
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ bge(&done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r4) ? r3 : r4;
    Register tmp2 = input.is(r5) ? r3 : r5;
    Register tmp3 = input.is(r6) ? r3 : r6;
    Register tmp4 = input.is(r7) ? r3 : r7;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r3)) __ mr(tmp1, r3);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);
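  // Negate with overflow detection: negating kMinInt overflows. Clear XER
  // first so a stale OV bit cannot trigger a spurious deopt, then let neg
  // set OV/SO and record it in cr0.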
  __ li(r0, Operand::Zero());  // clear xer
  __ mtxer(r0);
  __ neg(result, result, SetOE, SetRC);
  // Deoptimize on overflow.
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
  __ bind(&done);
}


#if V8_TARGET_ARCH_PPC64
void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Label done;
  __ cmpwi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);

  // Deoptimize on overflow.
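  // kMinInt (0x80000000) is the only 32-bit value whose absolute value is
  // not representable in 32 bits, so it is compared for directly rather
  // than relying on overflow flags.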
  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
  __ cmpw(input, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ neg(result, result);
  __ bind(&done);
}
#endif


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ fabs(result, input);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    EmitInteger32MathAbs(instr);
  } else if (r.IsSmi()) {
#else
  } else if (r.IsSmiOrInteger32()) {
#endif
    EmitMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Register scratch = ip;
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmpi(result, Operand::Zero());
    __ bne(&done);
    __ cmpwi(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DoubleRegister input_plus_dot_five = double_scratch1;
  Register scratch1 = scratch0();
  Register scratch2 = ip;
  DoubleRegister dot_five = double_scratch0();
  Label convert, done;
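
  // Strategy: values outside [-0.5, +0.5] take the convert path below and
  // are rounded as floor(input + 0.5); the in-range cases collapse to -0,
  // +0 or 1 and are handled inline.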
  __ LoadDoubleLiteral(dot_five, 0.5, r0);
  __ fabs(double_scratch1, input);
  __ fcmpu(double_scratch1, dot_five);
  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ bgt(&convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch1, input);
#else
    __ MovDoubleHighToInt(scratch1, input);
#endif
    __ cmpi(scratch1, Operand::Zero());
    // [-0.5, -0].
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ fcmpu(input, dot_five);
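  // cr0 now holds input compared against 0.5: lt means the result is 0 and
  // eq (exactly +0.5) means 1. With ISELECT this is branch-free, since isel
  // reads r0 as the constant zero: result = lt ? 0 : 1.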
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ li(result, Operand(1));
    __ isel(lt, result, r0, result);
    __ b(&done);
  } else {
    Label return_zero;
    __ bne(&return_zero);
    __ li(result, Operand(1));  // +0.5.
    __ b(&done);
    // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
    // flag kBailoutOnMinusZero.
    __ bind(&return_zero);
    __ li(result, Operand::Zero());
    __ b(&done);
  }

  __ bind(&convert);
  __ fadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
                   double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}


void LCodeGen::DoMathFround(LMathFround* instr) {
  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  DoubleRegister output_reg = ToDoubleRegister(instr->result());
  __ frsp(output_reg, input_reg);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ fsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = double_scratch0();
  Label done, skip;

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
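  // so -Infinity must be intercepted here before falling through to fsqrt.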
  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
  __ fcmpu(input, temp);
  __ bne(&skip);
  __ fneg(result, temp);
  __ b(&done);

  // Add +0 to convert -0 to +0 (fsqrt would return -0 for a -0 input, but
  // Math.pow(-0, 0.5) is +0).
  __ bind(&skip);
  __ fadd(result, input, kDoubleRegZero);
  __ fsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(d1));
  DCHECK(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r10.is(tagged_exponent));
    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r10, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
                                double_scratch2, temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  __ PrepareCallCFunction(0, 1, scratch0());
  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
                   1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}


void LCodeGen::DoMathClz32(LMathClz32* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cntlzw_(result, input);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}


void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(r3));

  if (instr->hydrogen()->IsTailCall()) {
    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ JumpToJSEntry(ip);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ CallJSEntry(ip);
    }
    generator.AfterCall();
  }
}


void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r3, Operand(instr->arity()));
  }

  // Change context.
  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));

  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
        Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
    __ CallJSEntry(ip);
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  int arity = instr->arity();
  CallFunctionFlags flags = instr->hydrogen()->function_flags();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(r6));
    DCHECK(vector_register.is(r5));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Move(vector_register, vector);
    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));

    CallICState::CallType call_type =
        (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  // No cell in r5 for construct type feedback in optimized code.
  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(r4));
  DCHECK(ToRegister(instr->result()).is(r3));

  __ mov(r3, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ Move(r5, instr->hydrogen()->site());
  } else {
    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
  }
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a holey elements kind: look at the first (length)
      // argument.
      __ LoadP(r8, MemOperand(sp, 0));
      __ cmpi(r8, Operand::Zero());
      __ beq(&packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ b(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ addi(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Add(result, base, ToInteger32(offset), r0);
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  HStoreNamedField* hinstr = instr->hydrogen();
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = hinstr->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }

  __ AssertNotSmi(object);

#if V8_TARGET_ARCH_PPC64
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DCHECK(!hinstr->has_transition());
    DCHECK(!hinstr->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    return;
  }

  if (hinstr->has_transition()) {
    Handle<Map> transition = hinstr->transition_map();
    AddDeprecationDependency(transition);
    __ mov(scratch, Operand(transition));
    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
    if (hinstr->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register record_dest = object;
  Register record_value = no_reg;
  Register record_scratch = scratch;
#if V8_TARGET_ARCH_PPC64
  if (FLAG_unbox_double_fields && representation.IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    if (hinstr->NeedsWriteBarrier()) {
      record_value = ToRegister(instr->value());
    }
  } else {
    if (representation.IsSmi() &&
        hinstr->value()->representation().IsInteger32()) {
      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
      // 64-bit Smi optimization
      // Store int value directly to upper half of the smi.
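      // A 64-bit Smi keeps its 32-bit payload in one word of the cell;
      // SmiWordOffset() picks that word for the target endianness, so a
      // 32-bit store updates the payload while the tag half stays intact.
      // This is only safe for fields that already hold a Smi, hence the
      // STORE_TO_INITIALIZED_ENTRY assert above.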
      offset = SmiWordOffset(offset);
      representation = Representation::Integer32();
    }
#endif
    if (access.IsInobject()) {
      Register value = ToRegister(instr->value());
      MemOperand operand = FieldMemOperand(object, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_value = value;
    } else {
      Register value = ToRegister(instr->value());
      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
      MemOperand operand = FieldMemOperand(scratch, offset);
      __ StoreRepresentation(value, operand, representation, r0);
      record_dest = scratch;
      record_value = value;
      record_scratch = object;
    }
#if V8_TARGET_ARCH_PPC64
  }
#endif

  if (hinstr->NeedsWriteBarrier()) {
    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
                        GetLinkRegisterState(), kSaveFPRegs,
                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStoreGlobalViaContext(LStoreGlobalViaContext* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->value())
             .is(StoreGlobalViaContextDescriptor::ValueRegister()));

  int const slot = instr->slot_index();
  int const depth = instr->depth();
  if (depth <= StoreGlobalViaContextStub::kMaximumDepth) {
    __ mov(StoreGlobalViaContextDescriptor::SlotRegister(), Operand(slot));
    __ mov(StoreGlobalViaContextDescriptor::NameRegister(),
           Operand(instr->name()));
    Handle<Code> stub = CodeFactory::StoreGlobalViaContext(
                            isolate(), depth, instr->language_mode()).code();
    CallCode(stub, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Push(Smi::FromInt(slot));
    __ Push(instr->name());
    __ push(StoreGlobalViaContextDescriptor::ValueRegister());
    __ CallRuntime(is_strict(instr->language_mode())
                       ? Runtime::kStoreGlobalViaContext_Strict
                       : Runtime::kStoreGlobalViaContext_Sloppy,
                   3);
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Representation representation = instr->hydrogen()->length()->representation();
  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
  DCHECK(representation.IsSmiOrInteger32());

  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
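  // cc is the deopt condition for a (length cmp index) comparison: deopt
  // when length < index, or when length <= index if equality is not
  // allowed. The constant-length case below compares (index cmp length),
  // so cc is commuted there.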
  if (instr->length()->IsConstantOperand()) {
    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
    Register index = ToRegister(instr->index());
    if (representation.IsSmi()) {
      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
    } else {
      __ Cmplwi(index, Operand(length), r0);
    }
    cc = CommuteCondition(cc);
  } else if (instr->index()->IsConstantOperand()) {
    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
    } else {
      __ Cmplwi(length, Operand(index), r0);
    }
  } else {
    Register index = ToRegister(instr->index());
    Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ cmpl(length, index);
    } else {
      __ cmplw(length, index);
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    DoubleRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Add(address, external_pointer, constant_key << element_size_shift,
               r0);
      } else {
        address = external_pointer;
      }
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(address, external_pointer, r0);
    }
    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ frsp(double_scratch0(), value);
      __ stfs(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ stfd(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand =
        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
                            constant_key, element_size_shift, base_offset);
    switch (elements_kind) {
      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
      case EXTERNAL_INT8_ELEMENTS:
      case EXTERNAL_UINT8_ELEMENTS:
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
      case INT8_ELEMENTS:
        if (key_is_constant) {
          __ StoreByte(value, mem_operand, r0);
        } else {
          __ stbx(value, mem_operand);
        }
        break;
      case EXTERNAL_INT16_ELEMENTS:
      case EXTERNAL_UINT16_ELEMENTS:
      case INT16_ELEMENTS:
      case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ StoreHalfWord(value, mem_operand, r0);
        } else {
          __ sthx(value, mem_operand);
        }
        break;
      case EXTERNAL_INT32_ELEMENTS:
      case EXTERNAL_UINT32_ELEMENTS:
      case INT32_ELEMENTS:
      case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ StoreWord(value, mem_operand, r0);
        } else {
          __ stwx(value, mem_operand);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case EXTERNAL_FLOAT32_ELEMENTS:
      case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = no_reg;
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, scratch);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }
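
  // FixedDoubleArray encodes the hole as one specific NaN bit pattern, so a
  // value that might be a signaling NaN must be canonicalized to a single
  // quiet form that cannot be confused with the hole.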
  if (instr->NeedsCanonicalization()) {
    // Turn potential sNaN value into qNaN.
    __ CanonicalizeNaN(double_scratch, value);
    __ stfd(double_scratch, MemOperand(elements, base_offset));
  } else {
    __ stfd(value, MemOperand(elements, base_offset));
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  HStoreKeyed* hinstr = instr->hydrogen();
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = instr->base_offset();

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    DCHECK(!hinstr->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, scratch);
  }

  Representation representation = hinstr->value()->representation();

#if V8_TARGET_ARCH_PPC64
  // 64-bit Smi optimization
  if (representation.IsInteger32()) {
    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
    // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
                         r0);

  if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Add(key, store_base, offset, r0);
    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
                   EMIT_REMEMBERED_SET, check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // By cases: external, fast double, fast.
  if (instr->is_typed_elements()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = r3;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ b(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
    __ ble(deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
    __ bge(deferred->entry());
  } else {
    __ cmpw(ToRegister(key), ToRegister(current_capacity));
    __ bge(deferred->entry());
  }

  if (instr->elements()->IsRegister()) {
    __ Move(result, ToRegister(instr->elements()));
  } else {
    __ LoadP(result, ToMemOperand(instr->elements()));
  }

  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = r3;
  __ li(result, Operand::Zero());

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ LoadP(result, ToMemOperand(instr->object()));
    }

    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
    } else {
      __ SmiTag(r6, ToRegister(key));
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ TestIfSmi(result, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(from_map), r0);
  __ bne(&not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
              r0);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
                         GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
    DCHECK(object_reg.is(r3));
    PushSafepointRegistersScope scope(this);
    __ Move(r4, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));
  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new (zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(
      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
      ToRegister(instr->result()), deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r3);
  __ SmiUntag(r3);
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new (zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
  __ bgt(deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
  __ add(result, result, r0);
  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ beq(deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ li(result, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  DCHECK(output->IsDoubleRegister());
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(dst, src);
#else
  __ SmiTagCheckOverflow(dst, src, r0);
  __ BranchOnOverflow(deferred->entry());
#endif
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
  __ bgt(deferred->entry());
  __ SmiTag(result, input);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
                                     LOperand* temp1, LOperand* temp2,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  Register tmp1 = scratch0();
  Register tmp2 = ToRegister(temp1);
  Register tmp3 = ToRegister(temp2);
  DoubleRegister dbl_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
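    // When dst aliases src, src was clobbered by the overflowing SmiTag:
    // untagging shifts the value back with bit 31 inverted, and the xoris
    // with (kSignMask >> 16) flips that bit to recover the original value.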
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
    }
    __ ConvertIntToDouble(src, dbl_scratch);
  } else {
    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ li(dst, Operand::Zero());

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ b(deferred->entry());
  }
  __ bind(deferred->exit());
  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ li(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(instr->pointer_map(), 0,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
#if !V8_TARGET_ARCH_PPC64
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_PPC64
  }
#endif
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    // If the input is a HeapObject, value of scratch won't be zero.
    __ andi(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                DoubleRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Register scratch = scratch0();
  DCHECK(!result_reg.is(double_scratch0()));

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);

    // Heap number map check.
    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, ip);
    if (can_convert_undefined_to_nan) {
      __ bne(&convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // load heap number
    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch, result_reg);
      // rotate left by one for simple compare.
      __ rldicl(scratch, scratch, 1, 0);
      __ cmpi(scratch, Operand(1));
#else
      __ MovDoubleToInt64(scratch, ip, result_reg);
      __ cmpi(ip, Operand::Zero());
      __ bne(&done);
      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, ip);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ b(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ ConvertIntToDouble(scratch, result_reg);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // Heap number map check.
  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, ip);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ bne(&no_heap_number);
    __ mr(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_bools);
    __ li(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(input_reg, ip);
    __ bne(&check_false);
    __ li(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(input_reg, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ li(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);

    __ lfd(double_scratch2,
           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // preserve heap number pointer in scratch2 for minus zero check below
      __ mr(scratch2, input_reg);
    }
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
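      // The exact conversion produced zero; distinguish -0 from +0 by
      // checking the sign word of the original heap number, whose pointer
      // was preserved in scratch2 above.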
      __ cmpi(input_reg, Operand::Zero());
      __ bne(&done);
      __ lwz(scratch1,
             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
                                           Register::kExponentOffset));
      __ cmpwi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  DCHECK(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);

    // Branch to deferred code if the input is a HeapObject.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
                              ? NUMBER_CANDIDATE_IS_SMI
                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());
  DoubleRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}

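// On PPC64 the smi payload occupies the upper 32 bits of the word, so
// SmiTag cannot overflow. On 32-bit targets tagging is a left shift by
// one, which only works for 31-bit payloads; SmiTagCheckOverflow (roughly)
// XORs the shifted value with the original, so a sign change makes the
// result negative, sets cr0, and the "lt" deopt above fires. For example,
// 0x40000000 (2^30) shifts to 0x80000000 and is rejected.
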
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ TestIfSmi(ToRegister(input), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}

void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ TestIfSmi(ToRegister(input), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  }
}

void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
  __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
  __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
}

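// JSArrayBuffer::WasNeutered is a one-bit field in the buffer's bit field,
// so the whole check is a single "andi." whose record form updates cr0;
// "ne" then means the backing store has been neutered (detached) and any
// typed-array access through this view would be invalid, hence the
// kOutOfBounds deopt reason.
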
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpli(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpli(scratch, Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ andi(r0, scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   cr0);
    } else {
      __ andi(scratch, scratch, Operand(mask));
      __ cmpi(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}

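// GetCheckMaskAndTag rewrites a non-interval type check as
// "(instance_type & mask) == tag". When the mask is a single bit the
// compare is unnecessary: tag == 0 means the bit must be clear (deopt on
// ne after "andi."), tag == mask means it must be set (deopt on eq). For
// instance, a check against kIsNotStringMask with tag 0 reduces to "deopt
// if the not-a-string bit is set".
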
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(cell));
    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ Cmpi(reg, Operand(object), r0);
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ li(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, temp);
  }
  __ TestIfSmi(temp, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}

void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
  Register map_reg = ToRegister(instr->temp());

  __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new (zone()) DeferredCheckMaps(this, instr, object);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ beq(&success);
  }

  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}

void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}

void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}

void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
  __ beq(&heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ li(result_reg, Operand::Zero());
  __ b(&done);

  // Heap number
  __ bind(&heap_number);
  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done);

  // smi
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}

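// The three clamp instructions implement the Uint8ClampedArray store
// conversion: integers saturate to [0, 255], doubles also round to the
// nearest integer with NaN mapping to 0, and DoClampTToUint8 additionally
// folds undefined to 0; any other tagged value deoptimizes.
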
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());

  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ MovDoubleHighToInt(result_reg, value_reg);
  } else {
    __ MovDoubleLowToInt(result_reg, value_reg);
  }
}

void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
#if V8_TARGET_ARCH_PPC64
  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
#else
  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
#endif
}

void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ b(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
    } else {
      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ subi(scratch, scratch, Operand(kPointerSize));
    __ StorePX(scratch2, MemOperand(result, scratch));
    __ cmpi(scratch, Operand::Zero());
    __ bgt(&loop);
  }
}

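// MustPrefillWithFiller is set when the allocation is observable before
// its fields are initialized (e.g. under allocation folding). The loop
// above stores the one-pointer filler map into every word, walking
// backwards from the last word of the object down to offset 0, so a GC
// triggered before initialization never scans uninitialized memory.
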
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_PPC64
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_PPC64
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}

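// The runtime entry expects its arguments as smis, which is why the
// register-sized case tags the size before pushing. On 32-bit targets a
// constant size could exceed Smi::kMaxValue, so the push is guarded; the
// __ stop() path aborts and should be unreachable in practice, since an
// allocation that large can never succeed.
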
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  __ push(r3);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}

void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r10 = literals array.
  // r4 = regexp literal.
  // r3 = regexp literal clone.
  // r5 and r7-r9 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r10, instr->hydrogen()->literals());
  __ LoadP(r4, FieldMemOperand(r10, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r4, ip);
  __ bne(&materialized);

  // Create regexp literal using runtime function
  // Result will be in r3.
  __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
  __ mov(r8, Operand(instr->hydrogen()->pattern()));
  __ mov(r7, Operand(instr->hydrogen()->flags()));
  __ Push(r10, r9, r8, r7);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mr(r4, r3);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
  __ b(&allocated);

  __ bind(&runtime_allocate);
  __ LoadSmiLiteral(r3, Smi::FromInt(size));
  __ Push(r4, r3);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r4);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
}

void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    __ mov(r4, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r5, r4);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}

void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r6));
  DCHECK(ToRegister(instr->result()).is(r3));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r3, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}

Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ bge(false_label);
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ beq(true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ beq(true_label);
    __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
    // Check for undetectable objects => false.
    __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->float32x4_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FLOAT32X4_TYPE);
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}

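// Each branch of EmitTypeofIs leaves a comparison in the condition
// register and returns the condition under which the typeof string
// matches; kNoCondition (the trailing else) means no type matched and the
// code has already jumped to false_label unconditionally. Undetectable
// objects (e.g. document.all) must answer "undefined", which is why both
// the "string" and "undefined" branches test Map::kIsUndetectable.
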
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}

void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
}

void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

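// Lazy deoptimization works by patching a call sequence over the code that
// follows a safepoint, so two patch sites must be at least
// Deoptimizer::patch_size() bytes apart. The loop above pads with nops:
// e.g. if patch_size() were 5 * Assembler::kInstrSize (an illustrative
// number only) and the previous patch site ended two instructions ago,
// three nops would be emitted before the next one.
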
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ bge(&done);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ TestIfSmi(r3, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = r8;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r3);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r4, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}

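// Runtime::kGetPropertyNamesFast returns either the receiver's map (the
// enum cache can be used) or a FixedArray of property names. This Lithium
// path only supports the cached case, so it checks that the result's map
// is the meta map (i.e. the result is itself a map) and deoptimizes to
// full codegen otherwise.
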
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmpi(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}

void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}

void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ li(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, result);
}

void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry(), cr0);
  __ ShiftRightArithImm(index, index, 1);

  __ cmpi(index, Operand::Zero());
  __ blt(&out_of_object);

  __ SmiToPtrArrayOffset(r0, index);
  __ add(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ sub(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

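// The index operand is a smi with an extra flag bit: the low payload bit
// marks a mutable heap-number field, which must be loaded (and boxed) by
// the deferred runtime call, and the remaining bits hold the property
// index after the arithmetic shift. Non-negative indices address in-object
// fields; negative ones address the out-of-object properties array, offset
// by one as noted above.
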
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}

void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}

#undef __

}  // namespace internal
}  // namespace v8