// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"

namespace v8 {
namespace internal {

#define __ masm()->

class SafepointGenerator FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const OVERRIDE {}

  void AfterCall() const OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack. The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}
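
// Both helpers above walk the allocated-double-register bit vector in the
// same order, so every register pairs with the same sp-relative slot
// (count * kDoubleSize). For example, with d1 and d4 allocated, d1 uses
// offset 0 and d4 offset 8, regardless of the gap in allocation indices.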


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r4: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
        !info_->is_native()) {
      Label ok;
      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
      __ LoadP(r5, MemOperand(sp, receiver_offset));
      __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
      __ bne(&ok);

      __ LoadP(r5, GlobalObjectOperand());
      __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));

      __ StoreP(r5, MemOperand(sp, receiver_offset));

      __ bind(&ok);
    }
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.
    prologue_offset += Instruction::kInstrSize;
    __ addi(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(prologue_offset);
    } else {
      __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
    }
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ subi(sp, sp, Operand(slots * kPointerSize));
    if (FLAG_debug_code) {
      __ Push(r3, r4);
      __ li(r0, Operand(slots));
      __ mtctr(r0);
      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
      __ mov(r4, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StorePU(r4, MemOperand(r3, -kPointerSize));
      __ bdnz(&loop);
      __ Pop(r3, r4);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r4.
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), heap_slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r4);
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r3 and cp. It replaces the context
    // passed to us. It's saved in the stack and kept live in cp.
    __ mr(cp, r3);
    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r3, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ StoreP(r3, target, r0);
        // Update the write barrier. This clobbers r6 and r3.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r3, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subi(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushFixedFrame(scratch0());
        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopFixedFrame(ip);
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ b(&needs_frame, SetLK);
      } else {
        __ b(&call_deopt_entry, SetLK);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ add(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
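
// Note on the base + offset scheme above (illustrative numbers): if the
// first entry deopts to address B and a later entry to B + 96, that entry
// only materializes the small immediate 96 into entry_offset; the shared
// tail at call_deopt_entry then adds the absolute base B once. Each entry
// stays a short fixed-size sequence, which the "* 7" instruction budget in
// the is_int24 check above appears to account for.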


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
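
// Frameless slots use negative indices counted from -1, so index -1 maps to
// offset 0 (the value at sp) and index -2 to kPointerSize, and so on.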


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id =
      !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
                       ? DefineDeoptimizationLiteral(environment->closure())
                       : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      DCHECK(translation_size == 1);
      DCHECK(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      DCHECK(translation_size == 2);
      DCHECK(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  DCHECK(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    CRegister alt_cr = cr6;
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    DCHECK(!alt_cr.is(cr));
    __ Push(r4, scratch);
    __ mov(scratch, Operand(count));
    __ lwz(r4, MemOperand(scratch));
    __ subi(r4, r4, Operand(1));
    __ cmpi(r4, Operand::Zero(), alt_cr);
    __ bne(&no_deopt, alt_cr);
    __ li(r4, Operand(FLAG_deopt_every_n_times));
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label, cr);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      DeoptimizationInputData::New(isolate(), length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
  if (info_->IsOptimizing()) {
    // Reference to shared function info does not change between phases.
    AllowDeferredHandleDereference allow_handle_dereference;
    data->SetSharedFunctionInfo(*info_->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::FromInt(0));
  }
  data->SetWeakCellCache(Smi::FromInt(0));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  {
    AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }

  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  DCHECK(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmpwi(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ neg(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ neg(dividend, dividend, LeaveOE, SetRC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ li(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ li(dividend, Operand::Zero());
  }
  __ bind(&done);
}
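
// Worked example for the negative path above (32-bit): dividend = -5,
// divisor = 4, shift = 2. neg gives 5, ExtractBitRange(1, 0) keeps
// 5 & 3 = 1, and the final neg yields -1, matching JS -5 % 4 == -1. A
// dividend that is a multiple of the divisor ends at 0 with CR0.EQ set,
// which is exactly the -0 case the deopt above guards against.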


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ mullw(result, result, ip);
  __ sub(result, dividend, result, LeaveOE, SetRC);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, cr0);
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
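
// The sequence above evaluates dividend - trunc(dividend / |divisor|) * |divisor|.
// E.g. -7 % 3: TruncatingDiv yields trunc(-7 / 3) = -2, and -7 - (-2 * 3) = -1,
// the JS result; the remainder takes the dividend's sign, so using |divisor|
// is safe.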


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);

  Label done;
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
  } else {
    __ divw(scratch, left_reg, right_reg);
  }

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, divw will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (can_overflow) {
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result_reg, r0, result_reg, cr0);
        __ boverflow(&done, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ li(result_reg, Operand::Zero());
        __ b(&done);
        __ bind(&no_overflow_possible);
      }
    }
  }

  __ mullw(scratch, right_reg, scratch);
  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, cr0);
    __ cmpwi(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ neg(result, dividend);
    return;
  }
  if (shift == 0) {
    __ mr(result, dividend);
  } else {
    if (shift == 1) {
      __ srwi(result, dividend, Operand(31));
    } else {
      __ srawi(result, dividend, 31);
      __ srwi(result, result, Operand(32 - shift));
    }
    __ add(result, dividend, result);
    __ srawi(result, result, shift);
  }
  if (divisor < 0) __ neg(result, result);
}
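
// The shift == 1 / shift > 1 split above builds the usual rounding bias for
// truncating division: a negative dividend needs (2^shift - 1) added before
// the arithmetic shift. E.g. dividend = -7, |divisor| = 4: srawi by 31 gives
// -1, srwi by 30 leaves 3, -7 + 3 = -4, and -4 >> 2 = -1 = trunc(-7 / 4).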


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ mullw(scratch, result, ip);
    __ cmpw(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}
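
// TruncatingDiv computes the quotient via a multiply-high ("magic number")
// reciprocal sequence rather than divw. The lost-precision check above then
// multiplies the quotient back and deopts when a nonzero remainder shows the
// division was not exact.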


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    __ divw(result, dividend, divisor, SetOE, SetRC);
  } else {
    __ divw(result, dividend, divisor);
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    Register scratch = scratch0();
    __ mullw(scratch, divisor, result);
    __ cmpw(dividend, scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ srawi(result, dividend, shift);
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
  if (divisor == -1 && can_overflow) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#else
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    oe = SetOE;
  }
#endif

  __ neg(result, dividend, oe, SetRC);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

  // If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
  if (!can_overflow) {
#endif
    __ ShiftRightArithImm(result, result, shift);
    return;
#if !V8_TARGET_ARCH_PPC64
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow, done;
  __ boverflow(&overflow, cr0);
  __ srawi(result, result, shift);
  __ b(&done);
  __ bind(&overflow);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmpwi(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ b(&done);
  __ bind(&needs_adjustment);
  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ subi(result, result, Operand(1));
  __ bind(&done);
}
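
// The adjustment path above uses floor(n / d) == trunc((n +/- 1) / d) - 1
// when the operand signs differ. E.g. n = -7, d = 2: trunc((-7 + 1) / 2) - 1
// = -3 - 1 = -4 = floor(-3.5).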


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    __ divw(result, dividend, divisor, SetOE, SetRC);
  } else {
    __ divw(result, dividend, divisor);
  }

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
#if V8_TARGET_ARCH_PPC64
  __ xor_(scratch, dividend, divisor);
  __ cmpwi(scratch, Operand::Zero());
  __ bge(&done);
#else
  __ xor_(scratch, dividend, divisor, SetRC);
  __ bge(&done, cr0);
#endif

  // If there is no remainder then we are done.
  __ mullw(scratch, divisor, result);
  __ cmpw(dividend, scratch);
  __ beq(&done);

  // We performed a truncating division. Correct the result.
  __ subi(result, result, Operand(1));
  __ bind(&done);
}
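
// divw truncates toward zero, which differs from flooring only when the
// operand signs differ and a remainder exists; the tail above checks both
// conditions. E.g. 7 / -2: divw gives -3, the sign xor is negative and
// 7 != (-3 * -2), so the result is corrected to -4 = floor(-3.5).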


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmadd(result, multiplier, multiplicand, addend);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmsub(result, multiplier, multiplicand, minuend);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a null constant will be handled separately.
      // If constant is negative and left is null, the result should be -0.
      __ cmpi(left, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ li(r0, Operand::Zero());  // clear xer
            __ mtxer(r0);
            __ neg(result, left, SetOE, SetRC);
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
#if V8_TARGET_ARCH_PPC64
          } else {
            __ neg(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
          }
#endif
        } else {
          __ neg(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is null, the
          // result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ cmpi(left, Operand::Zero());
#if V8_TARGET_ARCH_PPC64
          } else {
            __ cmpwi(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
        }
        __ li(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftImm(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ add(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ sub(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ Mul(result, left, ip);
        }
    }
  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ Mul(result, result, scratch);
      } else {
        __ Mul(result, left, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mulhw(scratch, result, right);
        __ mullw(result, result, right);
      } else {
        __ mulhw(scratch, left, right);
        __ mullw(result, left, right);
      }
      __ TestIfInt32(scratch, result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_PPC64
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ xor_(r0, left, right, SetRC);
        __ bge(&done, cr0);
#if V8_TARGET_ARCH_PPC64
      } else {
        __ xor_(r0, left, right);
        __ cmpwi(r0, Operand::Zero());
        __ bge(&done);
      }
#endif
      // Bail out if the result is minus zero.
      __ cmpi(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}
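
// Examples of the strength reduction above: x * 4 is x << 2 (power of two),
// x * 5 uses the 2^n + 1 form (x << 2) + x, and x * 7 the 2^n - 1 form
// (x << 3) - x; a constant like 6 matches none of these and falls through
// to the generic mov + Mul path.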


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andi(result, left, right);
        break;
      case Token::BIT_OR:
        __ ori(result, left, right);
        break;
      case Token::BIT_XOR:
        __ xori(result, left, right);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ And(result, left, right);
        break;
      case Token::BIT_OR:
        __ Or(result, left, right);
        break;
      case Token::BIT_XOR:
        if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
          __ notx(result, left);
        } else {
          __ Xor(result, left, right);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ subfic(scratch, scratch, Operand(32));
        __ rotlw(result, left, scratch);
        break;
      case Token::SAR:
        __ sraw(result, left, scratch);
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ srw(result, left, scratch, SetRC);
#if V8_TARGET_ARCH_PPC64
          __ extsw(result, result, SetRC);
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
        } else {
          __ srw(result, left, scratch);
        }
        break;
      case Token::SHL:
        __ slw(result, left, scratch);
#if V8_TARGET_ARCH_PPC64
        __ extsw(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rotrwi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ srawi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srwi(result, left, Operand(shift_count));
        } else {
          if (instr->can_deopt()) {
            __ cmpwi(left, Operand::Zero());
            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ sldi(result, left, Operand(shift_count));
#else
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ slwi(result, left, Operand(shift_count - 1));
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
          } else {
            __ slwi(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_PPC64
            __ extsw(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
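
// The ROR lowering above relies on rotate_right(x, n) == rotate_left(x, 32 - n).
// For a variable count, subfic materializes 32 - count; a count of zero is
// harmless because rotlw only uses the low five bits of the amount, making
// a rotate by 32 equivalent to a rotate by 0.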


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
#else
  const bool isInteger = false;
#endif
  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, -(ToOperand(right).immediate()), r0);
    } else {
      __ sub(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
                                scratch0(), r0);
    } else {
      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

  Operand right_operand = ToOperand(right);
  if (is_int16(right_operand.immediate())) {
    __ subfic(ToRegister(result), ToRegister(left), right_operand);
  } else {
    __ mov(r0, right_operand);
    __ sub(ToRegister(result), r0, ToRegister(left));
  }
}
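
// subfic computes (immediate - register) directly, so the reversed operand
// order costs nothing when the constant fits in a signed 16-bit immediate;
// otherwise the constant is staged in r0 and a regular sub is used.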


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ mov(ip, Operand(lo));
    __ mov(scratch0(), Operand(hi));
    __ MovInt64ToDouble(result, scratch0(), ip);
    return;
  }
#endif
  double v = instr->value();
  __ LoadDoubleLiteral(result, v, scratch0());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  DCHECK(object.is(result));
  DCHECK(object.is(r3));
  DCHECK(!scratch.is(scratch0()));
  DCHECK(!scratch.is(object));

  __ TestIfSmi(object, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);

  if (index->value() == 0) {
    __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ LoadP(scratch, MemOperand(scratch));
      __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ bne(&runtime);
      __ LoadP(result,
               FieldMemOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ b(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ LoadSmiLiteral(r4, index);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}
1964 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
1965 String::Encoding encoding) {
1966 if (index->IsConstantOperand()) {
1967 int offset = ToInteger32(LConstantOperand::cast(index));
1968 if (encoding == String::TWO_BYTE_ENCODING) {
1969 offset *= kUC16Size;
1970 }
1971 STATIC_ASSERT(kCharSize == 1);
1972 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1973 }
1974 Register scratch = scratch0();
1975 DCHECK(!scratch.is(string));
1976 DCHECK(!scratch.is(ToRegister(index)));
1977 if (encoding == String::ONE_BYTE_ENCODING) {
1978 __ add(scratch, string, ToRegister(index));
1979 } else {
1980 STATIC_ASSERT(kUC16Size == 2);
1981 __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
1982 __ add(scratch, string, scratch);
1983 }
1984 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1985 }
1988 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1989 String::Encoding encoding = instr->hydrogen()->encoding();
1990 Register string = ToRegister(instr->string());
1991 Register result = ToRegister(instr->result());
1993 if (FLAG_debug_code) {
1994 Register scratch = scratch0();
1995 __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1996 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1998 __ andi(scratch, scratch,
1999 Operand(kStringRepresentationMask | kStringEncodingMask));
2000 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2001 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2002 __ cmpi(scratch,
2003 Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
2004 : two_byte_seq_type));
2005 __ Check(eq, kUnexpectedStringType);
2006 }
2008 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2009 if (encoding == String::ONE_BYTE_ENCODING) {
2010 __ lbz(result, operand);
2011 } else {
2012 __ lhz(result, operand);
2013 }
2014 }
2017 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
2018 String::Encoding encoding = instr->hydrogen()->encoding();
2019 Register string = ToRegister(instr->string());
2020 Register value = ToRegister(instr->value());
2022 if (FLAG_debug_code) {
2023 Register index = ToRegister(instr->index());
2024 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
2025 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
2026 int encoding_mask =
2027 instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
2028 ? one_byte_seq_type
2029 : two_byte_seq_type;
2030 __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
2031 }
2033 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
2034 if (encoding == String::ONE_BYTE_ENCODING) {
2035 __ stb(value, operand);
2036 } else {
2037 __ sth(value, operand);
2038 }
2039 }
2042 void LCodeGen::DoAddI(LAddI* instr) {
2043 LOperand* right = instr->right();
2044 Register left = ToRegister(instr->left());
2045 Register result = ToRegister(instr->result());
2046 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2047 #if V8_TARGET_ARCH_PPC64
2048 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2049 instr->hydrogen()->representation().IsExternal());
2050 #else
2051 const bool isInteger = false;
2052 #endif
2054 if (!can_overflow || isInteger) {
2055 if (right->IsConstantOperand()) {
2056 __ Add(result, left, ToOperand(right).immediate(), r0);
2057 } else {
2058 __ add(result, left, EmitLoadRegister(right, ip));
2059 }
2060 #if V8_TARGET_ARCH_PPC64
2061 if (can_overflow) {
2062 __ TestIfInt32(result, r0);
2063 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
2064 }
2065 #endif
2066 } else {
2067 if (right->IsConstantOperand()) {
2068 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
2069 scratch0(), r0);
2070 } else {
2071 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
2072 scratch0(), r0);
2073 }
2074 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
2075 }
2076 }
2079 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2080 LOperand* left = instr->left();
2081 LOperand* right = instr->right();
2082 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2083 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
2084 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2085 Register left_reg = ToRegister(left);
2086 Register right_reg = EmitLoadRegister(right, ip);
2087 Register result_reg = ToRegister(instr->result());
2088 Label return_left, done;
2089 #if V8_TARGET_ARCH_PPC64
2090 if (instr->hydrogen_value()->representation().IsSmi()) {
2091 #endif
2092 __ cmp(left_reg, right_reg);
2093 #if V8_TARGET_ARCH_PPC64
2094 } else {
2095 __ cmpw(left_reg, right_reg);
2096 }
2097 #endif
2098 if (CpuFeatures::IsSupported(ISELECT)) {
2099 __ isel(cond, result_reg, left_reg, right_reg);
2100 } else {
2101 __ b(cond, &return_left);
2102 __ Move(result_reg, right_reg);
2103 __ b(&done);
2104 __ bind(&return_left);
2105 __ Move(result_reg, left_reg);
2106 __ bind(&done);
2107 }
2108 } else {
2109 DCHECK(instr->hydrogen()->representation().IsDouble());
2110 DoubleRegister left_reg = ToDoubleRegister(left);
2111 DoubleRegister right_reg = ToDoubleRegister(right);
2112 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2113 Label check_nan_left, check_zero, return_left, return_right, done;
2114 __ fcmpu(left_reg, right_reg);
2115 __ bunordered(&check_nan_left);
2116 __ beq(&check_zero);
2117 __ b(cond, &return_left);
2118 __ b(&return_right);
2120 __ bind(&check_zero);
2121 __ fcmpu(left_reg, kDoubleRegZero);
2122 __ bne(&return_left); // left == right != 0.
2124 // At this point, both left and right are either 0 or -0.
2125 // N.B. The following works because +0 + -0 == +0
2126 if (operation == HMathMinMax::kMathMin) {
2127 // For min we want logical-or of sign bit: -(-L + -R)
2128 __ fneg(left_reg, left_reg);
2129 __ fsub(result_reg, left_reg, right_reg);
2130 __ fneg(result_reg, result_reg);
2131 } else {
2132 // For max we want logical-and of sign bit: (L + R)
2133 __ fadd(result_reg, left_reg, right_reg);
2134 }
2135 __ b(&done);
2137 __ bind(&check_nan_left);
2138 __ fcmpu(left_reg, left_reg);
2139 __ bunordered(&return_left); // left == NaN.
2141 __ bind(&return_right);
2142 if (!right_reg.is(result_reg)) {
2143 __ fmr(result_reg, right_reg);
2144 }
2145 __ b(&done);
2147 __ bind(&return_left);
2148 if (!left_reg.is(result_reg)) {
2149 __ fmr(result_reg, left_reg);
2150 }
2151 __ bind(&done);
2152 }
2153 }
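// Worked example for the zero handling above (illustrative): the min path
// computes -((-L) + (-R)). With L == +0 and R == -0 that is
// -((-0) + (+0)) = -(+0) = -0, and the max path computes +0 + -0 = +0.
// Both match JS Math.min/Math.max, which distinguish the zeros by sign.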
2156 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
2157 DoubleRegister left = ToDoubleRegister(instr->left());
2158 DoubleRegister right = ToDoubleRegister(instr->right());
2159 DoubleRegister result = ToDoubleRegister(instr->result());
2160 switch (instr->op()) {
2161 case Token::ADD:
2162 __ fadd(result, left, right);
2163 break;
2164 case Token::SUB:
2165 __ fsub(result, left, right);
2166 break;
2167 case Token::MUL:
2168 __ fmul(result, left, right);
2169 break;
2170 case Token::DIV:
2171 __ fdiv(result, left, right);
2172 break;
2173 case Token::MOD: {
2174 __ PrepareCallCFunction(0, 2, scratch0());
2175 __ MovToFloatParameters(left, right);
2176 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
2177 0, 2);
2178 // Move the result into the double result register.
2179 __ MovFromFloatResult(result);
2180 break;
2181 }
2182 default:
2183 UNREACHABLE();
2184 break;
2185 }
2186 }
2189 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2190 DCHECK(ToRegister(instr->context()).is(cp));
2191 DCHECK(ToRegister(instr->left()).is(r4));
2192 DCHECK(ToRegister(instr->right()).is(r3));
2193 DCHECK(ToRegister(instr->result()).is(r3));
2195 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2196 CallCode(code, RelocInfo::CODE_TARGET, instr);
2197 }
2200 template <class InstrType>
2201 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2202 int left_block = instr->TrueDestination(chunk_);
2203 int right_block = instr->FalseDestination(chunk_);
2205 int next_block = GetNextEmittedBlock();
2207 if (right_block == left_block || cond == al) {
2208 EmitGoto(left_block);
2209 } else if (left_block == next_block) {
2210 __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
2211 } else if (right_block == next_block) {
2212 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2213 } else {
2214 __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
2215 __ b(chunk_->GetAssemblyLabel(right_block));
2216 }
2217 }
2220 template <class InstrType>
2221 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
2222 int false_block = instr->FalseDestination(chunk_);
2223 __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
2224 }
2227 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
2230 void LCodeGen::DoBranch(LBranch* instr) {
2231 Representation r = instr->hydrogen()->value()->representation();
2232 DoubleRegister dbl_scratch = double_scratch0();
2233 const uint32_t crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
2234 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
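// How this mask is used: fcmpu into cr7 sets the CR_EQ bit when the value
// compares equal to zero and the CR_FU bit when the comparison is unordered
// (i.e. the value is NaN). The mfcr/andi. pairs below copy exactly those two
// bits into r0 and set cr0 from the result, so branching on "eq" (both bits
// clear) selects the "neither zero nor NaN", i.e. truthy, case.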
2236 if (r.IsInteger32()) {
2237 DCHECK(!info()->IsStub());
2238 Register reg = ToRegister(instr->value());
2239 __ cmpwi(reg, Operand::Zero());
2240 EmitBranch(instr, ne);
2241 } else if (r.IsSmi()) {
2242 DCHECK(!info()->IsStub());
2243 Register reg = ToRegister(instr->value());
2244 __ cmpi(reg, Operand::Zero());
2245 EmitBranch(instr, ne);
2246 } else if (r.IsDouble()) {
2247 DCHECK(!info()->IsStub());
2248 DoubleRegister reg = ToDoubleRegister(instr->value());
2249 // Test the double value. Zero and NaN are false.
2250 __ fcmpu(reg, kDoubleRegZero, cr7);
2251 __ mfcr(r0);
2252 __ andi(r0, r0, Operand(crZOrNaNBits));
2253 EmitBranch(instr, eq, cr0);
2254 } else {
2255 DCHECK(r.IsTagged());
2256 Register reg = ToRegister(instr->value());
2257 HType type = instr->hydrogen()->value()->type();
2258 if (type.IsBoolean()) {
2259 DCHECK(!info()->IsStub());
2260 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2261 EmitBranch(instr, eq);
2262 } else if (type.IsSmi()) {
2263 DCHECK(!info()->IsStub());
2264 __ cmpi(reg, Operand::Zero());
2265 EmitBranch(instr, ne);
2266 } else if (type.IsJSArray()) {
2267 DCHECK(!info()->IsStub());
2268 EmitBranch(instr, al);
2269 } else if (type.IsHeapNumber()) {
2270 DCHECK(!info()->IsStub());
2271 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2272 // Test the double value. Zero and NaN are false.
2273 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2274 __ mfcr(r0);
2275 __ andi(r0, r0, Operand(crZOrNaNBits));
2276 EmitBranch(instr, eq, cr0);
2277 } else if (type.IsString()) {
2278 DCHECK(!info()->IsStub());
2279 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2280 __ cmpi(ip, Operand::Zero());
2281 EmitBranch(instr, ne);
2282 } else {
2283 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2284 // Avoid deopts in the case where we've never executed this path before.
2285 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2287 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2288 // undefined -> false.
2289 __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
2290 __ beq(instr->FalseLabel(chunk_));
2291 }
2292 if (expected.Contains(ToBooleanStub::BOOLEAN)) {
2293 // Boolean -> its value.
2294 __ CompareRoot(reg, Heap::kTrueValueRootIndex);
2295 __ beq(instr->TrueLabel(chunk_));
2296 __ CompareRoot(reg, Heap::kFalseValueRootIndex);
2297 __ beq(instr->FalseLabel(chunk_));
2298 }
2299 if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
2300 // 'null' -> false.
2301 __ CompareRoot(reg, Heap::kNullValueRootIndex);
2302 __ beq(instr->FalseLabel(chunk_));
2303 }
2305 if (expected.Contains(ToBooleanStub::SMI)) {
2306 // Smis: 0 -> false, all other -> true.
2307 __ cmpi(reg, Operand::Zero());
2308 __ beq(instr->FalseLabel(chunk_));
2309 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2310 } else if (expected.NeedsMap()) {
2311 // If we need a map later and have a Smi -> deopt.
2312 __ TestIfSmi(reg, r0);
2313 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
2314 }
2316 const Register map = scratch0();
2317 if (expected.NeedsMap()) {
2318 __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2320 if (expected.CanBeUndetectable()) {
2321 // Undetectable -> false.
2322 __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
2323 __ TestBit(ip, Map::kIsUndetectable, r0);
2324 __ bne(instr->FalseLabel(chunk_), cr0);
2325 }
2326 }
2328 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2329 // spec object -> true.
2330 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
2331 __ bge(instr->TrueLabel(chunk_));
2332 }
2334 if (expected.Contains(ToBooleanStub::STRING)) {
2335 // String value -> false iff empty.
2336 Label not_string;
2337 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2338 __ bge(&not_string);
2339 __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
2340 __ cmpi(ip, Operand::Zero());
2341 __ bne(instr->TrueLabel(chunk_));
2342 __ b(instr->FalseLabel(chunk_));
2343 __ bind(&not_string);
2344 }
2346 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2347 // Symbol value -> true.
2348 __ CompareInstanceType(map, ip, SYMBOL_TYPE);
2349 __ beq(instr->TrueLabel(chunk_));
2350 }
2352 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2353 // heap number -> false iff +0, -0, or NaN.
2354 Label not_heap_number;
2355 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2356 __ bne(&not_heap_number);
2357 __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2358 // Test the double value. Zero and NaN are false.
2359 __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
2360 __ mfcr(r0);
2361 __ andi(r0, r0, Operand(crZOrNaNBits));
2362 __ bne(instr->FalseLabel(chunk_), cr0);
2363 __ b(instr->TrueLabel(chunk_));
2364 __ bind(&not_heap_number);
2365 }
2367 if (!expected.IsGeneric()) {
2368 // We've seen something for the first time -> deopt.
2369 // This can only happen if we are not generic already.
2370 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
2371 }
2372 }
2373 }
2374 }
2377 void LCodeGen::EmitGoto(int block) {
2378 if (!IsNextEmittedBlock(block)) {
2379 __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
2380 }
2381 }
2384 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
2387 Condition LCodeGen::TokenToCondition(Token::Value op) {
2388 Condition cond = kNoCondition;
2389 switch (op) {
2390 case Token::EQ:
2391 case Token::EQ_STRICT:
2392 cond = eq;
2393 break;
2394 case Token::NE:
2395 case Token::NE_STRICT:
2396 cond = ne;
2397 break;
2398 case Token::LT:
2399 cond = lt;
2400 break;
2401 case Token::GT:
2402 cond = gt;
2403 break;
2404 case Token::LTE:
2405 cond = le;
2406 break;
2407 case Token::GTE:
2408 cond = ge;
2409 break;
2410 case Token::IN:
2411 case Token::INSTANCEOF:
2412 default:
2413 UNREACHABLE();
2414 }
2415 return cond;
2416 }
2419 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
2420 LOperand* left = instr->left();
2421 LOperand* right = instr->right();
2422 bool is_unsigned =
2423 instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
2424 instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
2425 Condition cond = TokenToCondition(instr->op());
2427 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2428 // We can statically evaluate the comparison.
2429 double left_val = ToDouble(LConstantOperand::cast(left));
2430 double right_val = ToDouble(LConstantOperand::cast(right));
2431 int next_block = EvalComparison(instr->op(), left_val, right_val)
2432 ? instr->TrueDestination(chunk_)
2433 : instr->FalseDestination(chunk_);
2434 EmitGoto(next_block);
2435 } else {
2436 if (instr->is_double()) {
2437 // Compare left and right operands as doubles and load the
2438 // resulting flags into the normal status register.
2439 __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
2440 // If a NaN is involved, i.e. the result is unordered,
2441 // jump to false block label.
2442 __ bunordered(instr->FalseLabel(chunk_));
2443 } else {
2444 if (right->IsConstantOperand()) {
2445 int32_t value = ToInteger32(LConstantOperand::cast(right));
2446 if (instr->hydrogen_value()->representation().IsSmi()) {
2447 if (is_unsigned) {
2448 __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2449 } else {
2450 __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
2451 }
2452 } else {
2453 if (is_unsigned) {
2454 __ Cmplwi(ToRegister(left), Operand(value), r0);
2455 } else {
2456 __ Cmpwi(ToRegister(left), Operand(value), r0);
2457 }
2458 }
2459 } else if (left->IsConstantOperand()) {
2460 int32_t value = ToInteger32(LConstantOperand::cast(left));
2461 if (instr->hydrogen_value()->representation().IsSmi()) {
2462 if (is_unsigned) {
2463 __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2464 } else {
2465 __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
2466 }
2467 } else {
2468 if (is_unsigned) {
2469 __ Cmplwi(ToRegister(right), Operand(value), r0);
2470 } else {
2471 __ Cmpwi(ToRegister(right), Operand(value), r0);
2472 }
2473 }
2474 // We commuted the operands, so commute the condition.
2475 cond = CommuteCondition(cond);
2476 } else if (instr->hydrogen_value()->representation().IsSmi()) {
2477 if (is_unsigned) {
2478 __ cmpl(ToRegister(left), ToRegister(right));
2479 } else {
2480 __ cmp(ToRegister(left), ToRegister(right));
2481 }
2482 } else {
2483 if (is_unsigned) {
2484 __ cmplw(ToRegister(left), ToRegister(right));
2485 } else {
2486 __ cmpw(ToRegister(left), ToRegister(right));
2487 }
2488 }
2490 EmitBranch(instr, cond);
2491 }
2492 }
2495 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
2496 Register left = ToRegister(instr->left());
2497 Register right = ToRegister(instr->right());
2499 __ cmp(left, right);
2500 EmitBranch(instr, eq);
2501 }
2504 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
2505 if (instr->hydrogen()->representation().IsTagged()) {
2506 Register input_reg = ToRegister(instr->object());
2507 __ mov(ip, Operand(factory()->the_hole_value()));
2508 __ cmp(input_reg, ip);
2509 EmitBranch(instr, eq);
2510 return;
2511 }
2513 DoubleRegister input_reg = ToDoubleRegister(instr->object());
2514 __ fcmpu(input_reg, input_reg);
2515 EmitFalseBranch(instr, ordered);
2517 Register scratch = scratch0();
2518 __ MovDoubleHighToInt(scratch, input_reg);
2519 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
2520 EmitBranch(instr, eq);
2521 }
2524 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2525 Representation rep = instr->hydrogen()->value()->representation();
2526 DCHECK(!rep.IsInteger32());
2527 Register scratch = ToRegister(instr->temp());
2529 if (rep.IsDouble()) {
2530 DoubleRegister value = ToDoubleRegister(instr->value());
2531 __ fcmpu(value, kDoubleRegZero);
2532 EmitFalseBranch(instr, ne);
2533 #if V8_TARGET_ARCH_PPC64
2534 __ MovDoubleToInt64(scratch, value);
2535 #else
2536 __ MovDoubleHighToInt(scratch, value);
2537 #endif
2538 __ cmpi(scratch, Operand::Zero());
2539 EmitBranch(instr, lt);
2540 } else {
2541 Register value = ToRegister(instr->value());
2542 __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
2543 instr->FalseLabel(chunk()), DO_SMI_CHECK);
2544 #if V8_TARGET_ARCH_PPC64
2545 __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
2546 __ li(ip, Operand(1));
2547 __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000
2548 __ cmp(scratch, ip);
2549 #else
2550 __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2551 __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2552 Label skip;
2553 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
2554 __ cmp(scratch, r0);
2555 __ bne(&skip);
2556 __ cmpi(ip, Operand::Zero());
2557 __ bind(&skip);
2558 #endif
2559 EmitBranch(instr, eq);
2560 }
2561 }
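// Bit-level view of the checks above: -0.0 is the unique double with bit
// pattern 0x8000000000000000. Once fcmpu has established value == 0.0, a
// negative integer reinterpretation (sign bit set) can only be -0.0; the
// heap-number path instead compares the raw words against that pattern
// (high/exponent word 0x80000000, then low/mantissa word zero).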
2564 Condition LCodeGen::EmitIsObject(Register input, Register temp1,
2565 Label* is_not_object, Label* is_object) {
2566 Register temp2 = scratch0();
2567 __ JumpIfSmi(input, is_not_object);
2569 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2570 __ cmp(input, temp2);
2571 __ beq(is_object);
2573 // Load map
2574 __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2575 // Undetectable objects behave like undefined.
2576 __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2577 __ TestBit(temp2, Map::kIsUndetectable, r0);
2578 __ bne(is_not_object, cr0);
2580 // Load instance type and check that it is in object type range.
2581 __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2582 __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2583 __ blt(is_not_object);
2584 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
2585 return le;
2586 }
2589 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
2590 Register reg = ToRegister(instr->value());
2591 Register temp1 = ToRegister(instr->temp());
2593 Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
2594 instr->TrueLabel(chunk_));
2596 EmitBranch(instr, true_cond);
2597 }
2600 Condition LCodeGen::EmitIsString(Register input, Register temp1,
2601 Label* is_not_string,
2602 SmiCheck check_needed = INLINE_SMI_CHECK) {
2603 if (check_needed == INLINE_SMI_CHECK) {
2604 __ JumpIfSmi(input, is_not_string);
2605 }
2606 __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
2608 return lt;
2609 }
2612 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
2613 Register reg = ToRegister(instr->value());
2614 Register temp1 = ToRegister(instr->temp());
2616 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
2617 ? OMIT_SMI_CHECK
2618 : INLINE_SMI_CHECK;
2619 Condition true_cond =
2620 EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
2622 EmitBranch(instr, true_cond);
2623 }
2626 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
2627 Register input_reg = EmitLoadRegister(instr->value(), ip);
2628 __ TestIfSmi(input_reg, r0);
2629 EmitBranch(instr, eq, cr0);
2630 }
2633 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2634 Register input = ToRegister(instr->value());
2635 Register temp = ToRegister(instr->temp());
2637 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2638 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2639 }
2640 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2641 __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2642 __ TestBit(temp, Map::kIsUndetectable, r0);
2643 EmitBranch(instr, ne, cr0);
2644 }
2647 static Condition ComputeCompareCondition(Token::Value op) {
2648 switch (op) {
2649 case Token::EQ_STRICT:
2650 case Token::EQ:
2651 return eq;
2652 case Token::LT:
2653 return lt;
2654 case Token::GT:
2655 return gt;
2656 case Token::LTE:
2657 return le;
2658 case Token::GTE:
2659 return ge;
2660 default:
2661 UNREACHABLE();
2662 return kNoCondition;
2663 }
2664 }
2667 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
2668 DCHECK(ToRegister(instr->context()).is(cp));
2669 Token::Value op = instr->op();
2671 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2672 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2673 // This instruction also signals that no smi code was inlined.
2674 __ cmpi(r3, Operand::Zero());
2676 Condition condition = ComputeCompareCondition(op);
2678 EmitBranch(instr, condition);
2679 }
2682 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
2683 InstanceType from = instr->from();
2684 InstanceType to = instr->to();
2685 if (from == FIRST_TYPE) return to;
2686 DCHECK(from == to || to == LAST_TYPE);
2687 return from;
2688 }
2691 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
2692 InstanceType from = instr->from();
2693 InstanceType to = instr->to();
2694 if (from == to) return eq;
2695 if (to == LAST_TYPE) return ge;
2696 if (from == FIRST_TYPE) return le;
2697 UNREACHABLE();
2698 return eq;
2699 }
2702 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
2703 Register scratch = scratch0();
2704 Register input = ToRegister(instr->value());
2706 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2707 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2708 }
2710 __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
2711 EmitBranch(instr, BranchCondition(instr->hydrogen()));
2712 }
2715 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2716 Register input = ToRegister(instr->value());
2717 Register result = ToRegister(instr->result());
2719 __ AssertString(input);
2721 __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
2722 __ IndexFromHash(result, result);
2723 }
2726 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2727 LHasCachedArrayIndexAndBranch* instr) {
2728 Register input = ToRegister(instr->value());
2729 Register scratch = scratch0();
2731 __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
2732 __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
2733 __ and_(r0, scratch, r0, SetRC);
2734 EmitBranch(instr, eq, cr0);
2735 }
2738 // Branches to a label or falls through with the answer in flags. Trashes
2739 // the temp registers, but not the input.
2740 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
2741 Handle<String> class_name, Register input,
2742 Register temp, Register temp2) {
2743 DCHECK(!input.is(temp));
2744 DCHECK(!input.is(temp2));
2745 DCHECK(!temp.is(temp2));
2747 __ JumpIfSmi(input, is_false);
2749 if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
2750 // Assuming the following assertions, we can use the same compares to test
2751 // for both being a function type and being in the object type range.
2752 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
2753 STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2754 FIRST_SPEC_OBJECT_TYPE + 1);
2755 STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
2756 LAST_SPEC_OBJECT_TYPE - 1);
2757 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2758 __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
2759 __ blt(is_false);
2760 __ beq(is_true);
2761 __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2762 __ beq(is_true);
2763 } else {
2764 // Faster code path to avoid two compares: subtract lower bound from the
2765 // actual type and do a signed compare with the width of the type range.
2766 __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2767 __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
2768 __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2769 __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2770 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2771 __ bgt(is_false);
2772 }
2774 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2775 // Check if the constructor in the map is a function.
2776 Register instance_type = ip;
2777 __ GetMapConstructor(temp, temp, temp2, instance_type);
2779 // Objects with a non-function constructor have class 'Object'.
2780 __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
2781 if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
2782 __ bne(is_true);
2783 } else {
2784 __ bne(is_false);
2785 }
2787 // temp now contains the constructor function. Grab the
2788 // instance class name from there.
2789 __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2790 __ LoadP(temp,
2791 FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
2792 // The class name we are testing against is internalized since it's a literal.
2793 // The name in the constructor is internalized because of the way the context
2794 // is booted. This routine isn't expected to work for random API-created
2795 // classes and it doesn't have to because you can't access it with natives
2796 // syntax. Since both sides are internalized it is sufficient to use an
2797 // identity comparison.
2798 __ Cmpi(temp, Operand(class_name), r0);
2799 // End with the answer in flags.
2800 }
2803 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2804 Register input = ToRegister(instr->value());
2805 Register temp = scratch0();
2806 Register temp2 = ToRegister(instr->temp());
2807 Handle<String> class_name = instr->hydrogen()->class_name();
2809 EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2810 class_name, input, temp, temp2);
2812 EmitBranch(instr, eq);
2813 }
2816 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2817 Register reg = ToRegister(instr->value());
2818 Register temp = ToRegister(instr->temp());
2820 __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2821 __ Cmpi(temp, Operand(instr->map()), r0);
2822 EmitBranch(instr, eq);
2823 }
2826 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2827 DCHECK(ToRegister(instr->context()).is(cp));
2828 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2829 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2831 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2832 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2834 if (CpuFeatures::IsSupported(ISELECT)) {
2835 __ mov(r4, Operand(factory()->true_value()));
2836 __ mov(r5, Operand(factory()->false_value()));
2837 __ cmpi(r3, Operand::Zero());
2838 __ isel(eq, r3, r4, r5);
2839 } else {
2840 Label equal, done;
2841 __ cmpi(r3, Operand::Zero());
2842 __ beq(&equal);
2843 __ mov(r3, Operand(factory()->false_value()));
2844 __ b(&done);
2846 __ bind(&equal);
2847 __ mov(r3, Operand(factory()->true_value()));
2848 __ bind(&done);
2849 }
2850 }
2853 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2854 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2855 public:
2856 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2857 LInstanceOfKnownGlobal* instr)
2858 : LDeferredCode(codegen), instr_(instr) {}
2859 void Generate() OVERRIDE {
2860 codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2861 }
2862 LInstruction* instr() OVERRIDE { return instr_; }
2863 Label* map_check() { return &map_check_; }
2865 private:
2866 LInstanceOfKnownGlobal* instr_;
2867 Label map_check_;
2868 };
2870 DeferredInstanceOfKnownGlobal* deferred;
2871 deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
2873 Label done, false_result;
2874 Register object = ToRegister(instr->value());
2875 Register temp = ToRegister(instr->temp());
2876 Register result = ToRegister(instr->result());
2878 // A Smi is not instance of anything.
2879 __ JumpIfSmi(object, &false_result);
2881 // This is the inlined call site instanceof cache. The two occurrences of the
2882 // hole value will be patched to the last map/result pair generated by the
2883 // instanceof stub.
2884 Label cache_miss;
2885 Register map = temp;
2886 __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
2887 {
2888 // Block trampoline emission to ensure the positions of instructions are
2889 // as expected by the patcher. See InstanceofStub::Generate().
2890 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2891 __ bind(deferred->map_check()); // Label for calculating code patching.
2892 // We use Factory::the_hole_value() on purpose instead of loading from the
2893 // root array to force relocation to be able to later patch with
2894 // the cached map.
2895 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2896 __ mov(ip, Operand(cell));
2897 __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
2898 __ cmp(map, ip);
2899 __ bc_short(ne, &cache_miss);
2900 // We use Factory::the_hole_value() on purpose instead of loading from the
2901 // root array to force relocation to be able to later patch
2902 // with true or false.
2903 __ mov(result, Operand(factory()->the_hole_value()));
2904 }
2905 __ b(&done);
2907 // The inlined call site cache did not match. Check null and string before
2908 // calling the deferred code.
2909 __ bind(&cache_miss);
2910 // Null is not instance of anything.
2911 __ LoadRoot(ip, Heap::kNullValueRootIndex);
2913 __ beq(&false_result);
2915 // String values are not instances of anything.
2916 Condition is_string = masm_->IsObjectStringType(object, temp);
2917 __ b(is_string, &false_result, cr0);
2919 // Go to the deferred code.
2920 __ b(deferred->entry());
2922 __ bind(&false_result);
2923 __ LoadRoot(result, Heap::kFalseValueRootIndex);
2925 // Here result has either true or false. Deferred code also produces true or
2926 // false object.
2927 __ bind(deferred->exit());
2928 __ bind(&done);
2929 }
2932 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2933 Label* map_check) {
2934 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2935 flags = static_cast<InstanceofStub::Flags>(flags |
2936 InstanceofStub::kArgsInRegisters);
2937 flags = static_cast<InstanceofStub::Flags>(
2938 flags | InstanceofStub::kCallSiteInlineCheck);
2939 flags = static_cast<InstanceofStub::Flags>(
2940 flags | InstanceofStub::kReturnTrueFalseObject);
2941 InstanceofStub stub(isolate(), flags);
2943 PushSafepointRegistersScope scope(this);
2944 LoadContextFromDeferred(instr->context());
2946 __ Move(InstanceofStub::right(), instr->function());
2947 {
2948 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2949 Handle<Code> code = stub.GetCode();
2950 // Include instructions below in delta: bitwise_mov32 + call
2951 int delta = (masm_->InstructionsGeneratedSince(map_check) + 2) *
2952 Instruction::kInstrSize +
2953 masm_->CallSize(code);
2954 // r8 is used to communicate the offset to the location of the map check.
2955 if (is_int16(delta)) {
2956 delta -= Instruction::kInstrSize;
2957 __ li(r8, Operand(delta));
2958 } else {
2959 __ bitwise_mov32(r8, delta);
2960 }
2961 CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
2962 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2963 DCHECK(delta / Instruction::kInstrSize ==
2964 masm_->InstructionsGeneratedSince(map_check));
2965 }
2966 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2967 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2968 // Put the result value (r3) into the result register slot and
2969 // restore all registers.
2970 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2971 }
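// Patching arithmetic above, by example (reading of the code, not a quoted
// spec): delta is the byte distance from the map-check label to the call's
// return address, communicated in r8 so the stub can locate and rewrite the
// inlined map and result cells. When delta fits in 16 bits, li needs one
// instruction where bitwise_mov32 needs two, so one Instruction::kInstrSize
// is subtracted before emission; the DCHECK then re-derives the same
// distance after emission as a sanity check.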
2974 void LCodeGen::DoCmpT(LCmpT* instr) {
2975 DCHECK(ToRegister(instr->context()).is(cp));
2976 Token::Value op = instr->op();
2978 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2979 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2980 // This instruction also signals that no smi code was inlined.
2981 __ cmpi(r3, Operand::Zero());
2983 Condition condition = ComputeCompareCondition(op);
2984 if (CpuFeatures::IsSupported(ISELECT)) {
2985 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2986 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2987 __ isel(condition, ToRegister(instr->result()), r4, r5);
2988 } else {
2989 Label true_value, done;
2991 __ b(condition, &true_value);
2993 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2994 __ b(&done);
2996 __ bind(&true_value);
2997 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2999 __ bind(&done);
3000 }
3001 }
3004 void LCodeGen::DoReturn(LReturn* instr) {
3005 if (FLAG_trace && info()->IsOptimizing()) {
3006 // Push the return value on the stack as the parameter.
3007 // Runtime::TraceExit returns its parameter in r3. We're leaving the code
3008 // managed by the register allocator and tearing down the frame, so it's
3009 // safe to write to the context register.
3010 __ push(r3);
3011 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
3012 __ CallRuntime(Runtime::kTraceExit, 1);
3013 }
3014 if (info()->saves_caller_doubles()) {
3015 RestoreCallerDoubles();
3016 }
3017 int no_frame_start = -1;
3018 if (instr->has_constant_parameter_count()) {
3019 int parameter_count = ToInteger32(instr->constant_parameter_count());
3020 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
3021 if (NeedsEagerFrame()) {
3022 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
3023 } else if (sp_delta != 0) {
3024 __ addi(sp, sp, Operand(sp_delta));
3025 }
3026 } else {
3027 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
3028 Register reg = ToRegister(instr->parameter_count());
3029 // The argument count parameter is a smi
3030 if (NeedsEagerFrame()) {
3031 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
3032 }
3033 __ SmiToPtrArrayOffset(r0, reg);
3034 __ add(sp, sp, r0);
3035 }
3037 __ blr();
3039 if (no_frame_start != -1) {
3040 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
3041 }
3042 }
3045 template <class T>
3046 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3047 DCHECK(FLAG_vector_ics);
3048 Register vector_register = ToRegister(instr->temp_vector());
3049 Register slot_register = VectorLoadICDescriptor::SlotRegister();
3050 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
3051 DCHECK(slot_register.is(r3));
3053 AllowDeferredHandleDereference vector_structure_check;
3054 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3055 __ Move(vector_register, vector);
3056 // No need to allocate this register.
3057 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
3058 int index = vector->GetIndex(slot);
3059 __ mov(slot_register, Operand(Smi::FromInt(index)));
3060 }
3063 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3064 DCHECK(ToRegister(instr->context()).is(cp));
3065 DCHECK(ToRegister(instr->global_object())
3066 .is(LoadDescriptor::ReceiverRegister()));
3067 DCHECK(ToRegister(instr->result()).is(r3));
3069 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3070 if (FLAG_vector_ics) {
3071 EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
3072 }
3073 ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
3074 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
3075 PREMONOMORPHIC).code();
3076 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3077 }
3080 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3081 Register context = ToRegister(instr->context());
3082 Register result = ToRegister(instr->result());
3083 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3084 if (instr->hydrogen()->RequiresHoleCheck()) {
3085 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3086 if (instr->hydrogen()->DeoptimizesOnHole()) {
3087 __ cmp(result, ip);
3088 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3089 } else {
3090 if (CpuFeatures::IsSupported(ISELECT)) {
3091 Register scratch = scratch0();
3092 __ mov(scratch, Operand(factory()->undefined_value()));
3093 __ cmp(result, ip);
3094 __ isel(eq, result, scratch, result);
3095 } else {
3096 Label skip;
3097 __ cmp(result, ip);
3098 __ bne(&skip);
3099 __ mov(result, Operand(factory()->undefined_value()));
3100 __ bind(&skip);
3101 }
3102 }
3103 }
3104 }
3107 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3108 Register context = ToRegister(instr->context());
3109 Register value = ToRegister(instr->value());
3110 Register scratch = scratch0();
3111 MemOperand target = ContextOperand(context, instr->slot_index());
3113 Label skip_assignment;
3115 if (instr->hydrogen()->RequiresHoleCheck()) {
3116 __ LoadP(scratch, target);
3117 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3118 __ cmp(scratch, ip);
3119 if (instr->hydrogen()->DeoptimizesOnHole()) {
3120 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3121 } else {
3122 __ bne(&skip_assignment);
3123 }
3124 }
3126 __ StoreP(value, target, r0);
3127 if (instr->hydrogen()->NeedsWriteBarrier()) {
3128 SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
3129 ? OMIT_SMI_CHECK
3130 : INLINE_SMI_CHECK;
3131 __ RecordWriteContextSlot(context, target.offset(), value, scratch,
3132 GetLinkRegisterState(), kSaveFPRegs,
3133 EMIT_REMEMBERED_SET, check_needed);
3134 }
3136 __ bind(&skip_assignment);
3137 }
3140 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
3141 HObjectAccess access = instr->hydrogen()->access();
3142 int offset = access.offset();
3143 Register object = ToRegister(instr->object());
3145 if (access.IsExternalMemory()) {
3146 Register result = ToRegister(instr->result());
3147 MemOperand operand = MemOperand(object, offset);
3148 __ LoadRepresentation(result, operand, access.representation(), r0);
3149 return;
3150 }
3152 if (instr->hydrogen()->representation().IsDouble()) {
3153 DCHECK(access.IsInobject());
3154 DoubleRegister result = ToDoubleRegister(instr->result());
3155 __ lfd(result, FieldMemOperand(object, offset));
3156 return;
3157 }
3159 Register result = ToRegister(instr->result());
3160 if (!access.IsInobject()) {
3161 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3162 object = result;
3163 }
3165 Representation representation = access.representation();
3167 #if V8_TARGET_ARCH_PPC64
3168 // 64-bit Smi optimization
3169 if (representation.IsSmi() &&
3170 instr->hydrogen()->representation().IsInteger32()) {
3171 // Read int value directly from upper half of the smi.
3172 offset = SmiWordOffset(offset);
3173 representation = Representation::Integer32();
3174 }
3175 #endif
3177 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
3178 r0);
3179 }
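// PPC64 smi layout assumed by the optimization above: the 32-bit payload
// lives in the upper word of the 64-bit tagged value, so a field known to
// hold a smi can be read with a single 32-bit load of that word.
// SmiWordOffset() is understood here as the endian-dependent adjustment that
// selects the correct half (the raw offset on big-endian, offset + 4 on
// little-endian).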
3182 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3183 DCHECK(ToRegister(instr->context()).is(cp));
3184 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3185 DCHECK(ToRegister(instr->result()).is(r3));
3187 // Name is always in r5.
3188 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
3189 if (FLAG_vector_ics) {
3190 EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
3191 }
3192 Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
3193 isolate(), NOT_CONTEXTUAL,
3194 instr->hydrogen()->initialization_state()).code();
3195 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3196 }
3199 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
3200 Register scratch = scratch0();
3201 Register function = ToRegister(instr->function());
3202 Register result = ToRegister(instr->result());
3204 // Get the prototype or initial map from the function.
3205 __ LoadP(result,
3206 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3208 // Check that the function has a prototype or an initial map.
3209 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3210 __ cmp(result, ip);
3211 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3213 // If the function does not have an initial map, we're done.
3214 if (CpuFeatures::IsSupported(ISELECT)) {
3215 // Get the prototype from the initial map (optimistic).
3216 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
3217 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3218 __ isel(eq, result, ip, result);
3219 } else {
3220 Label done;
3221 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3222 __ bne(&done);
3224 // Get the prototype from the initial map.
3225 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3227 // All done.
3228 __ bind(&done);
3229 }
3230 }
3233 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3234 Register result = ToRegister(instr->result());
3235 __ LoadRoot(result, instr->index());
3236 }
3239 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3240 Register arguments = ToRegister(instr->arguments());
3241 Register result = ToRegister(instr->result());
3242 // There are two words between the frame pointer and the last argument.
3243 // Subtracting from length accounts for one of them; add one more.
3244 if (instr->length()->IsConstantOperand()) {
3245 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3246 if (instr->index()->IsConstantOperand()) {
3247 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3248 int index = (const_length - const_index) + 1;
3249 __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
3250 } else {
3251 Register index = ToRegister(instr->index());
3252 __ subfic(result, index, Operand(const_length + 1));
3253 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3254 __ LoadPX(result, MemOperand(arguments, result));
3255 }
3256 } else if (instr->index()->IsConstantOperand()) {
3257 Register length = ToRegister(instr->length());
3258 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3259 int loc = const_index - 1;
3260 if (loc != 0) {
3261 __ subi(result, length, Operand(loc));
3262 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3263 __ LoadPX(result, MemOperand(arguments, result));
3264 } else {
3265 __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
3266 __ LoadPX(result, MemOperand(arguments, result));
3267 }
3268 } else {
3269 Register length = ToRegister(instr->length());
3270 Register index = ToRegister(instr->index());
3271 __ sub(result, length, index);
3272 __ addi(result, result, Operand(1));
3273 __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
3274 __ LoadPX(result, MemOperand(arguments, result));
3275 }
3276 }
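// Example for the constant/constant case above: with const_length == 2 and
// const_index == 1 (the last argument), index = (2 - 1) + 1 = 2, so the load
// reads two pointers above the arguments pointer -- consistent with the two
// words between the frame pointer and the last argument described at the top
// of this function.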
3279 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3280 Register external_pointer = ToRegister(instr->elements());
3281 Register key = no_reg;
3282 ElementsKind elements_kind = instr->elements_kind();
3283 bool key_is_constant = instr->key()->IsConstantOperand();
3284 int constant_key = 0;
3285 if (key_is_constant) {
3286 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3287 if (constant_key & 0xF0000000) {
3288 Abort(kArrayIndexConstantValueTooBig);
3289 }
3290 } else {
3291 key = ToRegister(instr->key());
3292 }
3293 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3294 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3295 int base_offset = instr->base_offset();
3297 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3298 elements_kind == FLOAT32_ELEMENTS ||
3299 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3300 elements_kind == FLOAT64_ELEMENTS) {
3301 DoubleRegister result = ToDoubleRegister(instr->result());
3302 if (key_is_constant) {
3303 __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
3304 r0);
3305 } else {
3306 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3307 __ add(scratch0(), external_pointer, r0);
3308 }
3309 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3310 elements_kind == FLOAT32_ELEMENTS) {
3311 __ lfs(result, MemOperand(scratch0(), base_offset));
3312 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3313 __ lfd(result, MemOperand(scratch0(), base_offset));
3314 }
3315 } else {
3316 Register result = ToRegister(instr->result());
3317 MemOperand mem_operand =
3318 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
3319 constant_key, element_size_shift, base_offset);
3320 switch (elements_kind) {
3321 case EXTERNAL_INT8_ELEMENTS:
3322 case INT8_ELEMENTS:
3323 if (key_is_constant) {
3324 __ LoadByte(result, mem_operand, r0);
3325 } else {
3326 __ lbzx(result, mem_operand);
3327 }
3328 __ extsb(result, result);
3329 break;
3330 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
3331 case EXTERNAL_UINT8_ELEMENTS:
3332 case UINT8_ELEMENTS:
3333 case UINT8_CLAMPED_ELEMENTS:
3334 if (key_is_constant) {
3335 __ LoadByte(result, mem_operand, r0);
3336 } else {
3337 __ lbzx(result, mem_operand);
3338 }
3339 break;
3340 case EXTERNAL_INT16_ELEMENTS:
3341 case INT16_ELEMENTS:
3342 if (key_is_constant) {
3343 __ LoadHalfWordArith(result, mem_operand, r0);
3344 } else {
3345 __ lhax(result, mem_operand);
3346 }
3347 break;
3348 case EXTERNAL_UINT16_ELEMENTS:
3349 case UINT16_ELEMENTS:
3350 if (key_is_constant) {
3351 __ LoadHalfWord(result, mem_operand, r0);
3352 } else {
3353 __ lhzx(result, mem_operand);
3354 }
3355 break;
3356 case EXTERNAL_INT32_ELEMENTS:
3357 case INT32_ELEMENTS:
3358 if (key_is_constant) {
3359 __ LoadWordArith(result, mem_operand, r0);
3360 } else {
3361 __ lwax(result, mem_operand);
3362 }
3363 break;
3364 case EXTERNAL_UINT32_ELEMENTS:
3365 case UINT32_ELEMENTS:
3366 if (key_is_constant) {
3367 __ LoadWord(result, mem_operand, r0);
3368 } else {
3369 __ lwzx(result, mem_operand);
3370 }
3371 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3372 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
3373 __ cmplw(result, r0);
3374 DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
3375 }
3376 break;
3377 case FLOAT32_ELEMENTS:
3378 case FLOAT64_ELEMENTS:
3379 case EXTERNAL_FLOAT32_ELEMENTS:
3380 case EXTERNAL_FLOAT64_ELEMENTS:
3381 case FAST_HOLEY_DOUBLE_ELEMENTS:
3382 case FAST_HOLEY_ELEMENTS:
3383 case FAST_HOLEY_SMI_ELEMENTS:
3384 case FAST_DOUBLE_ELEMENTS:
3385 case FAST_ELEMENTS:
3386 case FAST_SMI_ELEMENTS:
3387 case DICTIONARY_ELEMENTS:
3388 case SLOPPY_ARGUMENTS_ELEMENTS:
3389 UNREACHABLE();
3390 break;
3391 }
3392 }
3396 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3397 Register elements = ToRegister(instr->elements());
3398 bool key_is_constant = instr->key()->IsConstantOperand();
3399 Register key = no_reg;
3400 DoubleRegister result = ToDoubleRegister(instr->result());
3401 Register scratch = scratch0();
3403 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3404 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
3405 int constant_key = 0;
3406 if (key_is_constant) {
3407 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3408 if (constant_key & 0xF0000000) {
3409 Abort(kArrayIndexConstantValueTooBig);
3410 }
3411 } else {
3412 key = ToRegister(instr->key());
3413 }
3415 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
3416 if (!key_is_constant) {
3417 __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
3418 __ add(scratch, elements, r0);
3419 elements = scratch;
3420 }
3421 if (!is_int16(base_offset)) {
3422 __ Add(scratch, elements, base_offset, r0);
3423 base_offset = 0;
3424 elements = scratch;
3425 }
3426 __ lfd(result, MemOperand(elements, base_offset));
3428 if (instr->hydrogen()->RequiresHoleCheck()) {
3429 if (is_int16(base_offset + Register::kExponentOffset)) {
3430 __ lwz(scratch,
3431 MemOperand(elements, base_offset + Register::kExponentOffset));
3432 } else {
3433 __ addi(scratch, elements, Operand(base_offset));
3434 __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
3435 }
3436 __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
3437 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3438 }
3439 }
3442 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3443 HLoadKeyed* hinstr = instr->hydrogen();
3444 Register elements = ToRegister(instr->elements());
3445 Register result = ToRegister(instr->result());
3446 Register scratch = scratch0();
3447 Register store_base = scratch;
3448 int offset = instr->base_offset();
3450 if (instr->key()->IsConstantOperand()) {
3451 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3452 offset += ToInteger32(const_operand) * kPointerSize;
3453 store_base = elements;
3454 } else {
3455 Register key = ToRegister(instr->key());
3456 // Even though the HLoadKeyed instruction forces the input
3457 // representation for the key to be an integer, the input gets replaced
3458 // during bound check elimination with the index argument to the bounds
3459 // check, which can be tagged, so that case must be handled here, too.
3460 if (hinstr->key()->representation().IsSmi()) {
3461 __ SmiToPtrArrayOffset(r0, key);
3462 } else {
3463 __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
3464 }
3465 __ add(scratch, elements, r0);
3466 }
3468 bool requires_hole_check = hinstr->RequiresHoleCheck();
3469 Representation representation = hinstr->representation();
3471 #if V8_TARGET_ARCH_PPC64
3472 // 64-bit Smi optimization
3473 if (representation.IsInteger32() &&
3474 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3475 DCHECK(!requires_hole_check);
3476 // Read int value directly from upper half of the smi.
3477 offset = SmiWordOffset(offset);
3478 }
3479 #endif
3481 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3482 r0);
3484 // Check for the hole value.
3485 if (requires_hole_check) {
3486 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3487 __ TestIfSmi(result, r0);
3488 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
3489 } else {
3490 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3491 __ cmp(result, scratch);
3492 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3493 }
3494 }
3495 }
3498 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3499 if (instr->is_typed_elements()) {
3500 DoLoadKeyedExternalArray(instr);
3501 } else if (instr->hydrogen()->representation().IsDouble()) {
3502 DoLoadKeyedFixedDoubleArray(instr);
3503 } else {
3504 DoLoadKeyedFixedArray(instr);
3505 }
3506 }
3509 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
3510 bool key_is_constant, bool key_is_smi,
3511 int constant_key,
3512 int element_size_shift,
3513 int base_offset) {
3514 Register scratch = scratch0();
3516 if (key_is_constant) {
3517 return MemOperand(base, (constant_key << element_size_shift) + base_offset);
3518 }
3520 bool needs_shift =
3521 (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
3523 if (!(base_offset || needs_shift)) {
3524 return MemOperand(base, key);
3525 }
3527 if (needs_shift) {
3528 __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
3529 key = scratch;
3530 }
3532 if (base_offset) {
3533 __ Add(scratch, key, base_offset, r0);
3534 }
3536 return MemOperand(base, scratch);
3537 }
3540 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3541 DCHECK(ToRegister(instr->context()).is(cp));
3542 DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
3543 DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
3545 if (FLAG_vector_ics) {
3546 EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
3547 }
3549 Handle<Code> ic =
3550 CodeFactory::KeyedLoadICInOptimizedCode(
3551 isolate(), instr->hydrogen()->initialization_state()).code();
3552 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3553 }
3556 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3557 Register scratch = scratch0();
3558 Register result = ToRegister(instr->result());
3560 if (instr->hydrogen()->from_inlined()) {
3561 __ subi(result, sp, Operand(2 * kPointerSize));
3562 } else {
3563 // Check if the calling frame is an arguments adaptor frame.
3564 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3565 __ LoadP(result,
3566 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3567 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3569 // Result is the frame pointer for the frame if not adapted and for the real
3570 // frame below the adaptor frame if adapted.
3571 if (CpuFeatures::IsSupported(ISELECT)) {
3572 __ isel(eq, result, scratch, fp);
3573 } else {
3574 Label done, adapted;
3575 __ beq(&adapted);
3576 __ mr(result, fp);
3577 __ b(&done);
3579 __ bind(&adapted);
3580 __ mr(result, scratch);
3581 __ bind(&done);
3582 }
3583 }
3584 }
3587 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3588 Register elem = ToRegister(instr->elements());
3589 Register result = ToRegister(instr->result());
3591 Label done;
3593 // If there is no arguments adaptor frame, the number of arguments is fixed.
3594 __ cmp(fp, elem);
3595 __ mov(result, Operand(scope()->num_parameters()));
3596 __ beq(&done);
3598 // Arguments adaptor frame present. Get argument length from there.
3599 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3600 __ LoadP(result,
3601 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3602 __ SmiUntag(result);
3604 // Argument length is in result register.
3605 __ bind(&done);
3606 }
3609 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3610 Register receiver = ToRegister(instr->receiver());
3611 Register function = ToRegister(instr->function());
3612 Register result = ToRegister(instr->result());
3613 Register scratch = scratch0();
3615 // If the receiver is null or undefined, we have to pass the global
3616 // object as a receiver to normal functions. Values have to be
3617 // passed unchanged to builtins and strict-mode functions.
3618 Label global_object, result_in_receiver;
3620 if (!instr->hydrogen()->known_function()) {
3621 // Do not transform the receiver to object for strict mode
3622 // functions.
3623 __ LoadP(scratch,
3624 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3625 __ lwz(scratch,
3626 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3627 __ TestBit(scratch,
3628 #if V8_TARGET_ARCH_PPC64
3629 SharedFunctionInfo::kStrictModeFunction,
3630 #else
3631 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
3632 #endif
3633 r0);
3634 __ bne(&result_in_receiver, cr0);
3636 // Do not transform the receiver to object for builtins.
3637 __ TestBit(scratch,
3638 #if V8_TARGET_ARCH_PPC64
3639 SharedFunctionInfo::kNative,
3640 #else
3641 SharedFunctionInfo::kNative + kSmiTagSize,
3642 #endif
3643 r0);
3644 __ bne(&result_in_receiver, cr0);
3645 }
3647 // Normal function. Replace undefined or null with global receiver.
3648 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3649 __ cmp(receiver, scratch);
3650 __ beq(&global_object);
3651 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3652 __ cmp(receiver, scratch);
3653 __ beq(&global_object);
3655 // Deoptimize if the receiver is not a JS object.
3656 __ TestIfSmi(receiver, r0);
3657 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
3658 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3659 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3661 __ b(&result_in_receiver);
3662 __ bind(&global_object);
3663 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3664 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3665 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3666 if (result.is(receiver)) {
3667 __ bind(&result_in_receiver);
3668 } else {
3669 Label result_ok;
3670 __ b(&result_ok);
3671 __ bind(&result_in_receiver);
3672 __ mr(result, receiver);
3673 __ bind(&result_ok);
3674 }
3675 }
3678 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
3679 Register receiver = ToRegister(instr->receiver());
3680 Register function = ToRegister(instr->function());
3681 Register length = ToRegister(instr->length());
3682 Register elements = ToRegister(instr->elements());
3683 Register scratch = scratch0();
3684 DCHECK(receiver.is(r3)); // Used for parameter count.
3685 DCHECK(function.is(r4)); // Required by InvokeFunction.
3686 DCHECK(ToRegister(instr->result()).is(r3));
3688 // Copy the arguments to this function possibly from the
3689 // adaptor frame below it.
3690 const uint32_t kArgumentsLimit = 1 * KB;
3691 __ cmpli(length, Operand(kArgumentsLimit));
3692 DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
3694 // Push the receiver and use the register to keep the original
3695 // number of arguments.
3696 __ push(receiver);
3697 __ mr(receiver, length);
3698 // The arguments are at a one pointer size offset from elements.
3699 __ addi(elements, elements, Operand(1 * kPointerSize));
3701 // Loop through the arguments pushing them onto the execution
3702 // stack.
3703 Label invoke, loop;
3704 // length is a small non-negative integer, due to the test above.
3705 __ cmpi(length, Operand::Zero());
3706 __ beq(&invoke);
3707 __ mtctr(length);
3708 __ bind(&loop);
3709 __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
3710 __ LoadPX(scratch, MemOperand(elements, r0));
3711 __ push(scratch);
3712 __ addi(length, length, Operand(-1));
3713 __ bdnz(&loop);
3715 __ bind(&invoke);
3716 DCHECK(instr->HasPointerMap());
3717 LPointerMap* pointers = instr->pointer_map();
3718 SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
3719 // The number of arguments is stored in receiver which is r3, as expected
3720 // by InvokeFunction.
3721 ParameterCount actual(receiver);
3722 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
3723 }
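// The push loop above runs off the count register: mtctr loads it with
// length and each bdnz decrements it and branches while non-zero, so the
// loop body needs no explicit compare. Arguments are pushed from the highest
// index down, which leaves them in the stack order InvokeFunction expects.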
3726 void LCodeGen::DoPushArgument(LPushArgument* instr) {
3727 LOperand* argument = instr->value();
3728 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
3729 Abort(kDoPushArgumentNotImplementedForDoubleType);
3730 } else {
3731 Register argument_reg = EmitLoadRegister(argument, ip);
3732 __ push(argument_reg);
3733 }
3734 }
3737 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
3740 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3741 Register result = ToRegister(instr->result());
3742 __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3743 }
3746 void LCodeGen::DoContext(LContext* instr) {
3747 // If there is a non-return use, the context must be moved to a register.
3748 Register result = ToRegister(instr->result());
3749 if (info()->IsOptimizing()) {
3750 __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3751 } else {
3752 // If there is no frame, the context must be in cp.
3753 DCHECK(result.is(cp));
3754 }
3755 }
3758 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3759 DCHECK(ToRegister(instr->context()).is(cp));
3760 __ push(cp); // The context is the first argument.
3761 __ Move(scratch0(), instr->hydrogen()->pairs());
3762 __ push(scratch0());
3763 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3764 __ push(scratch0());
3765 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3766 }
3769 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3770 int formal_parameter_count, int arity,
3771 LInstruction* instr) {
3772 bool dont_adapt_arguments =
3773 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3774 bool can_invoke_directly =
3775 dont_adapt_arguments || formal_parameter_count == arity;
3777 Register function_reg = r4;
3779 LPointerMap* pointers = instr->pointer_map();
3781 if (can_invoke_directly) {
3782 // Change context.
3783 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3785 // Set r3 to arguments count if adaptation is not needed. Assumes that r3
3786 // is available to write to at this point.
3787 if (dont_adapt_arguments) {
3788 __ mov(r3, Operand(arity));
3789 }
3791 bool is_self_call = function.is_identical_to(info()->closure());
3793 // Invoke function.
3794 if (is_self_call) {
3795 __ CallSelf();
3796 } else {
3797 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3798 __ CallJSEntry(ip);
3799 }
3801 // Set up deoptimization.
3802 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3804 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3805 ParameterCount count(arity);
3806 ParameterCount expected(formal_parameter_count);
3807 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3812 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3813 DCHECK(instr->context() != NULL);
3814 DCHECK(ToRegister(instr->context()).is(cp));
3815 Register input = ToRegister(instr->value());
3816 Register result = ToRegister(instr->result());
3817 Register scratch = scratch0();
3819 // Deoptimize if not a heap number.
3820 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3821 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
3822 __ cmp(scratch, ip);
3823 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
  Label done;
  Register exponent = scratch0();
  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ cmpwi(exponent, Operand::Zero());
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ bge(&done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this);
3841 // Registers were saved at the safepoint, so we can use
3842 // many scratch registers.
3843 Register tmp1 = input.is(r4) ? r3 : r4;
3844 Register tmp2 = input.is(r5) ? r3 : r5;
3845 Register tmp3 = input.is(r6) ? r3 : r6;
3846 Register tmp4 = input.is(r7) ? r3 : r7;
3848 // exponent: floating point exponent value.
3850 Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
3861 if (!tmp1.is(r3)) __ mr(tmp1, r3);
3862 // Restore input_reg after call to runtime.
3863 __ LoadFromSafepointRegisterSlot(input, input);
3864 __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3866 __ bind(&allocated);
3867 // exponent: floating point exponent value.
3868 // tmp1: allocated heap number.
3869 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
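    // |x| for a boxed double is computed without any floating-point ops: the
    // IEEE-754 sign bit in the exponent word is cleared and the mantissa
    // word is copied unchanged into the freshly allocated heap number.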
3870 __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
3871 __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3872 __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3873 __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}
3882 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
3883 Register input = ToRegister(instr->value());
3884 Register result = ToRegister(instr->result());
  Label done;
  __ cmpi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);
  __ li(r0, Operand::Zero());  // clear xer
  __ mtxer(r0);
  __ neg(result, result, SetOE, SetRC);
  // Deoptimize on overflow.
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
  __ bind(&done);
}
3898 #if V8_TARGET_ARCH_PPC64
3899 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
3900 Register input = ToRegister(instr->value());
3901 Register result = ToRegister(instr->result());
  Label done;
  __ cmpwi(input, Operand::Zero());
  __ Move(result, input);
  __ bge(&done);

  // Deoptimize on overflow.
  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
  __ cmpw(input, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);

  __ neg(result, result);
  __ bind(&done);
}
3918 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3919 // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LMathAbs* instr_;
  };
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DoubleRegister input = ToDoubleRegister(instr->value());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ fabs(result, input);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    EmitInteger32MathAbs(instr);
  } else if (r.IsSmi()) {
#else
  } else if (r.IsSmiOrInteger32()) {
#endif
    EmitMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitMathAbs(instr);
    __ bind(deferred->exit());
  }
}
3960 void LCodeGen::DoMathFloor(LMathFloor* instr) {
3961 DoubleRegister input = ToDoubleRegister(instr->value());
3962 Register result = ToRegister(instr->result());
3963 Register input_high = scratch0();
3964 Register scratch = ip;
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
                   &exact);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmpi(result, Operand::Zero());
    __ bne(&done);
    __ cmpwi(input_high, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ bind(&done);
}
3983 void LCodeGen::DoMathRound(LMathRound* instr) {
3984 DoubleRegister input = ToDoubleRegister(instr->value());
3985 Register result = ToRegister(instr->result());
3986 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3987 DoubleRegister input_plus_dot_five = double_scratch1;
3988 Register scratch1 = scratch0();
3989 Register scratch2 = ip;
3990 DoubleRegister dot_five = double_scratch0();
3991 Label convert, done;
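  // Strategy: inputs with |x| <= 0.5 are resolved to -0, +0 or 1 directly
  // below; everything else falls through to the convert label, which
  // computes floor(x + 0.5) via TryInt32Floor.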
3993 __ LoadDoubleLiteral(dot_five, 0.5, r0);
3994 __ fabs(double_scratch1, input);
3995 __ fcmpu(double_scratch1, dot_five);
3996 DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
3997 // If input is in [-0.5, -0], the result is -0.
3998 // If input is in [+0, +0.5[, the result is +0.
3999 // If the input is +0.5, the result is 1.
4000 __ bgt(&convert); // Out of [-0.5, +0.5].
4001 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch1, input);
#else
    __ MovDoubleHighToInt(scratch1, input);
#endif
    __ cmpi(scratch1, Operand::Zero());
    // [-0.5, -0] (negative) yields minus zero.
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }
  __ fcmpu(input, dot_five);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ li(result, Operand(1));
    __ isel(lt, result, r0, result);
    __ b(&done);
  } else {
    Label return_zero;
    __ bne(&return_zero);
    __ li(result, Operand(1));  // +0.5.
    __ b(&done);
    // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
    // flag kBailoutOnMinusZero.
    __ bind(&return_zero);
    __ li(result, Operand::Zero());
    __ b(&done);
  }

  __ bind(&convert);
  __ fadd(input_plus_dot_five, input, dot_five);
4030 // Reuse dot_five (double_scratch0) as we no longer need this value.
4031 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
4032 double_scratch0(), &done, &done);
  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
  __ bind(&done);
}
4038 void LCodeGen::DoMathFround(LMathFround* instr) {
4039 DoubleRegister input_reg = ToDoubleRegister(instr->value());
4040 DoubleRegister output_reg = ToDoubleRegister(instr->result());
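  // Math.fround: frsp rounds the value to the nearest single-precision
  // number; the result stays in a double register but carries only float32
  // precision.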
  __ frsp(output_reg, input_reg);
}
4045 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
4046 DoubleRegister input = ToDoubleRegister(instr->value());
4047 DoubleRegister result = ToDoubleRegister(instr->result());
  __ fsqrt(result, input);
}
4052 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
4053 DoubleRegister input = ToDoubleRegister(instr->value());
4054 DoubleRegister result = ToDoubleRegister(instr->result());
4055 DoubleRegister temp = double_scratch0();
4057 // Note that according to ECMA-262 15.8.2.13:
4058 // Math.pow(-Infinity, 0.5) == Infinity
4059 // Math.sqrt(-Infinity) == NaN
  Label skip, done;

  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
  __ fcmpu(input, temp);
  __ bne(&skip);
  __ fneg(result, temp);
  __ b(&done);

  // Add +0 to convert -0 to +0.
  __ bind(&skip);
  __ fadd(result, input, kDoubleRegZero);
  __ fsqrt(result, result);
  __ bind(&done);
}
4076 void LCodeGen::DoPower(LPower* instr) {
4077 Representation exponent_type = instr->hydrogen()->right()->representation();
4078 // Having marked this as a call, we can use any registers.
4079 // Just make sure that the input/output registers are the expected ones.
4080 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4081 DCHECK(!instr->right()->IsDoubleRegister() ||
4082 ToDoubleRegister(instr->right()).is(d2));
4083 DCHECK(!instr->right()->IsRegister() ||
4084 ToRegister(instr->right()).is(tagged_exponent));
4085 DCHECK(ToDoubleRegister(instr->left()).is(d1));
4086 DCHECK(ToDoubleRegister(instr->result()).is(d3));
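  // Register contract for MathPowStub (asserted above): base in d1, the
  // exponent either in d2 (double) or in the descriptor's tagged exponent
  // register, and the result in d3.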
  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!r10.is(tagged_exponent));
    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r10, ip);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
4113 void LCodeGen::DoMathExp(LMathExp* instr) {
4114 DoubleRegister input = ToDoubleRegister(instr->value());
4115 DoubleRegister result = ToDoubleRegister(instr->result());
4116 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
4117 DoubleRegister double_scratch2 = double_scratch0();
4118 Register temp1 = ToRegister(instr->temp1());
4119 Register temp2 = ToRegister(instr->temp2());
4121 MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
                                double_scratch2, temp1, temp2, scratch0());
}
4126 void LCodeGen::DoMathLog(LMathLog* instr) {
4127 __ PrepareCallCFunction(0, 1, scratch0());
4128 __ MovToFloatParameter(ToDoubleRegister(instr->value()));
  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
                   1);
  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
}
4135 void LCodeGen::DoMathClz32(LMathClz32* instr) {
4136 Register input = ToRegister(instr->value());
4137 Register result = ToRegister(instr->result());
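  // Math.clz32 maps directly onto the PowerPC count-leading-zeros-word
  // instruction.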
  __ cntlzw_(result, input);
}
4142 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
4143 DCHECK(ToRegister(instr->context()).is(cp));
4144 DCHECK(ToRegister(instr->function()).is(r4));
4145 DCHECK(instr->HasPointerMap());
4147 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}
4161 void LCodeGen::DoTailCallThroughMegamorphicCache(
4162 LTailCallThroughMegamorphicCache* instr) {
4163 Register receiver = ToRegister(instr->receiver());
4164 Register name = ToRegister(instr->name());
4165 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
4166 DCHECK(name.is(LoadDescriptor::NameRegister()));
4167 DCHECK(receiver.is(r4));
4168 DCHECK(name.is(r5));
4169 Register scratch = r7;
4170 Register extra = r8;
4171 Register extra2 = r9;
4172 Register extra3 = r10;
4175 Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
4176 Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
4177 DCHECK(!FLAG_vector_ics ||
4178 !AreAliased(slot, vector, scratch, extra, extra2, extra3));
4181 // Important for the tail-call.
4182 bool must_teardown_frame = NeedsEagerFrame();
4184 if (!instr->hydrogen()->is_just_miss()) {
4185 DCHECK(!instr->hydrogen()->is_keyed_load());
4187 // The probe will tail call to a handler if found.
4188 isolate()->stub_cache()->GenerateProbe(
4189 masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
        receiver, name, scratch, extra, extra2, extra3);
  }

  // Tail call to miss if we ended up here.
  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
  if (instr->hydrogen()->is_keyed_load()) {
    KeyedLoadIC::GenerateMiss(masm());
  } else {
    LoadIC::GenerateMiss(masm());
  }
}
4203 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4204 DCHECK(ToRegister(instr->result()).is(r3));
4206 if (instr->hydrogen()->IsTailCall()) {
4207 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
4209 if (instr->target()->IsConstantOperand()) {
4210 LConstantOperand* target = LConstantOperand::cast(instr->target());
4211 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ Jump(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ JumpToJSEntry(ip);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
4221 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4223 if (instr->target()->IsConstantOperand()) {
4224 LConstantOperand* target = LConstantOperand::cast(instr->target());
4225 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4226 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ Call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(target));
      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
      __ CallJSEntry(ip);
    }
    generator.AfterCall();
  }
}
4240 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4241 DCHECK(ToRegister(instr->function()).is(r4));
4242 DCHECK(ToRegister(instr->result()).is(r3));
  if (instr->hydrogen()->pass_argument_count()) {
    __ mov(r3, Operand(instr->arity()));
  }

  // Change context.
  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
4251 bool is_self_call = false;
4252 if (instr->hydrogen()->function()->IsConstant()) {
4253 HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
4254 Handle<JSFunction> jsfun =
4255 Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
    __ CallJSEntry(ip);
  }

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
4270 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4271 DCHECK(ToRegister(instr->context()).is(cp));
4272 DCHECK(ToRegister(instr->function()).is(r4));
4273 DCHECK(ToRegister(instr->result()).is(r3));
4275 int arity = instr->arity();
4276 CallFunctionFlags flags = instr->hydrogen()->function_flags();
4277 if (instr->hydrogen()->HasVectorAndSlot()) {
4278 Register slot_register = ToRegister(instr->temp_slot());
4279 Register vector_register = ToRegister(instr->temp_vector());
4280 DCHECK(slot_register.is(r6));
4281 DCHECK(vector_register.is(r5));
4283 AllowDeferredHandleDereference vector_structure_check;
4284 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4285 int index = vector->GetIndex(instr->hydrogen()->slot());
4287 __ Move(vector_register, vector);
4288 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
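    // The CallIC reads its type feedback through these two registers: the
    // feedback vector and the slot index, passed as a Smi.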
4290 CallICState::CallType call_type =
4291 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    CallFunctionStub stub(isolate(), arity, flags);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
4303 void LCodeGen::DoCallNew(LCallNew* instr) {
4304 DCHECK(ToRegister(instr->context()).is(cp));
4305 DCHECK(ToRegister(instr->constructor()).is(r4));
4306 DCHECK(ToRegister(instr->result()).is(r3));
4308 __ mov(r3, Operand(instr->arity()));
4309 // No cell in r5 for construct type feedback in optimized code
4310 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4311 CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
4316 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4317 DCHECK(ToRegister(instr->context()).is(cp));
4318 DCHECK(ToRegister(instr->constructor()).is(r4));
4319 DCHECK(ToRegister(instr->result()).is(r3));
4321 __ mov(r3, Operand(instr->arity()));
4322 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
4323 ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to switch to the holey kind: look at the first
      // argument.
      __ LoadP(r8, MemOperand(sp, 0));
      __ cmpi(r8, Operand::Zero());
      __ beq(&packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
      __ b(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
4360 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}
4365 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4366 Register function = ToRegister(instr->function());
4367 Register code_object = ToRegister(instr->code_object());
4368 __ addi(code_object, code_object,
4369 Operand(Code::kHeaderSize - kHeapObjectTag));
  __ StoreP(code_object,
            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
}
4375 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
4376 Register result = ToRegister(instr->result());
4377 Register base = ToRegister(instr->base_object());
4378 if (instr->offset()->IsConstantOperand()) {
4379 LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Add(result, base, ToInteger32(offset), r0);
  } else {
    Register offset = ToRegister(instr->offset());
    __ add(result, base, offset);
  }
}
4388 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
4389 HStoreNamedField* hinstr = instr->hydrogen();
4390 Representation representation = instr->representation();
4392 Register object = ToRegister(instr->object());
4393 Register scratch = scratch0();
4394 HObjectAccess access = hinstr->access();
4395 int offset = access.offset();
4397 if (access.IsExternalMemory()) {
4398 Register value = ToRegister(instr->value());
4399 MemOperand operand = MemOperand(object, offset);
    __ StoreRepresentation(value, operand, representation, r0);
    return;
  }
4404 __ AssertNotSmi(object);
#if V8_TARGET_ARCH_PPC64
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsInteger32(LConstantOperand::cast(instr->value())));
#else
  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
#endif
4413 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4414 DCHECK(access.IsInobject());
4415 DCHECK(!hinstr->has_transition());
4416 DCHECK(!hinstr->NeedsWriteBarrier());
4417 DoubleRegister value = ToDoubleRegister(instr->value());
    __ stfd(value, FieldMemOperand(object, offset));
    return;
  }
4422 if (hinstr->has_transition()) {
4423 Handle<Map> transition = hinstr->transition_map();
4424 AddDeprecationDependency(transition);
4425 __ mov(scratch, Operand(transition));
4426 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4427 if (hinstr->NeedsWriteBarrierForMap()) {
4428 Register temp = ToRegister(instr->temp());
4429 // Update the write barrier for the map field.
      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register record_dest = object;
4437 Register record_value = no_reg;
4438 Register record_scratch = scratch;
4439 #if V8_TARGET_ARCH_PPC64
4440 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4441 DCHECK(access.IsInobject());
4442 DoubleRegister value = ToDoubleRegister(instr->value());
4443 __ stfd(value, FieldMemOperand(object, offset));
4444 if (hinstr->NeedsWriteBarrier()) {
      record_value = ToRegister(instr->value());
    }
  } else {
    if (representation.IsSmi() &&
4449 hinstr->value()->representation().IsInteger32()) {
4450 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4451 // 64-bit Smi optimization
4452 // Store int value directly to upper half of the smi.
4453 offset = SmiWordOffset(offset);
      representation = Representation::Integer32();
    }
#endif

    if (access.IsInobject()) {
4458 Register value = ToRegister(instr->value());
4459 MemOperand operand = FieldMemOperand(object, offset);
4460 __ StoreRepresentation(value, operand, representation, r0);
      record_value = value;
    } else {
      Register value = ToRegister(instr->value());
4464 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4465 MemOperand operand = FieldMemOperand(scratch, offset);
4466 __ StoreRepresentation(value, operand, representation, r0);
4467 record_dest = scratch;
4468 record_value = value;
      record_scratch = object;
    }
#if V8_TARGET_ARCH_PPC64
  }
#endif

  if (hinstr->NeedsWriteBarrier()) {
4476 __ RecordWriteField(record_dest, offset, record_value, record_scratch,
4477 GetLinkRegisterState(), kSaveFPRegs,
4478 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
                        hinstr->PointersToHereCheckForValue());
  }
}
4484 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4485 DCHECK(ToRegister(instr->context()).is(cp));
4486 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4487 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4489 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
  Handle<Code> ic =
      StoreIC::initialize_stub(isolate(), instr->language_mode(),
                               instr->hydrogen()->initialization_state());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
4497 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4498 Representation representation = instr->hydrogen()->length()->representation();
4499 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4500 DCHECK(representation.IsSmiOrInteger32());
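  // cc is the condition on (length cmp index) under which we deoptimize.
  // When one operand is a constant the comparison below may be emitted with
  // the operands swapped, in which case cc is commuted to match.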
4502 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
4503 if (instr->length()->IsConstantOperand()) {
4504 int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
4505 Register index = ToRegister(instr->index());
4506 if (representation.IsSmi()) {
      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
    } else {
      __ Cmplwi(index, Operand(length), r0);
    }
    cc = CommuteCondition(cc);
4512 } else if (instr->index()->IsConstantOperand()) {
4513 int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
4514 Register length = ToRegister(instr->length());
4515 if (representation.IsSmi()) {
      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
    } else {
      __ Cmplwi(length, Operand(index), r0);
    }
  } else {
    Register index = ToRegister(instr->index());
4522 Register length = ToRegister(instr->length());
    if (representation.IsSmi()) {
      __ cmpl(length, index);
    } else {
      __ cmplw(length, index);
    }
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(cc), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
4540 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4541 Register external_pointer = ToRegister(instr->elements());
4542 Register key = no_reg;
4543 ElementsKind elements_kind = instr->elements_kind();
4544 bool key_is_constant = instr->key()->IsConstantOperand();
4545 int constant_key = 0;
4546 if (key_is_constant) {
4547 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
4554 int element_size_shift = ElementsKindToShiftSize(elements_kind);
4555 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4556 int base_offset = instr->base_offset();
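  // The element address is external_pointer + (key << element_size_shift) +
  // base_offset; when the key is a compile-time constant the shift is folded
  // into the displacement.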
4558 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4559 elements_kind == FLOAT32_ELEMENTS ||
4560 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
4561 elements_kind == FLOAT64_ELEMENTS) {
4562 Register address = scratch0();
4563 DoubleRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Add(address, external_pointer, constant_key << element_size_shift,
               r0);
      } else {
        address = external_pointer;
      }
    } else {
      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
      __ add(address, external_pointer, r0);
    }
4575 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
4576 elements_kind == FLOAT32_ELEMENTS) {
4577 __ frsp(double_scratch0(), value);
4578 __ stfs(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ stfd(value, MemOperand(address, base_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
4584 MemOperand mem_operand =
4585 PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
4586 constant_key, element_size_shift, base_offset);
4587 switch (elements_kind) {
4588 case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
4589 case EXTERNAL_INT8_ELEMENTS:
4590 case EXTERNAL_UINT8_ELEMENTS:
4591 case UINT8_ELEMENTS:
4592 case UINT8_CLAMPED_ELEMENTS:
        if (key_is_constant) {
          __ StoreByte(value, mem_operand, r0);
        } else {
          __ stbx(value, mem_operand);
        }
        break;
4600 case EXTERNAL_INT16_ELEMENTS:
4601 case EXTERNAL_UINT16_ELEMENTS:
4602 case INT16_ELEMENTS:
4603 case UINT16_ELEMENTS:
        if (key_is_constant) {
          __ StoreHalfWord(value, mem_operand, r0);
        } else {
          __ sthx(value, mem_operand);
        }
        break;
4610 case EXTERNAL_INT32_ELEMENTS:
4611 case EXTERNAL_UINT32_ELEMENTS:
4612 case INT32_ELEMENTS:
4613 case UINT32_ELEMENTS:
        if (key_is_constant) {
          __ StoreWord(value, mem_operand, r0);
        } else {
          __ stwx(value, mem_operand);
        }
        break;
4620 case FLOAT32_ELEMENTS:
4621 case FLOAT64_ELEMENTS:
4622 case EXTERNAL_FLOAT32_ELEMENTS:
4623 case EXTERNAL_FLOAT64_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case SLOPPY_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
4639 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4640 DoubleRegister value = ToDoubleRegister(instr->value());
4641 Register elements = ToRegister(instr->elements());
4642 Register key = no_reg;
4643 Register scratch = scratch0();
4644 DoubleRegister double_scratch = double_scratch0();
4645 bool key_is_constant = instr->key()->IsConstantOperand();
4646 int constant_key = 0;
  // Calculate the effective address of the slot in the array to store the
  // double value.
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
4658 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4659 bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
4660 int base_offset = instr->base_offset() + constant_key * kDoubleSize;
  if (!key_is_constant) {
    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
    __ add(scratch, elements, scratch);
    elements = scratch;
  }
  if (!is_int16(base_offset)) {
    __ Add(scratch, elements, base_offset, r0);
    base_offset = 0;
    elements = scratch;
  }
4672 if (instr->NeedsCanonicalization()) {
4673 // Turn potential sNaN value into qNaN.
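    // Holey double arrays mark the hole with one reserved NaN bit pattern,
    // so arbitrary (signaling) NaN payloads must not be stored verbatim.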
4674 __ CanonicalizeNaN(double_scratch, value);
    __ stfd(double_scratch, MemOperand(elements, base_offset));
  } else {
    __ stfd(value, MemOperand(elements, base_offset));
  }
}
4682 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4683 HStoreKeyed* hinstr = instr->hydrogen();
4684 Register value = ToRegister(instr->value());
4685 Register elements = ToRegister(instr->elements());
4686 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
4687 Register scratch = scratch0();
4688 Register store_base = scratch;
4689 int offset = instr->base_offset();
4692 if (instr->key()->IsConstantOperand()) {
4693 DCHECK(!hinstr->NeedsWriteBarrier());
4694 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
4695 offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (hinstr->key()->representation().IsSmi()) {
      __ SmiToPtrArrayOffset(scratch, key);
    } else {
      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
    }
    __ add(scratch, elements, scratch);
  }
4710 Representation representation = hinstr->value()->representation();
4712 #if V8_TARGET_ARCH_PPC64
4713 // 64-bit Smi optimization
4714 if (representation.IsInteger32()) {
4715 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4716 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4717 // Store int value directly to upper half of the smi.
    offset = SmiWordOffset(offset);
  }
#endif

  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
                         r0);
4725 if (hinstr->NeedsWriteBarrier()) {
    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
4729 // Compute address of modified element and store it into key register.
4730 __ Add(key, store_base, offset, r0);
4731 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
4732 EMIT_REMEMBERED_SET, check_needed,
                   hinstr->PointersToHereCheckForValue());
  }
}
4738 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external/typed array, fast double array, or fast
  // object array.
4740 if (instr->is_typed_elements()) {
4741 DoStoreKeyedExternalArray(instr);
4742 } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}
4750 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4751 DCHECK(ToRegister(instr->context()).is(cp));
4752 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4753 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4754 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4756 Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
4757 isolate(), instr->language_mode(),
4758 instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
4763 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4764 Register object_reg = ToRegister(instr->object());
4765 Register scratch = scratch0();
4767 Handle<Map> from_map = instr->original_map();
4768 Handle<Map> to_map = instr->transitioned_map();
4769 ElementsKind from_kind = instr->from_kind();
4770 ElementsKind to_kind = instr->to_kind();
4772 Label not_applicable;
4773 __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
4774 __ Cmpi(scratch, Operand(from_map), r0);
  __ bne(&not_applicable);
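
  // Fast path: a simple transition (e.g. packed smi -> packed object) just
  // rewrites the map pointer; anything else calls the transition stub under
  // a safepoint.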
4777 if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
4778 Register new_map_reg = ToRegister(instr->new_map_temp());
4779 __ mov(new_map_reg, Operand(to_map));
    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
              r0);
    // Write barrier.
    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
                         GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    DCHECK(ToRegister(instr->context()).is(cp));
4787 DCHECK(object_reg.is(r3));
4788 PushSafepointRegistersScope scope(this);
4789 __ Move(r4, to_map);
4790 bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
  __ bind(&not_applicable);
}
4800 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
4801 Register object = ToRegister(instr->object());
4802 Register temp = ToRegister(instr->temp());
4803 Label no_memento_found;
4804 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
4805 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ bind(&no_memento_found);
}
4810 void LCodeGen::DoStringAdd(LStringAdd* instr) {
4811 DCHECK(ToRegister(instr->context()).is(cp));
4812 DCHECK(ToRegister(instr->left()).is(r4));
4813 DCHECK(ToRegister(instr->right()).is(r3));
4814 StringAddStub stub(isolate(), instr->hydrogen()->flags(),
4815 instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
4820 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };
4832 DeferredStringCharCodeAt* deferred =
4833 new (zone()) DeferredStringCharCodeAt(this, instr);
4835 StringCharLoadGenerator::Generate(
4836 masm(), ToRegister(instr->string()), ToRegister(instr->index()),
4837 ToRegister(instr->result()), deferred->entry());
  __ bind(deferred->exit());
}
4842 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
4843 Register string = ToRegister(instr->string());
4844 Register result = ToRegister(instr->result());
4845 Register scratch = scratch0();
4847 // TODO(3095996): Get rid of this. For now, we need to make the
4848 // result register contain a valid pointer because it is already
4849 // contained in the register pointer map.
4850 __ li(result, Operand::Zero());
4852 PushSafepointRegistersScope scope(this);
4854 // Push the index as a smi. This is safe because of the checks in
4855 // DoStringCharCodeAt above.
4856 if (instr->index()->IsConstantOperand()) {
4857 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(r3);
  __ SmiUntag(r3);
  __ StoreToSafepointRegisterSlot(r3, result);
}
4873 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };
4887 DeferredStringCharFromCode* deferred =
4888 new (zone()) DeferredStringCharFromCode(this, instr);
4890 DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
4891 Register char_code = ToRegister(instr->char_code());
4892 Register result = ToRegister(instr->result());
4893 DCHECK(!char_code.is(result));
4895 __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
4896 __ bgt(deferred->entry());
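  // One-character strings with char codes up to kMaxOneByteCharCode are
  // preallocated in the single-character string cache; an undefined cache
  // entry sends us to the deferred runtime path.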
4897 __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
4898 __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
4899 __ add(result, result, r0);
4900 __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ beq(deferred->entry());
  __ bind(deferred->exit());
}
4908 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
4909 Register char_code = ToRegister(instr->char_code());
4910 Register result = ToRegister(instr->result());
4912 // TODO(3095996): Get rid of this. For now, we need to make the
4913 // result register contain a valid pointer because it is already
4914 // contained in the register pointer map.
4915 __ li(result, Operand::Zero());
4917 PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
4920 CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}
4925 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4926 LOperand* input = instr->value();
4927 DCHECK(input->IsRegister() || input->IsStackSlot());
4928 LOperand* output = instr->result();
4929 DCHECK(output->IsDoubleRegister());
4930 if (input->IsStackSlot()) {
4931 Register scratch = scratch0();
4932 __ LoadP(scratch, ToMemOperand(input));
    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
  } else {
    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
  }
}
4940 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4941 LOperand* input = instr->value();
4942 LOperand* output = instr->result();
  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
}
4947 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), SIGNED_INT32);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagI* instr_;
  };
4962 Register src = ToRegister(instr->value());
4963 Register dst = ToRegister(instr->result());
4965 DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
4966 #if V8_TARGET_ARCH_PPC64
  __ SmiTag(dst, src);
#else
  __ SmiTagCheckOverflow(dst, src, r0);
  __ BranchOnOverflow(deferred->entry());
#endif
  __ bind(deferred->exit());
}
4976 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                       instr_->temp2(), UNSIGNED_INT32);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagU* instr_;
  };
4991 Register input = ToRegister(instr->value());
4992 Register result = ToRegister(instr->result());
4994 DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
4995 __ Cmpli(input, Operand(Smi::kMaxValue), r0);
4996 __ bgt(deferred->entry());
4997 __ SmiTag(result, input);
  __ bind(deferred->exit());
}
5002 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
5003 LOperand* temp1, LOperand* temp2,
5004 IntegerSignedness signedness) {
  Label done, slow;
  Register src = ToRegister(value);
5007 Register dst = ToRegister(instr->result());
5008 Register tmp1 = scratch0();
5009 Register tmp2 = ToRegister(temp1);
5010 Register tmp3 = ToRegister(temp2);
5011 DoubleRegister dbl_scratch = double_scratch0();
5013 if (signedness == SIGNED_INT32) {
5014 // There was overflow, so bits 30 and 31 of the original integer
5015 // disagree. Try to allocate a heap number in new space and store
5016 // the value in there. If that fails, call the runtime system.
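    // On 32-bit targets SmiTag is a left shift by one, so after an overflow
    // the arithmetically untagged value differs from the original int32 only
    // in its sign bit; the xoris below restores it.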
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
    }
    __ ConvertIntToDouble(src, dbl_scratch);
  } else {
    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
  }
5026 if (FLAG_inline_new) {
5027 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
5035 // TODO(3095996): Put a valid pointer value in the stack slot where the
5036 // result register is stored, as this register is in the pointer map, but
5037 // contains an integer value.
5038 __ li(dst, Operand::Zero());
5040 // Preserve the value of all registers.
5041 PushSafepointRegistersScope scope(this);
5043 // NumberTagI and NumberTagD use the context from the frame, rather than
5044 // the environment's HContext or HInlinedContext value.
5045 // They only call Runtime::kAllocateHeapNumber.
5046 // The corresponding HChange instructions are added in a phase that does
5047 // not have easy access to the local context.
5048 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
5049 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5050 RecordSafepointWithRegisters(instr->pointer_map(), 0,
5051 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
5062 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LNumberTagD* instr_;
  };
5074 DoubleRegister input_reg = ToDoubleRegister(instr->value());
5075 Register scratch = scratch0();
5076 Register reg = ToRegister(instr->result());
5077 Register temp1 = ToRegister(instr->temp());
5078 Register temp2 = ToRegister(instr->temp2());
5080 DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
5081 if (FLAG_inline_new) {
5082 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
  } else {
    __ b(deferred->entry());
  }
  __ bind(deferred->exit());
  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
5092 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
5093 // TODO(3095996): Get rid of this. For now, we need to make the
5094 // result register contain a valid pointer because it is already
5095 // contained in the register pointer map.
5096 Register reg = ToRegister(instr->result());
5097 __ li(reg, Operand::Zero());
5099 PushSafepointRegistersScope scope(this);
5100 // NumberTagI and NumberTagD use the context from the frame, rather than
5101 // the environment's HContext or HInlinedContext value.
5102 // They only call Runtime::kAllocateHeapNumber.
5103 // The corresponding HChange instructions are added in a phase that does
5104 // not have easy access to the local context.
5105 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
5106 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
5107 RecordSafepointWithRegisters(instr->pointer_map(), 0,
5108 Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, reg);
}
5113 void LCodeGen::DoSmiTag(LSmiTag* instr) {
5114 HChange* hchange = instr->hydrogen();
5115 Register input = ToRegister(instr->value());
5116 Register output = ToRegister(instr->result());
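  // On 64-bit targets every int32 fits in a Smi, so only uint32 inputs can
  // overflow; on 32-bit targets the tag shift itself can overflow and is
  // checked below via SmiTagCheckOverflow.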
5117 if (hchange->CheckFlag(HValue::kCanOverflow) &&
5118 hchange->value()->CheckFlag(HValue::kUint32)) {
5119 __ TestUnsignedSmiCandidate(input, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
  }
#if !V8_TARGET_ARCH_PPC64
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, r0);
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  } else {
#endif
    __ SmiTag(output, input);
#if !V8_TARGET_ARCH_PPC64
  }
#endif
}
5136 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5137 Register scratch = scratch0();
5138 Register input = ToRegister(instr->value());
5139 Register result = ToRegister(instr->result());
5140 if (instr->needs_check()) {
5141 // If the input is a HeapObject, value of scratch won't be zero.
5142 __ andi(scratch, input, Operand(kHeapObjectTag));
5143 __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
  } else {
    __ SmiUntag(result, input);
  }
}
5151 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
5152 DoubleRegister result_reg,
5153 NumberUntagDMode mode) {
5154 bool can_convert_undefined_to_nan =
5155 instr->hydrogen()->can_convert_undefined_to_nan();
5156 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
5158 Register scratch = scratch0();
5159 DCHECK(!result_reg.is(double_scratch0()));
5161 Label convert, load_smi, done;
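  // Two modes: NUMBER_CANDIDATE_IS_ANY_TAGGED requires a heap-number map
  // check (optionally converting undefined to NaN), while
  // NUMBER_CANDIDATE_IS_SMI simply untags and converts the integer.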
5163 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
5165 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
5167 // Heap number map check.
5168 __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5169 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5170 __ cmp(scratch, ip);
    if (can_convert_undefined_to_nan) {
      __ bne(&convert);
    } else {
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
    }
    // load heap number
    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5178 if (deoptimize_on_minus_zero) {
5179 #if V8_TARGET_ARCH_PPC64
5180 __ MovDoubleToInt64(scratch, result_reg);
5181 // rotate left by one for simple compare.
5182 __ rldicl(scratch, scratch, 1, 0);
      __ cmpi(scratch, Operand(1));
#else
      __ MovDoubleToInt64(scratch, ip, result_reg);
      __ cmpi(ip, Operand::Zero());
      __ bne(&done);
      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, ip);
      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ b(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ ConvertIntToDouble(scratch, result_reg);
  __ bind(&done);
}
5215 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
5216 Register input_reg = ToRegister(instr->value());
5217 Register scratch1 = scratch0();
5218 Register scratch2 = ToRegister(instr->temp());
5219 DoubleRegister double_scratch = double_scratch0();
5220 DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
5222 DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
5223 DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
  Label done;

  // Heap number map check.
5228 __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5229 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
5230 __ cmp(scratch1, ip);
5232 if (instr->truncating()) {
5233 // Performs a truncating conversion of a floating point number as used by
5234 // the JS bitwise operations.
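    // TruncateHeapNumberToI implements the ECMAScript ToInt32 truncation:
    // out-of-range doubles wrap modulo 2^32, and NaN and the infinities
    // become zero.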
5235 Label no_heap_number, check_bools, check_false;
5236 __ bne(&no_heap_number);
5237 __ mr(scratch2, input_reg);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);
5241 // Check for Oddballs. Undefined/False is converted to zero and True to one
5242 // for truncating conversions.
5243 __ bind(&no_heap_number);
5244 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
5245 __ cmp(input_reg, ip);
5246 __ bne(&check_bools);
    __ li(input_reg, Operand::Zero());
    __ b(&done);
5250 __ bind(&check_bools);
5251 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5252 __ cmp(input_reg, ip);
5253 __ bne(&check_false);
    __ li(input_reg, Operand(1));
    __ b(&done);
5257 __ bind(&check_false);
5258 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5259 __ cmp(input_reg, ip);
5260 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ li(input_reg, Operand::Zero());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
5265 __ lfd(double_scratch2,
5266 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5267 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5268 // preserve heap number pointer in scratch2 for minus zero check below
5269 __ mr(scratch2, input_reg);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
                             double_scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5275 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmpi(input_reg, Operand::Zero());
      __ bne(&done);
      __ lwz(scratch1,
             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
                                           Register::kExponentOffset));
      __ cmpwi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    }
  }
  __ bind(&done);
}
5289 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LTaggedToI* instr_;
  };
5301 LOperand* input = instr->value();
5302 DCHECK(input->IsRegister());
5303 DCHECK(input->Equals(instr->result()));
5305 Register input_reg = ToRegister(input);
5307 if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
5312 // Branch to deferred code if the input is a HeapObject.
5313 __ JumpIfNotSmi(input_reg, deferred->entry());
5315 __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}
5321 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
5322 LOperand* input = instr->value();
5323 DCHECK(input->IsRegister());
5324 LOperand* result = instr->result();
5325 DCHECK(result->IsDoubleRegister());
5327 Register input_reg = ToRegister(input);
5328 DoubleRegister result_reg = ToDoubleRegister(result);
5330 HValue* value = instr->hydrogen()->value();
5331 NumberUntagDMode mode = value->representation().IsSmi()
5332 ? NUMBER_CANDIDATE_IS_SMI
5333 : NUMBER_CANDIDATE_IS_ANY_TAGGED;
  EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
5339 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
5340 Register result_reg = ToRegister(instr->result());
5341 Register scratch1 = scratch0();
5342 DoubleRegister double_input = ToDoubleRegister(instr->value());
5343 DoubleRegister double_scratch = double_scratch0();
  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}
5369 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5370 Register result_reg = ToRegister(instr->result());
5371 Register scratch1 = scratch0();
5372 DoubleRegister double_input = ToDoubleRegister(instr->value());
5373 DoubleRegister double_scratch = double_scratch0();
  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
                             double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmpi(result_reg, Operand::Zero());
      __ bne(&done);
#if V8_TARGET_ARCH_PPC64
      __ MovDoubleToInt64(scratch1, double_input);
#else
      __ MovDoubleHighToInt(scratch1, double_input);
#endif
      __ cmpi(scratch1, Operand::Zero());
      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
#if V8_TARGET_ARCH_PPC64
  __ SmiTag(result_reg);
#else
  __ SmiTagCheckOverflow(result_reg, r0);
  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
}
5405 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5406 LOperand* input = instr->value();
5407 __ TestIfSmi(ToRegister(input), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
}
5412 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5413 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5414 LOperand* input = instr->value();
5415 __ TestIfSmi(ToRegister(input), r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
  }
}
5421 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5422 Register input = ToRegister(instr->value());
5423 Register scratch = scratch0();
5425 __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
5426 __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
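  // Two check shapes: an instance-type interval (one or two unsigned
  // compares) or a mask-and-tag test when the allowed types share a bit
  // pattern.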
  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpli(scratch, Operand(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpli(scratch, Operand(last));
        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ andi(r0, scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
                   cr0);
    } else {
      __ andi(scratch, scratch, Operand(mask));
      __ cmpi(scratch, Operand(tag));
      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}
5465 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5466 Register reg = ToRegister(instr->value());
5467 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5468 AllowDeferredHandleDereference smi_check;
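  // New-space objects can move during GC, so the expected value is held in
  // a Cell (which the GC keeps up to date) and compared through it.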
5469 if (isolate()->heap()->InNewSpace(*object)) {
5470 Register reg = ToRegister(instr->value());
5471 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5472 __ mov(ip, Operand(cell));
    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ Cmpi(reg, Operand(object), r0);
  }
  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
5482 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5483 Register temp = ToRegister(instr->temp());
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ li(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                 Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r3, temp);
  }
  __ TestIfSmi(temp, r0);
5494 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
5498 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5499 class DeferredCheckMaps FINAL : public LDeferredCode {
5501 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5502 : LDeferredCode(codegen), instr_(instr), object_(object) {
5503 SetExit(check_maps());
5505 void Generate() OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
5518 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
5519 for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register object = ToRegister(instr->value());
5526 Register map_reg = ToRegister(instr->temp());
5528 __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
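  // Compare the object's map against each allowed map in turn; if none
  // match and a migration target exists, the deferred path attempts an
  // instance migration and then re-checks.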
5530 DeferredCheckMaps* deferred = NULL;
5531 if (instr->hydrogen()->HasMigrationTarget()) {
5532 deferred = new (zone()) DeferredCheckMaps(this, instr, object);
5533 __ bind(deferred->check_maps());
5536 const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ beq(&success);
  }
5544 Handle<Map> map = maps->at(maps->size() - 1).handle();
5545 __ CompareMap(map_reg, map, &success);
5546 if (instr->hydrogen()->HasMigrationTarget()) {
    __ bne(deferred->entry());
  } else {
    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
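

// The three clamp variants below implement Uint8ClampedArray stores:
// doubles clamp to [0, 255] with NaN going to zero, int32 values clamp
// directly, and tagged values dispatch on smi/heap-number/undefined.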
5556 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5557 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5558 Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}
5563 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5564 Register unclamped_reg = ToRegister(instr->unclamped());
5565 Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for heap number.
  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
  __ beq(&heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ li(result_reg, Operand::Zero());
  __ b(&done);

  // Heap number.
  __ bind(&heap_number);
  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ b(&done);

  // Smi.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}
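

// DoDoubleBits and DoConstructDouble (below) give raw bit-level access to a
// double. For example, 1.0 has the IEEE-754 encoding 0x3FF0000000000000, so
// HDoubleBits::HIGH yields 0x3FF00000 and the low word is 0; feeding those
// two words back through DoConstructDouble reproduces 1.0.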
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());

  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    __ MovDoubleHighToInt(result_reg, value_reg);
  } else {
    __ MovDoubleLowToInt(result_reg, value_reg);
  }
}


void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  DoubleRegister result_reg = ToDoubleRegister(instr->result());
#if V8_TARGET_ARCH_PPC64
  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
#else
  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
#endif
}
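

// DoAllocate (below) emits the inline bump-pointer fast path via
// MacroAssembler::Allocate and falls back to the deferred runtime call when
// the fast path fails (or when a constant size exceeds
// Page::kMaxRegularHeapObjectSize, where we branch straight to the deferred
// entry).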
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size <= Page::kMaxRegularHeapObjectSize) {
      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
    } else {
      __ b(deferred->entry());
    }
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
    } else {
      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ subi(scratch, scratch, Operand(kPointerSize));
    __ StorePX(scratch2, MemOperand(result, scratch));
    __ cmpi(scratch, Operand::Zero());
    __ bgt(&loop);
  }
}
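

// The deferred slow path (below) calls Runtime::kAllocateInTargetSpace with
// two smi arguments: the byte size and the encoded allocation flags. On
// 32-bit targets a size outside the smi range cannot be encoded, hence the
// guarded abort; on PPC64 every non-negative int32_t size fits in a smi and
// the guard is compiled out.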
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ LoadSmiLiteral(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
#if !V8_TARGET_ARCH_PPC64
    if (size >= 0 && size <= Smi::kMaxValue) {
#endif
      __ Push(Smi::FromInt(size));
#if !V8_TARGET_ARCH_PPC64
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
#endif
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ Push(Smi::FromInt(flags));

  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(r3, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  __ push(r3);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r10 = literals array.
  // r4 = regexp literal.
  // r3 = regexp literal clone.
  // r5 and r7-r9 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r10, instr->hydrogen()->literals());
  __ LoadP(r4, FieldMemOperand(r10, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r4, ip);
  __ bne(&materialized);

  // Create regexp literal using runtime function
  // Result will be in r3.
  __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
  __ mov(r8, Operand(instr->hydrogen()->pattern()));
  __ mov(r7, Operand(instr->hydrogen()->flags()));
  __ Push(r10, r9, r8, r7);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mr(r4, r3);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
  __ b(&allocated);

  __ bind(&runtime_allocate);
  __ LoadSmiLiteral(r3, Smi::FromInt(size));
  __ Push(r4, r3);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r4);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
}
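
// Each evaluation of a regexp literal must yield a fresh JSRegExp object, so
// DoRegExpLiteral above materializes the boilerplate once (via the runtime on
// first execution) and then shallow-copies it into newly allocated space on
// every subsequent evaluation.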


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
                            instr->hydrogen()->kind());
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
    __ mov(r4, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r5, r4);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}
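

// EmitTypeofIs (below) emits the comparison for a `typeof x == "..."` branch
// and returns the condition on which the true target should be taken, or
// kNoCondition when control flow has already been resolved with explicit
// jumps. For "number", say, smis jump straight to the true label and heap
// objects fall through to a map compare against the heap-number map, with eq
// as the final condition.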
Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    __ bge(false_label);
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ beq(true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    Register type_reg = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
    __ beq(true_label);
    __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    Register map = scratch;
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
    // Check for undetectable objects => false.
    __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}
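
// Note on the undetectable bit used above: objects marked undetectable
// (notably document.all in Blink) must report typeof "undefined" and must
// not be classified as "object", which is why the "undefined" case branches
// on ne and the "object" case on eq after extracting Map::kIsUndetectable.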


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(instr, eq);
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  DCHECK(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
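
// Example of the padding arithmetic above: with kInstrSize == 4, if the
// patch sequence needs 20 bytes and only 12 bytes of code were emitted since
// last_lazy_deopt_pc_, the loop emits two nops so the call emitted next can
// be safely overwritten during lazy deoptimization.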


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ bge(&done);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r3, ip);
  DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);

  Register null_value = r8;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r3, null_value);
  DeoptimizeIf(eq, instr, Deoptimizer::kNull);

  __ TestIfSmi(r3, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r3);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r4, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}
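

// DoForInCacheArray (below) fetches the enum cache for the map produced by
// DoForInPrepareMap. An EnumLength of zero means there is nothing to
// enumerate and the empty fixed array is returned directly; otherwise the
// cache is read out of the instance descriptors and a missing cache entry
// deoptimizes with kNoCache.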
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmpi(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ li(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, result);
}
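

// DoLoadFieldByIndex (below) receives the field index as a smi with an extra
// flag bit: bit 0 of the untagged value marks a mutable heap-number field,
// which is handled by the deferred runtime path above. After an arithmetic
// shift strips the flag, a non-negative index selects an in-object field at
// JSObject::kHeaderSize, while a negative index addresses the out-of-object
// properties array (the comment in the code gives the exact encoding).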
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() OVERRIDE {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() OVERRIDE { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry(), cr0);
  __ ShiftRightArithImm(index, index, 1);

  __ cmpi(index, Operand::Zero());
  __ blt(&out_of_object);

  __ SmiToPtrArrayOffset(r0, index);
  __ add(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to negated out of object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ sub(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, 2, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __
}
}  // namespace v8::internal