deps: update v8 to 4.3.61.21
deps/v8/src/compiler/ppc/code-generator-ppc.cc
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/ppc/macro-assembler-ppc.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


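// r11 serves as the dedicated scratch GPR for the code patterns in this
// file (it is assumed not to be handed out by the register allocator).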
#define kScratchReg r11


// Adds PPC-specific methods to convert InstructionOperands.
class PPCOperandConverter FINAL : public InstructionOperandConverter {
 public:
  PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  RCBit OutputRCBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetRC;
      case kFlags_none:
        return LeaveRC;
    }
    UNREACHABLE();
    return LeaveRC;
  }

  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
    return false;
  }

  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    *mode = AddressingModeField::decode(instr_->opcode());
    switch (*mode) {
      case kMode_None:
        break;
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


static inline bool HasRegisterInput(Instruction* instr, size_t index) {
  return instr->InputAt(index)->IsRegister();
}


namespace {

class OutOfLineLoadNAN32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNAN64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ li(result_, Operand::Zero()); }

 private:
  Register const result_;
};


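// Maps a TurboFan FlagsCondition to a PPC condition for a cr0-based test.
// For kOverflow/kNotOverflow the mapping depends on how the arithmetic was
// assembled (see the ASSEMBLE_*_WITH_OVERFLOW macros below): on PPC64 the
// 32-bit add/sub is done in a 64-bit register and TestIfInt32 sets cr0 eq
// when the result is still a sign-extended 32-bit value, so ne means
// overflow; on 32-bit PPC the Add/SubAndCheckForOverflow helpers record
// overflow in the sign bit of a scratch value, so lt means overflow.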
Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
#if V8_TARGET_ARCH_PPC64
      return ne;
#else
      return lt;
#endif
    case kNotOverflow:
#if V8_TARGET_ARCH_PPC64
      return eq;
#else
      return ge;
#endif
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return kNoCondition;
}

}  // namespace

#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr)                            \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
  } while (0)


#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr)                           \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
  } while (0)


#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1), i.OutputRCBit());  \
    }                                                          \
  } while (0)


#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)


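// 32-bit add/sub with overflow check. On PPC64 the operation is performed
// in a 64-bit register, so overflow is detected simply by checking that the
// result still fits in a sign-extended 32 bits (TestIfInt32 on cr0). On
// 32-bit PPC, dedicated macro-assembler helpers compute the overflow
// indication into kScratchReg instead.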
#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW()             \
  do {                                           \
    ASSEMBLE_BINOP(add, addi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)
#endif


#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_SUB_WITH_OVERFLOW()             \
  do {                                           \
    ASSEMBLE_BINOP(sub, subi);                   \
    __ TestIfInt32(i.OutputRegister(), r0, cr0); \
  } while (0)
#else
#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)
#endif


#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)


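// Integer modulo, synthesized as dividend - (dividend / divisor) * divisor,
// since this generation of PPC (pre-ISA 3.0) has no remainder instruction.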
#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)


#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
  } while (0)


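// Branch-free float min/max using fsel: fsel(d, c, a, b) selects a when
// c >= 0.0 and b otherwise, so selecting on (lhs - rhs) yields max resp.
// min. Like fsel itself, this does not give IEEE semantics for NaN or
// signed-zero inputs.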
#define ASSEMBLE_FLOAT_MAX(scratch_reg)                                       \
  do {                                                                        \
    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0),  \
            i.InputDoubleRegister(1));                                        \
  } while (0)


#define ASSEMBLE_FLOAT_MIN(scratch_reg)                                       \
  do {                                                                        \
    __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
    __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1),  \
            i.InputDoubleRegister(0));                                        \
  } while (0)


#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    if (mode == kMode_MRI) {                          \
      __ asm_instr(result, operand);                  \
    } else {                                          \
      __ asm_instrx(result, operand);                 \
    }                                                 \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
  } while (0)


#define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                               \
    Register result = i.OutputRegister();            \
    AddressingMode mode = kMode_None;                \
    MemOperand operand = i.MemoryOperand(&mode);     \
    if (mode == kMode_MRI) {                         \
      __ asm_instr(result, operand);                 \
    } else {                                         \
      __ asm_instrx(result, operand);                \
    }                                                \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
  } while (0)


#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx)      \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)


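// Checked (bounds-tested) loads and stores: compare the sign-extended
// offset against the length input and, if out of bounds, either branch to
// out-of-line code that materializes NaN/zero (loads) or skip the access
// entirely (stores).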
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    DCHECK_EQ(kMode_MRR, mode);                                    \
    Register offset = operand.rb();                                \
    __ extsw(offset, offset);                                      \
    if (HasRegisterInput(instr, 2)) {                              \
      __ cmplw(offset, i.InputRegister(2));                        \
    } else {                                                       \
      __ cmplwi(offset, i.InputImmediate(2));                      \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    if (mode == kMode_MRI) {                                       \
      __ asm_instr(result, operand);                               \
    } else {                                                       \
      __ asm_instrx(result, operand);                              \
    }                                                              \
    __ bind(ool->exit());                                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    DCHECK_EQ(kMode_MRR, mode);                              \
    Register offset = operand.rb();                          \
    __ extsw(offset, offset);                                \
    if (HasRegisterInput(instr, 2)) {                        \
      __ cmplw(offset, i.InputRegister(2));                  \
    } else {                                                 \
      __ cmplwi(offset, i.InputImmediate(2));                \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(result, operand);                         \
    } else {                                                 \
      __ asm_instrx(result, operand);                        \
    }                                                        \
    __ bind(ool->exit());                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
  do {                                                      \
    Label done;                                             \
    size_t index = 0;                                       \
    AddressingMode mode = kMode_None;                       \
    MemOperand operand = i.MemoryOperand(&mode, index);     \
    DCHECK_EQ(kMode_MRR, mode);                             \
    Register offset = operand.rb();                         \
    __ extsw(offset, offset);                               \
    if (HasRegisterInput(instr, 2)) {                       \
      __ cmplw(offset, i.InputRegister(2));                 \
    } else {                                                \
      __ cmplwi(offset, i.InputImmediate(2));               \
    }                                                       \
    __ bge(&done);                                          \
    DoubleRegister value = i.InputDoubleRegister(3);        \
    if (mode == kMode_MRI) {                                \
      __ asm_instr(value, operand);                         \
    } else {                                                \
      __ asm_instrx(value, operand);                        \
    }                                                       \
    __ bind(&done);                                         \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                    \
  } while (0)


// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                        \
    Label done;                                               \
    size_t index = 0;                                         \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, index);       \
    DCHECK_EQ(kMode_MRR, mode);                               \
    Register offset = operand.rb();                           \
    __ extsw(offset, offset);                                 \
    if (HasRegisterInput(instr, 2)) {                         \
      __ cmplw(offset, i.InputRegister(2));                   \
    } else {                                                  \
      __ cmplwi(offset, i.InputImmediate(2));                 \
    }                                                         \
    __ bge(&done);                                            \
    Register value = i.InputRegister(3);                      \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    __ bind(&done);                                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)


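// Store a tagged value, then notify the GC of the mutation via the
// RecordWrite helper so incremental marking and the store buffer see it.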
#define ASSEMBLE_STORE_WRITE_BARRIER()                                         \
  do {                                                                         \
    Register object = i.InputRegister(0);                                      \
    Register index = i.InputRegister(1);                                       \
    Register value = i.InputRegister(2);                                       \
    __ add(index, object, index);                                              \
    __ StoreP(value, MemOperand(index));                                       \
    SaveFPRegsMode mode =                                                      \
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
    LinkRegisterStatus lr_status = kLRHasNotBeenSaved;                         \
    __ RecordWrite(object, index, value, lr_status, mode);                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());

  switch (opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (HasRegisterInput(instr, 0)) {
        __ addi(ip, i.InputRegister(0),
                Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      } else {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      }
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ LoadP(kScratchReg,
                 FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      RecordCallPosition(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchNop:
      // don't emit code for nops.
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
      break;
    }
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchStackPointer:
      __ mr(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kArchTruncateDoubleToI:
      // TODO(mbrandy): move slow call to stub out of line.
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_And32:
    case kPPC_And64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
      }
      break;
    case kPPC_AndComplement32:
    case kPPC_AndComplement64:
      __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
      break;
    case kPPC_Or32:
    case kPPC_Or64:
      if (HasRegisterInput(instr, 1)) {
        __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      } else {
        __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_OrComplement32:
    case kPPC_OrComplement64:
      __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputRCBit());
      break;
    case kPPC_Xor32:
    case kPPC_Xor64:
      if (HasRegisterInput(instr, 1)) {
        __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      } else {
        __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_ShiftLeft32:
      ASSEMBLE_BINOP_RC(slw, slwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftLeft64:
      ASSEMBLE_BINOP_RC(sld, sldi);
      break;
#endif
    case kPPC_ShiftRight32:
      ASSEMBLE_BINOP_RC(srw, srwi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRight64:
      ASSEMBLE_BINOP_RC(srd, srdi);
      break;
#endif
    case kPPC_ShiftRightAlg32:
      ASSEMBLE_BINOP_INT_RC(sraw, srawi);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ShiftRightAlg64:
      ASSEMBLE_BINOP_INT_RC(srad, sradi);
      break;
#endif
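    // PPC has only rotate-left instructions; rotate right by a variable n is
    // assembled as rotate left by (32 - n) resp. (64 - n).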
    case kPPC_RotRight32:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
        __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotRight64:
      if (HasRegisterInput(instr, 1)) {
        __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
        __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
                 i.OutputRCBit());
      } else {
        int sh = i.InputInt32(1);
        __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
      }
      break;
#endif
    case kPPC_Not32:
    case kPPC_Not64:
      __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndMask32:
      __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_RotLeftAndClear64:
      __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
               63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearLeft64:
      __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
    case kPPC_RotLeftAndClearRight64:
      __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
                63 - i.InputInt32(2), i.OutputRCBit());
      break;
#endif
    case kPPC_Add32:
    case kPPC_Add64:
      if (HasRegisterInput(instr, 1)) {
        __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      } else {
        __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_AddWithOverflow32:
      ASSEMBLE_ADD_WITH_OVERFLOW();
      break;
    case kPPC_AddFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fadd);
      break;
    case kPPC_Sub32:
    case kPPC_Sub64:
      if (HasRegisterInput(instr, 1)) {
        __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      } else {
        __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
        DCHECK_EQ(LeaveRC, i.OutputRCBit());
      }
      break;
    case kPPC_SubWithOverflow32:
      ASSEMBLE_SUB_WITH_OVERFLOW();
      break;
    case kPPC_SubFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fsub);
      break;
    case kPPC_Mul32:
      __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mul64:
      __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               LeaveOE, i.OutputRCBit());
      break;
#endif
    case kPPC_MulHigh32:
      __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
      break;
    case kPPC_MulHighU32:
      __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
      break;
    case kPPC_MulFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fmul);
      break;
    case kPPC_Div32:
      __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Div64:
      __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivU32:
      __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_DivU64:
      __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_DivFloat64:
      ASSEMBLE_FLOAT_BINOP_RC(fdiv);
      break;
    case kPPC_Mod32:
      ASSEMBLE_MODULO(divw, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Mod64:
      ASSEMBLE_MODULO(divd, mulld);
      break;
#endif
    case kPPC_ModU32:
      ASSEMBLE_MODULO(divwu, mullw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ModU64:
      ASSEMBLE_MODULO(divdu, mulld);
      break;
#endif
    case kPPC_ModFloat64:
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      ASSEMBLE_FLOAT_MODULO();
      break;
    case kPPC_Neg32:
    case kPPC_Neg64:
      __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
      break;
    case kPPC_MaxFloat64:
      ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
      break;
    case kPPC_MinFloat64:
      ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
      break;
    case kPPC_SqrtFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
      break;
    case kPPC_FloorFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frim);
      break;
    case kPPC_CeilFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frip);
      break;
    case kPPC_TruncateFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(friz);
      break;
    case kPPC_RoundFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(frin);
      break;
    case kPPC_NegFloat64:
      ASSEMBLE_FLOAT_UNOP_RC(fneg);
      break;
    case kPPC_Cntlz32:
      __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Cmp32:
      ASSEMBLE_COMPARE(cmpw, cmplw);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Cmp64:
      ASSEMBLE_COMPARE(cmp, cmpl);
      break;
#endif
    case kPPC_CmpFloat64:
      ASSEMBLE_FLOAT_COMPARE(fcmpu);
      break;
    case kPPC_Tst32:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
#if V8_TARGET_ARCH_PPC64
      __ extsw(r0, r0, i.OutputRCBit());
#endif
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_Tst64:
      if (HasRegisterInput(instr, 1)) {
        __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
      } else {
        __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
      }
      DCHECK_EQ(SetRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Push:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord8:
      __ extsb(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_ExtendSignWord16:
      __ extsh(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_ExtendSignWord32:
      __ extsw(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToUint64:
      // Zero extend
      __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Int64ToInt32:
      // TODO(mbrandy): sign extend?
      __ Move(i.OutputRegister(), i.InputRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
#endif
    case kPPC_Int32ToFloat64:
      __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Uint32ToFloat64:
      __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                    i.OutputDoubleRegister());
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ToInt32:
    case kPPC_Float64ToUint32:
      __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
#if !V8_TARGET_ARCH_PPC64
                              kScratchReg,
#endif
                              i.OutputRegister(), kScratchDoubleReg);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ToFloat32:
      ASSEMBLE_FLOAT_UNOP_RC(frsp);
      break;
    case kPPC_Float32ToFloat64:
      // Nothing to do.
      __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ExtractLowWord32:
      __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64ExtractHighWord32:
      __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64InsertLowWord32:
      __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64InsertHighWord32:
      __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_Float64Construct:
#if V8_TARGET_ARCH_PPC64
      __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                    i.InputRegister(0), i.InputRegister(1), r0);
#else
      __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
                          i.InputRegister(1));
#endif
      DCHECK_EQ(LeaveRC, i.OutputRCBit());
      break;
    case kPPC_LoadWordU8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      break;
    case kPPC_LoadWordS8:
      ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kPPC_LoadWordU16:
      ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
      break;
    case kPPC_LoadWordS16:
      ASSEMBLE_LOAD_INTEGER(lha, lhax);
      break;
    case kPPC_LoadWordS32:
      ASSEMBLE_LOAD_INTEGER(lwa, lwax);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_LoadWord64:
      ASSEMBLE_LOAD_INTEGER(ld, ldx);
      break;
#endif
    case kPPC_LoadFloat32:
      ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
      break;
    case kPPC_LoadFloat64:
      ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
      break;
    case kPPC_StoreWord8:
      ASSEMBLE_STORE_INTEGER(stb, stbx);
      break;
    case kPPC_StoreWord16:
      ASSEMBLE_STORE_INTEGER(sth, sthx);
      break;
    case kPPC_StoreWord32:
      ASSEMBLE_STORE_INTEGER(stw, stwx);
      break;
#if V8_TARGET_ARCH_PPC64
    case kPPC_StoreWord64:
      ASSEMBLE_STORE_INTEGER(std, stdx);
      break;
#endif
    case kPPC_StoreFloat32:
      ASSEMBLE_STORE_FLOAT(stfs, stfsx);
      break;
    case kPPC_StoreFloat64:
      ASSEMBLE_STORE_FLOAT(stfd, stfdx);
      break;
    case kPPC_StoreWriteBarrier:
      ASSEMBLE_STORE_WRITE_BARRIER();
      break;
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      __ extsb(i.OutputRegister(), i.OutputRegister());
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
      break;
    default:
      UNREACHABLE();
      break;
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));

  Condition cond = FlagsConditionToCondition(condition);
  if (op == kPPC_CmpFloat64) {
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  bool check_unordered = (op == kPPC_CmpFloat64);
  CRegister cr = cr0;

  // Overflow checked for add/sub only.
  DCHECK((condition != kOverflow && condition != kNotOverflow) ||
         (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition);
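  // Select between the two candidate values with isel (integer select) on
  // the relevant cr0 bit rather than branching; unordered float compares
  // are peeled off first via bunordered so NaN operands get the right value.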
  switch (cond) {
    case eq:
    case lt:
      __ li(reg, Operand::Zero());
      __ li(kScratchReg, Operand(1));
      __ isel(cond, reg, kScratchReg, reg, cr);
      break;
    case ne:
    case ge:
      __ li(reg, Operand(1));
      __ isel(NegateCondition(cond), reg, r0, reg, cr);
      break;
    case gt:
      if (check_unordered) {
        __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ bunordered(&done, cr);
        __ isel(cond, reg, reg, kScratchReg, cr);
      } else {
        __ li(reg, Operand::Zero());
        __ li(kScratchReg, Operand(1));
        __ isel(cond, reg, kScratchReg, reg, cr);
      }
      break;
    case le:
      if (check_unordered) {
        __ li(reg, Operand::Zero());
        __ li(kScratchReg, Operand(1));
        __ bunordered(&done, cr);
        __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
      } else {
        __ li(reg, Operand(1));
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
  __ bind(&done);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
    __ beq(GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


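// Assembles a dense jump table: bounds-check the input against the case
// count (out-of-range values branch to the default block), then load the
// target address from an in-code table of label addresses and jump to it.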
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  PPCOperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  __ Cmpli(input, Operand(case_count), r0);
  __ bge(GetLabel(i.InputRpo(1)));
  __ mov_label_addr(kScratchReg, table);
  __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
  __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
  __ Jump(kScratchReg);
}


void CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ function_descriptor();
    int register_save_area_size = 0;
    RegList frame_saves = fp.bit();
    __ mflr(r0);
    __ Push(r0, fp);
    __ mr(fp, sp);
    // Save callee-saved registers.
    const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
    for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
      if (!((1 << i) & saves)) continue;
      register_save_area_size += kPointerSize;
    }
    frame()->SetRegisterSaveAreaSize(register_save_area_size);
    __ MultiPush(saves);
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else if (stack_slots > 0) {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    // TODO(titzer): cannot address target function == local #-1
    __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
    DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
    stack_slots -= frame()->GetOsrStackSlotCount();
  }

  if (stack_slots > 0) {
    __ Add(sp, sp, -stack_slots * kPointerSize, r0);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int stack_slots = frame()->GetSpillSlotCount();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      if (stack_slots > 0) {
        __ Add(sp, sp, stack_slots * kPointerSize, r0);
      }
      // Restore registers.
      RegList frame_saves = fp.bit();
      const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
      if (saves != 0) {
        __ MultiPop(saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
    __ Ret();
  } else {
    __ Ret();
  }
}


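// Emits a single parallel-move component as resolved by the gap resolver;
// every source/destination kind pairing the register allocator can produce
// must be handled here.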
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          __ mov(dst, Operand(src.ToInt64()));
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      DoubleRegister dst = destination->IsDoubleRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                        : src.ToFloat64();
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsDoubleRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ StoreDouble(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
    } else {
      DoubleRegister temp = kScratchDoubleReg;
      __ LoadDouble(temp, src, r0);
      __ StoreDouble(temp, g.ToMemOperand(destination), r0);
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mr(temp, src);
      __ mr(src, dst);
      __ mr(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mr(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_PPC64
  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ fmr(temp, src);
      __ fmr(src, dst);
      __ fmr(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ fmr(temp, src);
      __ lfd(src, dst);
      __ stfd(temp, dst);
    }
#if !V8_TARGET_ARCH_PPC64
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lfd(temp_0, src);
    __ lfd(temp_1, dst);
    __ stfd(temp_0, dst);
    __ stfd(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  for (size_t index = 0; index < target_count; ++index) {
    __ emit_label_addr(targets[index]);
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // We do not insert nops for inlined Smi code.
}


void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8