PPC: [turbofan] Add backend support for float32 operations.
author: michael_dawson <michael_dawson@ca.ibm.com>
Tue, 31 Mar 2015 14:54:29 +0000 (07:54 -0700)
committer: Commit bot <commit-bot@chromium.org>
Tue, 31 Mar 2015 14:54:37 +0000 (14:54 +0000)
Port 8dad78cdbd21c2cd02d6e0645313bd4b9983c78e

Original commit message:
This adds the basics necessary to support float32 operations in TurboFan.
The actual functionality required to detect safe float32 operations will
be added based on this later. Therefore this does not affect production
code except for some cleanup/refactoring.

In detail, this patchset contains the following features:
- Add support for float32 operations to arm, arm64, ia32 and x64
  backends.
- Add float32 machine operators.
- Add support for float32 constants to simplified lowering.
- Handle float32 representation for phis in simplified lowering.

In addition, contains the following (related) cleanups:
- Fix/unify naming of backend instructions.
- Use AVX comparisons when available.
- Extend ArchOpcodeField to 9 bits (required for arm64).
- Refactor some code duplication in instruction selectors.

BUG=v8:3589
LOG=N

R=mbrandy@us.ibm.com

Review URL: https://codereview.chromium.org/1049253004

Cr-Commit-Position: refs/heads/master@{#27547}

src/compiler/ppc/code-generator-ppc.cc
src/compiler/ppc/instruction-codes-ppc.h
src/compiler/ppc/instruction-selector-ppc.cc

index c619833..44a82c3 100644 (file)
@@ -383,16 +383,32 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   } while (0)
 
 
-#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx)      \
+#define ASSEMBLE_STORE_FLOAT32()                         \
   do {                                                   \
     size_t index = 0;                                    \
     AddressingMode mode = kMode_None;                    \
     MemOperand operand = i.MemoryOperand(&mode, &index); \
     DoubleRegister value = i.InputDoubleRegister(index); \
+    __ frsp(kScratchDoubleReg, value);                   \
     if (mode == kMode_MRI) {                             \
-      __ asm_instr(value, operand);                      \
+      __ stfs(kScratchDoubleReg, operand);               \
     } else {                                             \
-      __ asm_instrx(value, operand);                     \
+      __ stfsx(kScratchDoubleReg, operand);              \
+    }                                                    \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
+  } while (0)
+
+
+#define ASSEMBLE_STORE_DOUBLE()                          \
+  do {                                                   \
+    size_t index = 0;                                    \
+    AddressingMode mode = kMode_None;                    \
+    MemOperand operand = i.MemoryOperand(&mode, &index); \
+    DoubleRegister value = i.InputDoubleRegister(index); \
+    if (mode == kMode_MRI) {                             \
+      __ stfd(value, operand);                           \
+    } else {                                             \
+      __ stfdx(value, operand);                          \
     }                                                    \
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
   } while (0)
@@ -468,29 +484,57 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
 
 
 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
-  do {                                                      \
-    Label done;                                             \
-    size_t index = 0;                                       \
-    AddressingMode mode = kMode_None;                       \
-    MemOperand operand = i.MemoryOperand(&mode, index);     \
-    DCHECK_EQ(kMode_MRR, mode);                             \
-    Register offset = operand.rb();                         \
-    __ extsw(offset, offset);                               \
-    if (HasRegisterInput(instr, 2)) {                       \
-      __ cmplw(offset, i.InputRegister(2));                 \
-    } else {                                                \
-      __ cmplwi(offset, i.InputImmediate(2));               \
-    }                                                       \
-    __ bge(&done);                                          \
-    DoubleRegister value = i.InputDoubleRegister(3);        \
-    if (mode == kMode_MRI) {                                \
-      __ asm_instr(value, operand);                         \
-    } else {                                                \
-      __ asm_instrx(value, operand);                        \
-    }                                                       \
-    __ bind(&done);                                         \
-    DCHECK_EQ(LeaveRC, i.OutputRCBit());                    \
+#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ extsw(offset, offset);                           \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ cmplw(offset, i.InputRegister(2));             \
+    } else {                                            \
+      __ cmplwi(offset, i.InputImmediate(2));           \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    __ frsp(kScratchDoubleReg, value);                  \
+    if (mode == kMode_MRI) {                            \
+      __ stfs(kScratchDoubleReg, operand);              \
+    } else {                                            \
+      __ stfsx(kScratchDoubleReg, operand);             \
+    }                                                   \
+    __ bind(&done);                                     \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
+  } while (0)
+
+
+// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
+#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
+  do {                                                  \
+    Label done;                                         \
+    size_t index = 0;                                   \
+    AddressingMode mode = kMode_None;                   \
+    MemOperand operand = i.MemoryOperand(&mode, index); \
+    DCHECK_EQ(kMode_MRR, mode);                         \
+    Register offset = operand.rb();                     \
+    __ extsw(offset, offset);                           \
+    if (HasRegisterInput(instr, 2)) {                   \
+      __ cmplw(offset, i.InputRegister(2));             \
+    } else {                                            \
+      __ cmplwi(offset, i.InputImmediate(2));           \
+    }                                                   \
+    __ bge(&done);                                      \
+    DoubleRegister value = i.InputDoubleRegister(3);    \
+    if (mode == kMode_MRI) {                            \
+      __ stfd(value, operand);                          \
+    } else {                                            \
+      __ stfdx(value, operand);                         \
+    }                                                   \
+    __ bind(&done);                                     \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
   } while (0)
 
 
@@ -607,8 +651,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_And32:
-    case kPPC_And64:
+    case kPPC_And:
       if (HasRegisterInput(instr, 1)) {
         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
@@ -616,13 +659,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
       }
       break;
-    case kPPC_AndComplement32:
-    case kPPC_AndComplement64:
+    case kPPC_AndComplement:
       __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.OutputRCBit());
       break;
-    case kPPC_Or32:
-    case kPPC_Or64:
+    case kPPC_Or:
       if (HasRegisterInput(instr, 1)) {
         __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                i.OutputRCBit());
@@ -631,13 +672,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         DCHECK_EQ(LeaveRC, i.OutputRCBit());
       }
       break;
-    case kPPC_OrComplement32:
-    case kPPC_OrComplement64:
+    case kPPC_OrComplement:
       __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.OutputRCBit());
       break;
-    case kPPC_Xor32:
-    case kPPC_Xor64:
+    case kPPC_Xor:
       if (HasRegisterInput(instr, 1)) {
         __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
@@ -692,8 +731,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       break;
 #endif
-    case kPPC_Not32:
-    case kPPC_Not64:
+    case kPPC_Not:
       __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
       break;
     case kPPC_RotLeftAndMask32:
@@ -714,8 +752,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                 63 - i.InputInt32(2), i.OutputRCBit());
       break;
 #endif
-    case kPPC_Add32:
-    case kPPC_Add64:
+    case kPPC_Add:
       if (HasRegisterInput(instr, 1)) {
         __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                LeaveOE, i.OutputRCBit());
@@ -727,11 +764,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_AddWithOverflow32:
       ASSEMBLE_ADD_WITH_OVERFLOW();
       break;
-    case kPPC_AddFloat64:
+    case kPPC_AddDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fadd);
       break;
-    case kPPC_Sub32:
-    case kPPC_Sub64:
+    case kPPC_Sub:
       if (HasRegisterInput(instr, 1)) {
         __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                LeaveOE, i.OutputRCBit());
@@ -743,7 +779,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_SubWithOverflow32:
       ASSEMBLE_SUB_WITH_OVERFLOW();
       break;
-    case kPPC_SubFloat64:
+    case kPPC_SubDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fsub);
       break;
     case kPPC_Mul32:
@@ -764,7 +800,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
                 i.OutputRCBit());
       break;
-    case kPPC_MulFloat64:
+    case kPPC_MulDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fmul);
       break;
     case kPPC_Div32:
@@ -787,7 +823,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
-    case kPPC_DivFloat64:
+    case kPPC_DivDouble:
       ASSEMBLE_FLOAT_BINOP_RC(fdiv);
       break;
     case kPPC_Mod32:
@@ -806,37 +842,36 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_MODULO(divdu, mulld);
       break;
 #endif
-    case kPPC_ModFloat64:
+    case kPPC_ModDouble:
       // TODO(bmeurer): We should really get rid of this special instruction,
       // and generate a CallAddress instruction instead.
       ASSEMBLE_FLOAT_MODULO();
       break;
-    case kPPC_Neg32:
-    case kPPC_Neg64:
+    case kPPC_Neg:
       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
       break;
-    case kPPC_MaxFloat64:
+    case kPPC_MaxDouble:
       ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
       break;
-    case kPPC_MinFloat64:
+    case kPPC_MinDouble:
       ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
       break;
-    case kPPC_SqrtFloat64:
+    case kPPC_SqrtDouble:
       ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
       break;
-    case kPPC_FloorFloat64:
+    case kPPC_FloorDouble:
       ASSEMBLE_FLOAT_UNOP_RC(frim);
       break;
-    case kPPC_CeilFloat64:
+    case kPPC_CeilDouble:
       ASSEMBLE_FLOAT_UNOP_RC(frip);
       break;
-    case kPPC_TruncateFloat64:
+    case kPPC_TruncateDouble:
       ASSEMBLE_FLOAT_UNOP_RC(friz);
       break;
-    case kPPC_RoundFloat64:
+    case kPPC_RoundDouble:
       ASSEMBLE_FLOAT_UNOP_RC(frin);
       break;
-    case kPPC_NegFloat64:
+    case kPPC_NegDouble:
       ASSEMBLE_FLOAT_UNOP_RC(fneg);
       break;
     case kPPC_Cntlz32:
@@ -851,7 +886,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_COMPARE(cmp, cmpl);
       break;
 #endif
-    case kPPC_CmpFloat64:
+    case kPPC_CmpDouble:
       ASSEMBLE_FLOAT_COMPARE(fcmpu);
       break;
     case kPPC_Tst32:
@@ -903,17 +938,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
 #endif
-    case kPPC_Int32ToFloat64:
+    case kPPC_Int32ToDouble:
       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Uint32ToFloat64:
+    case kPPC_Uint32ToDouble:
       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
                                     i.OutputDoubleRegister());
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ToInt32:
-    case kPPC_Float64ToUint32:
+    case kPPC_DoubleToInt32:
+    case kPPC_DoubleToUint32:
       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
 #if !V8_TARGET_ARCH_PPC64
                               kScratchReg,
@@ -921,31 +956,31 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
                               i.OutputRegister(), kScratchDoubleReg);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ToFloat32:
+    case kPPC_DoubleToFloat32:
       ASSEMBLE_FLOAT_UNOP_RC(frsp);
       break;
-    case kPPC_Float32ToFloat64:
+    case kPPC_Float32ToDouble:
       // Nothing to do.
       __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ExtractLowWord32:
+    case kPPC_DoubleExtractLowWord32:
       __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64ExtractHighWord32:
+    case kPPC_DoubleExtractHighWord32:
       __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64InsertLowWord32:
+    case kPPC_DoubleInsertLowWord32:
       __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64InsertHighWord32:
+    case kPPC_DoubleInsertHighWord32:
       __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
       break;
-    case kPPC_Float64Construct:
+    case kPPC_DoubleConstruct:
 #if V8_TARGET_ARCH_PPC64
       __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
                                     i.InputRegister(0), i.InputRegister(1), r0);
@@ -979,7 +1014,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kPPC_LoadFloat32:
       ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
       break;
-    case kPPC_LoadFloat64:
+    case kPPC_LoadDouble:
       ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
       break;
     case kPPC_StoreWord8:
@@ -997,10 +1032,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
 #endif
     case kPPC_StoreFloat32:
-      ASSEMBLE_STORE_FLOAT(stfs, stfsx);
+      ASSEMBLE_STORE_FLOAT32();
       break;
-    case kPPC_StoreFloat64:
-      ASSEMBLE_STORE_FLOAT(stfd, stfdx);
+    case kPPC_StoreDouble:
+      ASSEMBLE_STORE_DOUBLE();
       break;
     case kPPC_StoreWriteBarrier:
       ASSEMBLE_STORE_WRITE_BARRIER();
@@ -1037,10 +1072,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
       break;
     case kCheckedStoreFloat32:
-      ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
+      ASSEMBLE_CHECKED_STORE_FLOAT32();
       break;
     case kCheckedStoreFloat64:
-      ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
+      ASSEMBLE_CHECKED_STORE_DOUBLE();
       break;
     default:
       UNREACHABLE();
@@ -1063,7 +1098,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
          (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
 
   Condition cond = FlagsConditionToCondition(condition);
-  if (op == kPPC_CmpFloat64) {
+  if (op == kPPC_CmpDouble) {
     // check for unordered if necessary
     if (cond == le) {
       __ bunordered(flabel, cr);
@@ -1089,7 +1124,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   PPCOperandConverter i(this, instr);
   Label done;
   ArchOpcode op = instr->arch_opcode();
-  bool check_unordered = (op == kPPC_CmpFloat64);
+  bool check_unordered = (op == kPPC_CmpDouble);
   CRegister cr = cr0;
 
   // Overflow checked for add/sub only.
index bb0a771..3d35c69 100644 (file)
@@ -12,16 +12,11 @@ namespace compiler {
 // PPC-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define TARGET_ARCH_OPCODE_LIST(V) \
-  V(PPC_And32)                     \
-  V(PPC_And64)                     \
-  V(PPC_AndComplement32)           \
-  V(PPC_AndComplement64)           \
-  V(PPC_Or32)                      \
-  V(PPC_Or64)                      \
-  V(PPC_OrComplement32)            \
-  V(PPC_OrComplement64)            \
-  V(PPC_Xor32)                     \
-  V(PPC_Xor64)                     \
+  V(PPC_And)                       \
+  V(PPC_AndComplement)             \
+  V(PPC_Or)                        \
+  V(PPC_OrComplement)              \
+  V(PPC_Xor)                       \
   V(PPC_ShiftLeft32)               \
   V(PPC_ShiftLeft64)               \
   V(PPC_ShiftRight32)              \
@@ -30,49 +25,45 @@ namespace compiler {
   V(PPC_ShiftRightAlg64)           \
   V(PPC_RotRight32)                \
   V(PPC_RotRight64)                \
-  V(PPC_Not32)                     \
-  V(PPC_Not64)                     \
+  V(PPC_Not)                       \
   V(PPC_RotLeftAndMask32)          \
   V(PPC_RotLeftAndClear64)         \
   V(PPC_RotLeftAndClearLeft64)     \
   V(PPC_RotLeftAndClearRight64)    \
-  V(PPC_Add32)                     \
+  V(PPC_Add)                       \
   V(PPC_AddWithOverflow32)         \
-  V(PPC_Add64)                     \
-  V(PPC_AddFloat64)                \
-  V(PPC_Sub32)                     \
+  V(PPC_AddDouble)                 \
+  V(PPC_Sub)                       \
   V(PPC_SubWithOverflow32)         \
-  V(PPC_Sub64)                     \
-  V(PPC_SubFloat64)                \
+  V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
-  V(PPC_MulFloat64)                \
+  V(PPC_MulDouble)                 \
   V(PPC_Div32)                     \
   V(PPC_Div64)                     \
   V(PPC_DivU32)                    \
   V(PPC_DivU64)                    \
-  V(PPC_DivFloat64)                \
+  V(PPC_DivDouble)                 \
   V(PPC_Mod32)                     \
   V(PPC_Mod64)                     \
   V(PPC_ModU32)                    \
   V(PPC_ModU64)                    \
-  V(PPC_ModFloat64)                \
-  V(PPC_Neg32)                     \
-  V(PPC_Neg64)                     \
-  V(PPC_NegFloat64)                \
-  V(PPC_SqrtFloat64)               \
-  V(PPC_FloorFloat64)              \
-  V(PPC_CeilFloat64)               \
-  V(PPC_TruncateFloat64)           \
-  V(PPC_RoundFloat64)              \
-  V(PPC_MaxFloat64)                \
-  V(PPC_MinFloat64)                \
+  V(PPC_ModDouble)                 \
+  V(PPC_Neg)                       \
+  V(PPC_NegDouble)                 \
+  V(PPC_SqrtDouble)                \
+  V(PPC_FloorDouble)               \
+  V(PPC_CeilDouble)                \
+  V(PPC_TruncateDouble)            \
+  V(PPC_RoundDouble)               \
+  V(PPC_MaxDouble)                 \
+  V(PPC_MinDouble)                 \
   V(PPC_Cntlz32)                   \
   V(PPC_Cmp32)                     \
   V(PPC_Cmp64)                     \
-  V(PPC_CmpFloat64)                \
+  V(PPC_CmpDouble)                 \
   V(PPC_Tst32)                     \
   V(PPC_Tst64)                     \
   V(PPC_Push)                      \
@@ -81,17 +72,17 @@ namespace compiler {
   V(PPC_ExtendSignWord32)          \
   V(PPC_Uint32ToUint64)            \
   V(PPC_Int64ToInt32)              \
-  V(PPC_Int32ToFloat64)            \
-  V(PPC_Uint32ToFloat64)           \
-  V(PPC_Float32ToFloat64)          \
-  V(PPC_Float64ToInt32)            \
-  V(PPC_Float64ToUint32)           \
-  V(PPC_Float64ToFloat32)          \
-  V(PPC_Float64ExtractLowWord32)   \
-  V(PPC_Float64ExtractHighWord32)  \
-  V(PPC_Float64InsertLowWord32)    \
-  V(PPC_Float64InsertHighWord32)   \
-  V(PPC_Float64Construct)          \
+  V(PPC_Int32ToDouble)             \
+  V(PPC_Uint32ToDouble)            \
+  V(PPC_Float32ToDouble)           \
+  V(PPC_DoubleToInt32)             \
+  V(PPC_DoubleToUint32)            \
+  V(PPC_DoubleToFloat32)           \
+  V(PPC_DoubleExtractLowWord32)    \
+  V(PPC_DoubleExtractHighWord32)   \
+  V(PPC_DoubleInsertLowWord32)     \
+  V(PPC_DoubleInsertHighWord32)    \
+  V(PPC_DoubleConstruct)           \
   V(PPC_LoadWordS8)                \
   V(PPC_LoadWordU8)                \
   V(PPC_LoadWordS16)               \
@@ -99,13 +90,13 @@ namespace compiler {
   V(PPC_LoadWordS32)               \
   V(PPC_LoadWord64)                \
   V(PPC_LoadFloat32)               \
-  V(PPC_LoadFloat64)               \
+  V(PPC_LoadDouble)                \
   V(PPC_StoreWord8)                \
   V(PPC_StoreWord16)               \
   V(PPC_StoreWord32)               \
   V(PPC_StoreWord64)               \
   V(PPC_StoreFloat32)              \
-  V(PPC_StoreFloat64)              \
+  V(PPC_StoreDouble)               \
   V(PPC_StoreWriteBarrier)
 
 
index ae4c97a..9d576ba 100644 (file)
@@ -67,25 +67,16 @@ class PPCOperandGenerator FINAL : public OperandGenerator {
 };
 
 
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
-                           Node* node) {
-  PPCOperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)));
-}
-
+namespace {
 
-static void VisitRRR(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
+void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)),
-                 g.UseRegister(node->InputAt(1)));
+                 g.UseRegister(node->InputAt(0)));
 }
 
 
-static void VisitRRRFloat64(InstructionSelector* selector, Node* node,
-                            ArchOpcode opcode) {
+void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
@@ -93,8 +84,8 @@ static void VisitRRRFloat64(InstructionSelector* selector, Node* node,
 }
 
 
-static void VisitRRO(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode, ImmediateMode operand_mode) {
+void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
+              ImmediateMode operand_mode) {
   PPCOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
@@ -104,9 +95,9 @@ static void VisitRRO(InstructionSelector* selector, Node* node,
 
 // Shared routine for multiple binary operations.
 template <typename Matcher>
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, ImmediateMode operand_mode,
-                       FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, ImmediateMode operand_mode,
+                FlagsContinuation* cont) {
   PPCOperandGenerator g(selector);
   Matcher m(node);
   InstructionOperand inputs[4];
@@ -139,12 +130,14 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
 
 // Shared routine for multiple binary operations.
 template <typename Matcher>
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       ArchOpcode opcode, ImmediateMode operand_mode) {
+void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                ImmediateMode operand_mode) {
   FlagsContinuation cont;
   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
 
+}  // namespace
+
 
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
@@ -160,7 +153,7 @@ void InstructionSelector::VisitLoad(Node* node) {
       opcode = kPPC_LoadFloat32;
       break;
     case kRepFloat64:
-      opcode = kPPC_LoadFloat64;
+      opcode = kPPC_LoadDouble;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
@@ -230,7 +223,7 @@ void InstructionSelector::VisitStore(Node* node) {
       opcode = kPPC_StoreFloat32;
       break;
     case kRepFloat64:
-      opcode = kPPC_StoreFloat64;
+      opcode = kPPC_StoreDouble;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
@@ -348,17 +341,11 @@ static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
   // Map instruction to equivalent operation with inverted right input.
   ArchOpcode inv_opcode = opcode;
   switch (opcode) {
-    case kPPC_And32:
-      inv_opcode = kPPC_AndComplement32;
+    case kPPC_And:
+      inv_opcode = kPPC_AndComplement;
       break;
-    case kPPC_And64:
-      inv_opcode = kPPC_AndComplement64;
-      break;
-    case kPPC_Or32:
-      inv_opcode = kPPC_OrComplement32;
-      break;
-    case kPPC_Or64:
-      inv_opcode = kPPC_OrComplement64;
+    case kPPC_Or:
+      inv_opcode = kPPC_OrComplement;
       break;
     default:
       UNREACHABLE();
@@ -451,7 +438,7 @@ void InstructionSelector::VisitWord32And(Node* node) {
     }
   }
   VisitLogical<Int32BinopMatcher>(
-      this, node, &m, kPPC_And32, CanCover(node, m.left().node()),
+      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kInt16Imm_Unsigned);
 }
 
@@ -508,7 +495,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
     }
   }
   VisitLogical<Int64BinopMatcher>(
-      this, node, &m, kPPC_And64, CanCover(node, m.left().node()),
+      this, node, &m, kPPC_And, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kInt16Imm_Unsigned);
 }
 #endif
@@ -517,7 +504,7 @@ void InstructionSelector::VisitWord64And(Node* node) {
 void InstructionSelector::VisitWord32Or(Node* node) {
   Int32BinopMatcher m(node);
   VisitLogical<Int32BinopMatcher>(
-      this, node, &m, kPPC_Or32, CanCover(node, m.left().node()),
+      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kInt16Imm_Unsigned);
 }
 
@@ -526,7 +513,7 @@ void InstructionSelector::VisitWord32Or(Node* node) {
 void InstructionSelector::VisitWord64Or(Node* node) {
   Int64BinopMatcher m(node);
   VisitLogical<Int64BinopMatcher>(
-      this, node, &m, kPPC_Or64, CanCover(node, m.left().node()),
+      this, node, &m, kPPC_Or, CanCover(node, m.left().node()),
       CanCover(node, m.right().node()), kInt16Imm_Unsigned);
 }
 #endif
@@ -536,9 +523,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
   PPCOperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kPPC_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor32, kInt16Imm_Unsigned);
+    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
   }
 }
 
@@ -548,9 +535,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
   PPCOperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.right().Is(-1)) {
-    Emit(kPPC_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    Emit(kPPC_Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor64, kInt16Imm_Unsigned);
+    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Xor, kInt16Imm_Unsigned);
   }
 }
 #endif
@@ -577,7 +564,7 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
       }
     }
   }
-  VisitRRO(this, node, kPPC_ShiftLeft32, kShift32Imm);
+  VisitRRO(this, kPPC_ShiftLeft32, node, kShift32Imm);
 }
 
 
@@ -622,7 +609,7 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
       }
     }
   }
-  VisitRRO(this, node, kPPC_ShiftLeft64, kShift64Imm);
+  VisitRRO(this, kPPC_ShiftLeft64, node, kShift64Imm);
 }
 #endif
 
@@ -649,7 +636,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
       }
     }
   }
-  VisitRRO(this, node, kPPC_ShiftRight32, kShift32Imm);
+  VisitRRO(this, kPPC_ShiftRight32, node, kShift32Imm);
 }
 
 
@@ -690,7 +677,7 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
       }
     }
   }
-  VisitRRO(this, node, kPPC_ShiftRight64, kShift64Imm);
+  VisitRRO(this, kPPC_ShiftRight64, node, kShift64Imm);
 }
 #endif
 
@@ -711,27 +698,27 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
       return;
     }
   }
-  VisitRRO(this, node, kPPC_ShiftRightAlg32, kShift32Imm);
+  VisitRRO(this, kPPC_ShiftRightAlg32, node, kShift32Imm);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  VisitRRO(this, node, kPPC_ShiftRightAlg64, kShift64Imm);
+  VisitRRO(this, kPPC_ShiftRightAlg64, node, kShift64Imm);
 }
 #endif
 
 
 // TODO(mbrandy): Absorb logical-and into rlwinm?
 void InstructionSelector::VisitWord32Ror(Node* node) {
-  VisitRRO(this, node, kPPC_RotRight32, kShift32Imm);
+  VisitRRO(this, kPPC_RotRight32, node, kShift32Imm);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 // TODO(mbrandy): Absorb logical-and into rldic?
 void InstructionSelector::VisitWord64Ror(Node* node) {
-  VisitRRO(this, node, kPPC_RotRight64, kShift64Imm);
+  VisitRRO(this, kPPC_RotRight64, node, kShift64Imm);
 }
 #endif
 
@@ -743,13 +730,13 @@ void InstructionSelector::VisitWord32Clz(Node* node) {
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
 }
 #endif
 
@@ -758,9 +745,9 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
   PPCOperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kPPC_Neg32, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub32, kInt16Imm_Negate);
+    VisitBinop<Int32BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
   }
 }
 
@@ -770,22 +757,22 @@ void InstructionSelector::VisitInt64Sub(Node* node) {
   PPCOperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kPPC_Neg64, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+    Emit(kPPC_Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub64, kInt16Imm_Negate);
+    VisitBinop<Int64BinopMatcher>(this, node, kPPC_Sub, kInt16Imm_Negate);
   }
 }
 #endif
 
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
-  VisitRRR(this, node, kPPC_Mul32);
+  VisitRRR(this, kPPC_Mul32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Mul(Node* node) {
-  VisitRRR(this, node, kPPC_Mul64);
+  VisitRRR(this, kPPC_Mul64, node);
 }
 #endif
 
@@ -805,94 +792,82 @@ void InstructionSelector::VisitUint32MulHigh(Node* node) {
 
 
 void InstructionSelector::VisitInt32Div(Node* node) {
-  VisitRRR(this, node, kPPC_Div32);
+  VisitRRR(this, kPPC_Div32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Div(Node* node) {
-  VisitRRR(this, node, kPPC_Div64);
+  VisitRRR(this, kPPC_Div64, node);
 }
 #endif
 
 
 void InstructionSelector::VisitUint32Div(Node* node) {
-  VisitRRR(this, node, kPPC_DivU32);
+  VisitRRR(this, kPPC_DivU32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitUint64Div(Node* node) {
-  VisitRRR(this, node, kPPC_DivU64);
+  VisitRRR(this, kPPC_DivU64, node);
 }
 #endif
 
 
 void InstructionSelector::VisitInt32Mod(Node* node) {
-  VisitRRR(this, node, kPPC_Mod32);
+  VisitRRR(this, kPPC_Mod32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Mod(Node* node) {
-  VisitRRR(this, node, kPPC_Mod64);
+  VisitRRR(this, kPPC_Mod64, node);
 }
 #endif
 
 
 void InstructionSelector::VisitUint32Mod(Node* node) {
-  VisitRRR(this, node, kPPC_ModU32);
+  VisitRRR(this, kPPC_ModU32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitUint64Mod(Node* node) {
-  VisitRRR(this, node, kPPC_ModU64);
+  VisitRRR(this, kPPC_ModU64, node);
 }
 #endif
 
 
 void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  PPCOperandGenerator g(this);
-  Emit(kPPC_Float32ToFloat64, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_Float32ToDouble, node);
 }
 
 
 void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  PPCOperandGenerator g(this);
-  Emit(kPPC_Int32ToFloat64, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_Int32ToDouble, node);
 }
 
 
 void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  PPCOperandGenerator g(this);
-  Emit(kPPC_Uint32ToFloat64, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_Uint32ToDouble, node);
 }
 
 
 void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  PPCOperandGenerator g(this);
-  Emit(kPPC_Float64ToInt32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_DoubleToInt32, node);
 }
 
 
 void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  PPCOperandGenerator g(this);
-  Emit(kPPC_Float64ToUint32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_DoubleToUint32, node);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   // TODO(mbrandy): inspect input to see if nop is appropriate.
-  PPCOperandGenerator g(this);
-  Emit(kPPC_ExtendSignWord32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kPPC_ExtendSignWord32, node);
 }
 
 
@@ -907,7 +882,7 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   PPCOperandGenerator g(this);
-  Emit(kPPC_Float64ToFloat32, g.DefineAsRegister(node),
+  Emit(kPPC_DoubleToFloat32, g.DefineAsRegister(node),
        g.UseRegister(node->InputAt(0)));
 }
 
@@ -922,9 +897,19 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
 #endif
 
 
+void InstructionSelector::VisitFloat32Add(Node* node) {
+  VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
 void InstructionSelector::VisitFloat64Add(Node* node) {
   // TODO(mbrandy): detect multiply-add
-  VisitRRRFloat64(this, node, kPPC_AddFloat64);
+  VisitRRR(this, kPPC_AddDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+  VisitRRR(this, kPPC_SubDouble, node);
 }
 
 
@@ -939,62 +924,87 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
       Float64BinopMatcher mright0(m.right().InputAt(0));
       if (mright0.left().IsMinusZero()) {
         // -floor(-x) = ceil(x)
-        Emit(kPPC_CeilFloat64, g.DefineAsRegister(node),
+        Emit(kPPC_CeilDouble, g.DefineAsRegister(node),
              g.UseRegister(mright0.right().node()));
         return;
       }
     }
   }
-  VisitRRRFloat64(this, node, kPPC_SubFloat64);
+  VisitRRR(this, kPPC_SubDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+  VisitRRR(this, kPPC_MulDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   // TODO(mbrandy): detect negate
-  VisitRRRFloat64(this, node, kPPC_MulFloat64);
+  VisitRRR(this, kPPC_MulDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+  VisitRRR(this, kPPC_DivDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRRRFloat64(this, node, kPPC_DivFloat64);
+  VisitRRR(this, kPPC_DivDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   PPCOperandGenerator g(this);
-  Emit(kPPC_ModFloat64, g.DefineAsFixed(node, d1),
+  Emit(kPPC_ModDouble, g.DefineAsFixed(node, d1),
        g.UseFixed(node->InputAt(0), d1),
        g.UseFixed(node->InputAt(1), d2))->MarkAsCall();
 }
 
 
+void InstructionSelector::VisitFloat32Max(Node* node) {
+  VisitRRR(this, kPPC_MaxDouble, node);
+}
+
+
 void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRRRFloat64(this, node, kPPC_MaxFloat64);
+  VisitRRR(this, kPPC_MaxDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+  VisitRRR(this, kPPC_MinDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRRRFloat64(this, node, kPPC_MinFloat64);
+  VisitRRR(this, kPPC_MinDouble, node);
+}
+
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+  VisitRR(this, kPPC_SqrtDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRRFloat64(this, kPPC_SqrtFloat64, node);
+  VisitRR(this, kPPC_SqrtDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRRFloat64(this, kPPC_FloorFloat64, node);
+  VisitRR(this, kPPC_FloorDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRRFloat64(this, kPPC_TruncateFloat64, node);
+  VisitRR(this, kPPC_TruncateDouble, node);
 }
 
 
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
-  VisitRRFloat64(this, kPPC_RoundFloat64, node);
+  VisitRR(this, kPPC_RoundDouble, node);
 }
 
 
@@ -1037,10 +1047,12 @@ static bool CompareLogical(FlagsContinuation* cont) {
 }
 
 
+namespace {
+
 // Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
-                         InstructionOperand left, InstructionOperand right,
-                         FlagsContinuation* cont) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont) {
   PPCOperandGenerator g(selector);
   opcode = cont->Encode(opcode);
   if (cont->IsBranch()) {
@@ -1054,9 +1066,9 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
 
 
 // Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
-                             InstructionCode opcode, FlagsContinuation* cont,
-                             bool commutative, ImmediateMode immediate_mode) {
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative, ImmediateMode immediate_mode) {
   PPCOperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
@@ -1076,37 +1088,48 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
 }
 
 
-static void VisitWord32Compare(InstructionSelector* selector, Node* node,
-                               FlagsContinuation* cont) {
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
   ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
   VisitWordCompare(selector, node, kPPC_Cmp32, cont, false, mode);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
-static void VisitWord64Compare(InstructionSelector* selector, Node* node,
-                               FlagsContinuation* cont) {
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
   ImmediateMode mode = (CompareLogical(cont) ? kInt16Imm_Unsigned : kInt16Imm);
   VisitWordCompare(selector, node, kPPC_Cmp64, cont, false, mode);
 }
 #endif
 
 
-// Shared routine for multiple float compare operations.
-static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
-                                FlagsContinuation* cont) {
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
   PPCOperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
-  VisitCompare(selector, kPPC_CmpFloat64, g.UseRegister(left),
+  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  PPCOperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kPPC_CmpDouble, g.UseRegister(left),
                g.UseRegister(right), cont);
 }
 
 
 // Shared routine for word comparisons against zero.
-static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
-                                 Node* value, InstructionCode opcode,
-                                 FlagsContinuation* cont) {
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont) {
   while (selector->CanCover(user, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord32Equal: {
@@ -1148,6 +1171,15 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
         return VisitWord64Compare(selector, value, cont);
 #endif
+      case IrOpcode::kFloat32Equal:
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitFloat32Compare(selector, value, cont);
+      case IrOpcode::kFloat32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitFloat32Compare(selector, value, cont);
       case IrOpcode::kFloat64Equal:
         cont->OverwriteAndNegateIfEqual(kEqual);
         return VisitFloat64Compare(selector, value, cont);
@@ -1228,19 +1260,21 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
 }
 
 
-static void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
-                                   Node* value, FlagsContinuation* cont) {
+void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
   VisitWordCompareZero(selector, user, value, kPPC_Cmp32, cont);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
-static void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
-                                   Node* value, FlagsContinuation* cont) {
+void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
+                            Node* value, FlagsContinuation* cont) {
   VisitWordCompareZero(selector, user, value, kPPC_Cmp64, cont);
 }
 #endif
 
+}  // namespace
+
 
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
@@ -1265,7 +1299,7 @@ void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
     InstructionOperand index_operand = value_operand;
     if (sw.min_value) {
       index_operand = g.TempRegister();
-      Emit(kPPC_Sub32, index_operand, value_operand,
+      Emit(kPPC_Sub, index_operand, value_operand,
            g.TempImmediate(sw.min_value));
     }
     // Generate a table lookup.
@@ -1341,6 +1375,24 @@ void InstructionSelector::VisitUint64LessThan(Node* node) {
 #endif
 
 
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitFloat32Compare(this, node, &cont);
+}
+
+
 void InstructionSelector::VisitFloat64Equal(Node* node) {
   FlagsContinuation cont(kEqual, node);
   VisitFloat64Compare(this, node, &cont);
@@ -1419,14 +1471,14 @@ void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
 
 void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
   PPCOperandGenerator g(this);
-  Emit(kPPC_Float64ExtractLowWord32, g.DefineAsRegister(node),
+  Emit(kPPC_DoubleExtractLowWord32, g.DefineAsRegister(node),
        g.UseRegister(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
   PPCOperandGenerator g(this);
-  Emit(kPPC_Float64ExtractHighWord32, g.DefineAsRegister(node),
+  Emit(kPPC_DoubleExtractHighWord32, g.DefineAsRegister(node),
        g.UseRegister(node->InputAt(0)));
 }
 
@@ -1438,11 +1490,11 @@ void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
       CanCover(node, left)) {
     left = left->InputAt(1);
-    Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(left),
+    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(left),
          g.UseRegister(right));
     return;
   }
-  Emit(kPPC_Float64InsertLowWord32, g.DefineSameAsFirst(node),
+  Emit(kPPC_DoubleInsertLowWord32, g.DefineSameAsFirst(node),
        g.UseRegister(left), g.UseRegister(right));
 }
 
@@ -1454,11 +1506,11 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
   if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
       CanCover(node, left)) {
     left = left->InputAt(1);
-    Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(right),
+    Emit(kPPC_DoubleConstruct, g.DefineAsRegister(node), g.UseRegister(right),
          g.UseRegister(left));
     return;
   }
-  Emit(kPPC_Float64InsertHighWord32, g.DefineSameAsFirst(node),
+  Emit(kPPC_DoubleInsertHighWord32, g.DefineSameAsFirst(node),
        g.UseRegister(left), g.UseRegister(right));
 }
 
@@ -1466,7 +1518,9 @@ void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
-  return MachineOperatorBuilder::kFloat64Max |
+  return MachineOperatorBuilder::kFloat32Max |
+         MachineOperatorBuilder::kFloat32Min |
+         MachineOperatorBuilder::kFloat64Max |
          MachineOperatorBuilder::kFloat64Min |
          MachineOperatorBuilder::kFloat64RoundDown |
          MachineOperatorBuilder::kFloat64RoundTruncate |