}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArmVmovLowU32F64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ ArmOperandGenerator g(this);
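+ // kArmVmovLowF64U32 reads the value to insert from input 1 (input 0 is the
+ // double whose high word would normally be preserved), so a dummy immediate
+ // 0 fills slot 0 here; only the low word of the result matters for the
+ // float32 bitcast.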
+ Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
- case kArm64Float64MoveU64: {
+ case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
- }
+ case kArm64U64MoveFloat64:
+ __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArm64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kArm64U64MoveFloat64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
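+ // kArm64Float64MoveU64 emits an Fmov from a general register into an FP
+ // register; the float32 result is just the low word (the S-register view
+ // of the destination), so the same instruction can be reused here.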
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kArm64Float32Add, node);
}
__ movss(operand, i.InputDoubleRegister(index));
}
break;
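+ // The int32/float32 bitcasts only reinterpret bits: a stack-slot input is
+ // reloaded directly as the destination type, while a register input is
+ // moved between the GPR and XMM files with movd.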
+ case kIA32BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kIA32BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kBitcastFloat32ToInt32:
+ return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kBitcastFloat64ToInt64:
+ return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
+ case IrOpcode::kBitcastInt32ToFloat32:
+ return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
+ case IrOpcode::kBitcastInt64ToFloat64:
+ return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
case IrOpcode::kFloat32Add:
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
UNIMPLEMENTED();
}
+
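+// 64-bit bitcasts are not expected to be reachable on 32-bit targets.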
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
#endif // V8_TARGET_ARCH_32_BIT
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ // These operators reinterpret the bits of a floating point number as an
+ // integer and vice versa.
+ const Operator* BitcastFloat32ToInt32();
+ const Operator* BitcastFloat64ToInt64();
+ const Operator* BitcastInt32ToFloat32();
+ const Operator* BitcastInt64ToFloat64();
+
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMipsFloat64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
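+ // dmfc1/dmtc1 move the full 64 bits between a general register and an FPU
+ // register without any conversion.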
+ case kMips64BitcastDL:
+ __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64BitcastLD:
+ __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
case kMips64Float64ExtractLowWord32:
__ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
break;
V(Mips64Swc1) \
V(Mips64Ldc1) \
V(Mips64Sdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64BitcastDL, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64BitcastLD, node);
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMips64AddS, node);
}
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64Float64ExtractLowWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64Float64ExtractHighWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
V(Float32Mul) \
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+#if V8_TARGET_ARCH_PPC64
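+ // mffprd/mtfprd transfer the raw 64-bit value between a GPR and an FPR,
+ // which requires 64-bit general registers, hence the PPC64 guard.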
+ case kPPC_BitcastDL:
+ __ mffprd(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kPPC_BitcastLD:
+ __ mtfprd(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break;
V(PPC_DoubleInsertLowWord32) \
V(PPC_DoubleInsertHighWord32) \
V(PPC_DoubleConstruct) \
+ V(PPC_BitcastDL) \
+ V(PPC_BitcastLD) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \
#endif
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kPPC_DoubleExtractLowWord32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kPPC_BitcastDL, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_DoubleInsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_BitcastLD, node);
+}
+#endif
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kPPC_AddDouble, node);
}
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
+ Node* BitcastFloat32ToInt32(Node* a) {
+ return NewNode(machine()->BitcastFloat32ToInt32(), a);
+ }
+ Node* BitcastFloat64ToInt64(Node* a) {
+ return NewNode(machine()->BitcastFloat64ToInt64(), a);
+ }
+ Node* BitcastInt32ToFloat32(Node* a) {
+ return NewNode(machine()->BitcastInt32ToFloat32(), a);
+ }
+ Node* BitcastInt64ToFloat64(Node* a) {
+ return NewNode(machine()->BitcastInt64ToFloat64(), a);
+ }
Node* Float64RoundDown(Node* a) {
return NewNode(machine()->Float64RoundDown().op(), a);
}
}
+Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
+ return Type::Number();
+}
+
+
+Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
+ return Type::Number();
+}
+
+
Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
case IrOpcode::kTruncateInt64ToInt32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
+ case IrOpcode::kBitcastFloat32ToInt32:
+ case IrOpcode::kBitcastFloat64ToInt64:
+ case IrOpcode::kBitcastInt32ToFloat32:
+ case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
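+ // The four bitcasts only move bits: stack-slot inputs are reloaded
+ // directly as the destination type, and register inputs are transferred
+ // between the GPR and XMM files with movd (32-bit) or movq (64-bit).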
+ case kX64BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastDL:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movq(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64BitcastLD:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kX64Lea32: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
CHECK_EQ(write, buffer[0]);
CHECK_EQ(write, buffer[1]);
}
+
+
+TEST(RunBitcastInt64ToFloat64) {
+ // TODO(titzer): run int64 tests on all platforms when supported.
+ int64_t input = 1;
+ double output = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachFloat64,
+ m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, kMachInt64)));
+ m.Return(m.Int32Constant(11));
+ FOR_INT32_INPUTS(i) {
+ input = static_cast<int64_t>(*i) * 14444;
+ CHECK_EQ(11, m.Call());
+ double expected = bit_cast<double>(input);
+ CHECK_EQ(bit_cast<int64_t>(expected), bit_cast<int64_t>(output));
+ }
+}
+
+
+TEST(RunBitcastFloat64ToInt64) {
+ // TODO(titzer): run int64 tests on all platforms when supported.
+ double input = 0;
+ int64_t output = 0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachInt64,
+ m.BitcastFloat64ToInt64(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(11));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ CHECK_EQ(11, m.Call());
+ int64_t expected = bit_cast<int64_t>(input);
+ CHECK_EQ(expected, output);
+ }
+}
#endif
+
+
+TEST(RunBitcastFloat32ToInt32) {
+ float input = 32.25;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.BitcastFloat32ToInt32(m.LoadFromPointer(&input, kMachFloat32)));
+ FOR_FLOAT32_INPUTS(i) {
+ input = *i;
+ int32_t expected = bit_cast<int32_t>(input);
+ CHECK_EQ(expected, m.Call());
+ }
+}
+
+
+TEST(RunBitcastInt32ToFloat32) {
+ int32_t input = 1;
+ float output = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachFloat32,
+ m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, kMachInt32)));
+ m.Return(m.Int32Constant(11));
+ FOR_INT32_INPUTS(i) {
+ input = *i;
+ CHECK_EQ(11, m.Call());
+ float expected = bit_cast<float>(input);
+ CHECK_EQ(bit_cast<int32_t>(expected), bit_cast<int32_t>(output));
+ }
+}