--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
    case kSSEFloat32Div:
ASSEMBLE_SSE_BINOP(divss);
break;
+ case kSSEFloat32Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
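+      // pcmpeqd of a register with itself yields all-ones; psllq then shifts
+      // each 64-bit lane left by 31, leaving bit 31 (the sign bit of the
+      // scalar float) set. xorps flips that sign bit in place; the remaining
+      // flipped bits fall into upper lanes that scalar code never reads.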
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kSSEFloat32Sqrt:
+ ASSEMBLE_SSE_UNOP(sqrtss);
+ break;
case kSSEFloat32Max:
ASSEMBLE_SSE_BINOP(maxss);
break;
case kSSEFloat32Min:
ASSEMBLE_SSE_BINOP(minss);
break;
- case kSSEFloat32Sqrt:
- ASSEMBLE_SSE_UNOP(sqrtss);
- break;
case kSSEFloat32ToFloat64:
ASSEMBLE_SSE_UNOP(cvtss2sd);
break;
case kSSEFloat64Min:
ASSEMBLE_SSE_BINOP(minsd);
break;
+ case kSSEFloat64Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
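+      // Same trick for doubles: all-ones shifted left by 63 leaves only bit
+      // 63 set in each 64-bit lane, i.e. the float64 sign-bit mask
+      // 0x8000000000000000, and xorpd flips the sign of the scalar double.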
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ __ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat64Sqrt:
ASSEMBLE_SSE_UNOP(sqrtsd);
break;
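--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h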
V(SSEFloat32Sub) \
V(SSEFloat32Mul) \
V(SSEFloat32Div) \
+ V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat32Max) \
V(SSEFloat32Min) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat64Max) \
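--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc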
void InstructionSelector::VisitFloat32Sub(Node* node) {
+ X64OperandGenerator g(this);
+ Float32BinopMatcher m(node);
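+  // -0.0 - x flips the sign of x for every x, including +/-0 and the
+  // infinities (a NaN input yields a NaN either way), so it reduces to a
+  // bitwise sign flip. +0.0 - x would not qualify: 0.0 - 0.0 is +0.0, while
+  // a sign flip of 0.0 yields -0.0. The SSE version xors the register in
+  // place, hence the same-as-first output constraint.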
+ if (m.left().IsMinusZero()) {
+ Emit(kSSEFloat32Neg, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()));
+ return;
+ }
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
Float64BinopMatcher m(node);
- if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
- CanCover(m.node(), m.right().node())) {
- if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
- CanCover(m.right().node(), m.right().InputAt(0))) {
- Float64BinopMatcher mright0(m.right().InputAt(0));
- if (mright0.left().IsMinusZero()) {
- Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
- g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
- return;
+ if (m.left().IsMinusZero()) {
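+    // -0.0 - RoundDown(-0.0 - x) matches the identity ceil(x) == -floor(-x)
+    // and collapses into a single round-up; any other -0.0 - x becomes a
+    // plain sign flip below.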
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
}
}
+ Emit(kSSEFloat64Neg, g.DefineSameAsFirst(node),
+ g.UseRegister(m.right().node()));
+ return;
}
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
}
}
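--- a/test/cctest/compiler/test-run-machops.cc
+++ b/test/cctest/compiler/test-run-machops.cc
+// Covers constant - variable over the float32 input space; a -0.0 constant
+// on the left exercises the new Float32Neg lowering.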
+TEST(RunFloat32SubImm1) {
+ float input = 0.0f;
+ float output = 0.0f;
+
+ FOR_FLOAT32_INPUTS(i) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat32);
+ Node* t1 = m.Float32Sub(m.Float32Constant(*i), t0);
+ m.StoreToPointer(&output, kMachFloat32, t1);
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT32_INPUTS(j) {
+ input = *j;
+ float expected = *i - input;
+ CHECK_EQ(0, m.Call());
+ CheckFloatEq(expected, output);
+ }
+ }
+}
+
+
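+// Covers the mirrored variable - constant form, which must not take the
+// Float32Neg path.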
+TEST(RunFloat32SubImm2) {
+ float input = 0.0f;
+ float output = 0.0f;
+
+ FOR_FLOAT32_INPUTS(i) {
+ RawMachineAssemblerTester<int32_t> m;
+ Node* t0 = m.LoadFromPointer(&input, kMachFloat32);
+ Node* t1 = m.Float32Sub(t0, m.Float32Constant(*i));
+ m.StoreToPointer(&output, kMachFloat32, t1);
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT32_INPUTS(j) {
+ input = *j;
+ float expected = input - *i;
+ CHECK_EQ(0, m.Call());
+ CheckFloatEq(expected, output);
+ }
+ }
+}
+
+
TEST(RunFloat64SubP) {
RawMachineAssemblerTester<int32_t> m;
Float64BinopTester bt(&m);
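--- a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+// -0.0 - x must select a single kSSEFloat64Neg taking the parameter as its
+// only input, producing the node's value, with no flags attached.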
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
// -----------------------------------------------------------------------------
// Miscellaneous.