      __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      break;
+    case kArm64Float64Neg:
+      __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
void InstructionSelector::VisitFloat64Sub(Node* node) {
  Arm64OperandGenerator g(this);
  Float64BinopMatcher m(node);
-  if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
-      CanCover(m.node(), m.right().node())) {
-    if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
-        CanCover(m.right().node(), m.right().InputAt(0))) {
-      Float64BinopMatcher mright0(m.right().InputAt(0));
-      if (mright0.left().IsMinusZero()) {
-        Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
-             g.UseRegister(mright0.right().node()));
-        return;
+  if (m.left().IsMinusZero()) {
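+    // Fold -0.0 - Float64RoundDown(-0.0 - x) into Float64RoundUp(x).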
+    if (m.right().IsFloat64RoundDown() &&
+        CanCover(m.node(), m.right().node())) {
+      if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+          CanCover(m.right().node(), m.right().InputAt(0))) {
+        Float64BinopMatcher mright0(m.right().InputAt(0));
+        if (mright0.left().IsMinusZero()) {
+          Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
+               g.UseRegister(mright0.right().node()));
+          return;
+        }
      }
    }
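+    // Fold -0.0 - x into Fneg(x).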
+    Emit(kArm64Float64Neg, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
  }
  VisitRRR(this, kArm64Float64Sub, node);
}
  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+
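+// Verify that -0.0 - x is selected as a single Fneg instruction.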
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8