case kArmVsqrtF32:
__ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
+ case kArmVabsF32:
+ __ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArmVnegF32:
__ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVsqrtF64:
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
+ case kArmVabsF64:
+ __ vabs(i.OutputFloat64Register(), i.InputFloat64Register(0));
+ break;
case kArmVnegF64:
__ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
-}
+} // NOLINT(readability/fn_size)
// Assembles branches after an instruction.
V(ArmVmlaF32) \
V(ArmVmlsF32) \
V(ArmVdivF32) \
+ V(ArmVabsF32) \
V(ArmVnegF32) \
V(ArmVsqrtF32) \
V(ArmVcmpF64) \
V(ArmVmlsF64) \
V(ArmVdivF64) \
V(ArmVmodF64) \
+ V(ArmVabsF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
V(ArmVrintmF64) \
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kArmVabsF32, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kArmVabsF64, node);
+}
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArmVsqrtF32, node);
}
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Abs |
+ MachineOperatorBuilder::kFloat64Abs |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
__ Fmin(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
break;
+ case kArm64Float32Abs:
+ __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
+ break;
case kArm64Float32Sqrt:
__ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
__ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
+ case kArm64Float64Abs:
+ __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64Neg:
__ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
V(Arm64Float32Div) \
V(Arm64Float32Max) \
V(Arm64Float32Min) \
+ V(Arm64Float32Abs) \
V(Arm64Float32Sqrt) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
V(Arm64Float64Mod) \
V(Arm64Float64Max) \
V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
V(Arm64Float64Neg) \
V(Arm64Float64Sqrt) \
V(Arm64Float64RoundDown) \
}
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kArm64Float32Abs, node);
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kArm64Float64Abs, node);
+}
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kArm64Float32Sqrt, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat32Max |
+ return MachineOperatorBuilder::kFloat32Abs |
+ MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Abs |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kFloat64RoundDown |
if (matcher.Matched()) {
if (matcher.IfTrue() == merge->InputAt(1)) std::swap(vtrue, vfalse);
Node* cond = matcher.Branch()->InputAt(0);
- if (cond->opcode() == IrOpcode::kFloat64LessThan) {
- if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ if (cond->opcode() == IrOpcode::kFloat32LessThan) {
+ Float32BinopMatcher mcond(cond);
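+ // Matches the abs idiom (0.0 < v) ? v : 0.0 - v and rewrites the phi
+ // to Float32Abs(v) when the backend supports it.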
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat32Sub &&
+ machine()->HasFloat32Abs()) {
+ Float32BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float32Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->HasFloat32Min()) {
+ return Change(node, machine()->Float32Min(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->HasFloat32Max()) {
+ return Change(node, machine()->Float32Max(), vtrue, vfalse);
+ }
+ } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ Float64BinopMatcher mcond(cond);
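+ // Same idiom in double precision: (0.0 < v) ? v : 0.0 - v becomes
+ // Float64Abs(v).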
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat64Sub &&
+ machine()->HasFloat64Abs()) {
+ Float64BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float64Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
machine()->HasFloat64Min()) {
- node->set_op(machine()->Float64Min());
- node->ReplaceInput(0, vtrue);
- node->ReplaceInput(1, vfalse);
- node->TrimInputCount(2);
- return Changed(node);
- } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ return Change(node, machine()->Float64Min(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
machine()->HasFloat64Max()) {
- node->set_op(machine()->Float64Max());
- node->ReplaceInput(0, vtrue);
- node->ReplaceInput(1, vfalse);
- node->TrimInputCount(2);
- return Changed(node);
+ return Change(node, machine()->Float64Max(), vtrue, vfalse);
}
}
}
Node* vtrue = NodeProperties::GetValueInput(node, 1);
Node* vfalse = NodeProperties::GetValueInput(node, 2);
if (vtrue == vfalse) return Replace(vtrue);
- if (cond->opcode() == IrOpcode::kFloat64LessThan) {
- if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ if (cond->opcode() == IrOpcode::kFloat32LessThan) {
+ Float32BinopMatcher mcond(cond);
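+ // As in ReducePhi: Select(0.0 < v, v, 0.0 - v) becomes Float32Abs(v).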
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat32Sub &&
+ machine()->HasFloat32Abs()) {
+ Float32BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float32Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
+ machine()->HasFloat32Min()) {
+ return Change(node, machine()->Float32Min(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
+ machine()->HasFloat32Max()) {
+ return Change(node, machine()->Float32Max(), vtrue, vfalse);
+ }
+ } else if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ Float64BinopMatcher mcond(cond);
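+ // As in ReducePhi: Select(0.0 < v, v, 0.0 - v) becomes Float64Abs(v).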
+ if (mcond.left().Is(0.0) && mcond.right().Equals(vtrue) &&
+ vfalse->opcode() == IrOpcode::kFloat64Sub &&
+ machine()->HasFloat64Abs()) {
+ Float64BinopMatcher mvfalse(vfalse);
+ if (mvfalse.left().IsZero() && mvfalse.right().Equals(vtrue)) {
+ return Change(node, machine()->Float64Abs(), vtrue);
+ }
+ }
+ if (mcond.left().Equals(vtrue) && mcond.right().Equals(vfalse) &&
machine()->HasFloat64Min()) {
- node->set_op(machine()->Float64Min());
- node->ReplaceInput(0, vtrue);
- node->ReplaceInput(1, vfalse);
- node->TrimInputCount(2);
- return Changed(node);
- } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ return Change(node, machine()->Float64Min(), vtrue, vfalse);
+ } else if (mcond.left().Equals(vfalse) && mcond.right().Equals(vtrue) &&
machine()->HasFloat64Max()) {
- node->set_op(machine()->Float64Max());
- node->ReplaceInput(0, vtrue);
- node->ReplaceInput(1, vfalse);
- node->TrimInputCount(2);
- return Changed(node);
+ return Change(node, machine()->Float64Max(), vtrue, vfalse);
}
}
return NoChange();
}
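+// Helpers that rewrite {node} in place to {op} with the given operands and
+// report the node as changed.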
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
+ Node* a) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ node->TrimInputCount(1);
+ return Changed(node);
+}
+
+
+Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
+ Node* b) {
+ node->set_op(op);
+ node->ReplaceInput(0, a);
+ node->ReplaceInput(1, b);
+ node->TrimInputCount(2);
+ return Changed(node);
+}
+
+
CommonOperatorBuilder* CommonOperatorReducer::common() const {
return jsgraph()->common();
}
class Graph;
class JSGraph;
class MachineOperatorBuilder;
+class Operator;
// Performs strength reduction on nodes that have common operators.
Reduction ReducePhi(Node* node);
Reduction ReduceSelect(Node* node);
+ Reduction Change(Node* node, Operator const* op, Node* a);
+ Reduction Change(Node* node, Operator const* op, Node* a, Node* b);
+
CommonOperatorBuilder* common() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
+ case kSSEFloat32Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
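+ // pcmpeqd fills the scratch register with ones; psrlq by 33 leaves
+ // 0x7FFFFFFF in the low lane, so the andps clears the float32 sign bit.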
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
// TODO(turbofan): Add AVX version with relaxed register constraints.
__ add(esp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
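+ // All ones shifted right by 1 gives the mask 0x7FFFFFFFFFFFFFFF, so the
+ // andpd clears the float64 sign bit.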
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
// TODO(turbofan): Add AVX version with relaxed register constraints.
V(SSEFloat32Div) \
V(SSEFloat32Max) \
V(SSEFloat32Min) \
+ V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat64Cmp) \
V(SSEFloat64Mod) \
V(SSEFloat64Max) \
V(SSEFloat64Min) \
+ V(SSEFloat64Abs) \
V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
}
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitROFloat(this, node, kSSEFloat32Sqrt);
}
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Abs |
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Abs |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
return MarkAsDouble(node), VisitFloat32Min(node);
case IrOpcode::kFloat32Max:
return MarkAsDouble(node), VisitFloat32Max(node);
+ case IrOpcode::kFloat32Abs:
+ return MarkAsDouble(node), VisitFloat32Abs(node);
case IrOpcode::kFloat32Sqrt:
return MarkAsDouble(node), VisitFloat32Sqrt(node);
case IrOpcode::kFloat32Equal:
return MarkAsDouble(node), VisitFloat64Min(node);
case IrOpcode::kFloat64Max:
return MarkAsDouble(node), VisitFloat64Max(node);
+ case IrOpcode::kFloat64Abs:
+ return MarkAsDouble(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsDouble(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float32Mul, Operator::kCommutative, 2, 0, 1) \
V(Float32Div, Operator::kNoProperties, 2, 0, 1) \
+ V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(Float64Add, Operator::kCommutative, 2, 0, 1) \
V(Float64Sub, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mul, Operator::kCommutative, 2, 0, 1) \
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- kFloat32Max = 1u << 0,
- kFloat32Min = 1u << 1,
- kFloat64Max = 1u << 2,
- kFloat64Min = 1u << 3,
- kFloat64RoundDown = 1u << 4,
- kFloat64RoundTruncate = 1u << 5,
- kFloat64RoundTiesAway = 1u << 6,
- kInt32DivIsSafe = 1u << 7,
- kUint32DivIsSafe = 1u << 8,
- kWord32ShiftIsSafe = 1u << 9
+ kFloat32Abs = 1u << 0,
+ kFloat32Max = 1u << 1,
+ kFloat32Min = 1u << 2,
+ kFloat64Abs = 1u << 3,
+ kFloat64Max = 1u << 4,
+ kFloat64Min = 1u << 5,
+ kFloat64RoundDown = 1u << 6,
+ kFloat64RoundTruncate = 1u << 7,
+ kFloat64RoundTiesAway = 1u << 8,
+ kInt32DivIsSafe = 1u << 9,
+ kUint32DivIsSafe = 1u << 10,
+ kWord32ShiftIsSafe = 1u << 11
};
typedef base::Flags<Flag, unsigned> Flags;
bool HasFloat64Max() { return flags_ & kFloat64Max; }
bool HasFloat64Min() { return flags_ & kFloat64Min; }
+ // Floating point abs complying with IEEE 754 (single-precision).
+ const Operator* Float32Abs();
+ bool HasFloat32Abs() const { return flags_ & kFloat32Abs; }
+
+ // Floating point abs complying with IEEE 754 (double-precision).
+ const Operator* Float64Abs();
+ bool HasFloat64Abs() const { return flags_ & kFloat64Abs; }
+
// Floating point rounding.
const Operator* Float64RoundDown();
const Operator* Float64RoundTruncate();
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMipsSqrtS, node);
}
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kMips64SqrtS, node);
}
}
Node* InputAt(int index) const { return node()->InputAt(index); }
+ bool Equals(const Node* node) const { return node_ == node; }
+
bool IsComparison() const;
#define DEFINE_IS_OPCODE(Opcode) \
return this->Is(0.0) && std::signbit(this->Value());
}
bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
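+ // Matches +0.0 only: -0.0 compares equal to 0.0 but has the sign bit set.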
+ bool IsZero() const { return this->Is(0.0) && !std::signbit(this->Value()); }
};
typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
V(Float32Div) \
V(Float32Max) \
V(Float32Min) \
+ V(Float32Abs) \
V(Float32Sqrt) \
V(Float64Add) \
V(Float64Sub) \
V(Float64Mod) \
V(Float64Max) \
V(Float64Min) \
+ V(Float64Abs) \
V(Float64Sqrt) \
V(Float64RoundDown) \
V(Float64RoundTruncate) \
}
+void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
VisitRR(this, kPPC_SqrtDouble, node);
}
Node* Float32Div(Node* a, Node* b) {
return NewNode(machine()->Float32Div(), a, b);
}
+ Node* Float32Abs(Node* a) { return NewNode(machine()->Float32Abs(), a); }
Node* Float32Sqrt(Node* a) { return NewNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
return NewNode(machine()->Float32Equal(), a, b);
Node* Float64Mod(Node* a, Node* b) {
return NewNode(machine()->Float64Mod(), a, b);
}
+ Node* Float64Abs(Node* a) { return NewNode(machine()->Float64Abs(), a); }
Node* Float64Sqrt(Node* a) { return NewNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
return NewNode(machine()->Float64Equal(), a, b);
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Min:
return VisitFloat64Binop(node);
+ case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
}
+Bounds Typer::Visitor::TypeFloat32Abs(Node* node) {
+ // TODO(turbofan): We should be able to infer a better type here.
+ return Bounds(Type::Number());
+}
+
+
Bounds Typer::Visitor::TypeFloat32Sqrt(Node* node) {
return Bounds(Type::Number());
}
}
+Bounds Typer::Visitor::TypeFloat64Abs(Node* node) {
+ // TODO(turbofan): We should be able to infer a better type here.
+ return Bounds(Type::Number());
+}
+
+
Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
return Bounds(Type::Number());
}
case IrOpcode::kFloat32Div:
case IrOpcode::kFloat32Max:
case IrOpcode::kFloat32Min:
+ case IrOpcode::kFloat32Abs:
case IrOpcode::kFloat32Sqrt:
case IrOpcode::kFloat32Equal:
case IrOpcode::kFloat32LessThan:
case IrOpcode::kFloat64Mod:
case IrOpcode::kFloat64Max:
case IrOpcode::kFloat64Min:
+ case IrOpcode::kFloat64Abs:
case IrOpcode::kFloat64Sqrt:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case kSSEFloat32Div:
ASSEMBLE_SSE_BINOP(divss);
break;
+ case kSSEFloat32Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
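+ // Same sign-bit trick as on IA32: (all ones >> 33) == 0x7FFFFFFF.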
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
// TODO(turbofan): Add AVX version with relaxed register constraints.
case kSSEFloat64Min:
ASSEMBLE_SSE_BINOP(minsd);
break;
+ case kSSEFloat64Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ // TODO(turbofan): Add AVX version with relaxed register constraints.
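+ // Same sign-bit trick as on IA32: (all ones >> 1) == 0x7FFFFFFFFFFFFFFF.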
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ __ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
+ break;
+ }
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
// TODO(turbofan): Add AVX version with relaxed register constraints.
V(SSEFloat32Sub) \
V(SSEFloat32Mul) \
V(SSEFloat32Div) \
+ V(SSEFloat32Abs) \
V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat32Max) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Abs) \
V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
}
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat32Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat32Abs |
MachineOperatorBuilder::kFloat32Max |
MachineOperatorBuilder::kFloat32Min |
+ MachineOperatorBuilder::kFloat64Abs |
MachineOperatorBuilder::kFloat64Max |
MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
x = +x;
- if (x > 0) return x;
- return 0 - x;
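+ // 0 - x (rather than -x) ensures that +0 maps to +0.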
+ return (x > 0) ? x : 0 - x;
}
// ECMA 262 - 15.8.2.2
}
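+// The tests below run only on targets that provide a direct abs
+// instruction; otherwise they bail out early via HasFloat32Abs/HasFloat64Abs.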
+TEST(RunFloat32Abs) {
+ float input = -1.0;
+ float result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat32Abs()) return;
+ m.StoreToPointer(&result, kMachFloat32,
+ m.Float32Abs(m.LoadFromPointer(&input, kMachFloat32)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT32_INPUTS(i) {
+ input = *i;
+ float expected = std::abs(input);
+ CHECK_EQ(0, m.Call());
+ CheckFloatEq(expected, result);
+ }
+}
+
+
+TEST(RunFloat64Abs) {
+ double input = -1.0;
+ double result = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ if (!m.machine()->HasFloat64Abs()) return;
+ m.StoreToPointer(&result, kMachFloat64,
+ m.Float64Abs(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ double expected = std::abs(input);
+ CHECK_EQ(0, m.Call());
+ CheckDoubleEq(expected, result);
+ }
+}
+
+
static double two_30 = 1 << 30; // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22); // 2^52 is a precision boundary.
static double kValues[] = {0.1,
static std::vector<double> float64_vector() {
static const double nan = std::numeric_limits<double>::quiet_NaN();
static const double values[] = {
- 0.125, 0.25, 0.375, 0.5,
- 1.25, -1.75, 2, 5.125,
- 6.25, 0.0, -0.0, 982983.25,
- 888, 2147483647.0, -999.75, 3.1e7,
- -2e66, 3e-88, -2147483648.0, V8_INFINITY,
- -V8_INFINITY, nan, 2147483647.375, 2147483647.75,
- 2147483648.0, 2147483648.25, 2147483649.25, -2147483647.0,
- -2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
+ 0.125, 0.25, 0.375, 0.5, 1.25, -1.75, 2, 5.125, 6.25, 0.0, -0.0,
+ 982983.25, 888, 2147483647.0, -999.75, 3.1e7, -2e66, 3e-88,
+ -2147483648.0, V8_INFINITY, -V8_INFINITY, -nan, nan, 2147483647.375,
+ 2147483647.75, 2147483648.0, 2147483648.25, 2147483649.25,
+ -2147483647.0, -2147483647.125, -2147483647.875, -2147483648.25,
+ -2147483649.5};
return std::vector<double>(&values[0], &values[arraysize(values)]);
}
::testing::ValuesIn(kFAIs));
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVabsF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVabsF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
{
StreamBuilder m(this, kMachFloat32, kMachFloat32, kMachFloat32,
}
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);
}
-TEST_F(CommonOperatorReducerTest, PhiToFloat64MaxOrFloat64Min) {
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float32Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = p0;
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Float32Sub(), c0, p0);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachFloat32, 2), vtrue, vfalse, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Abs);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float64Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = p0;
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Float64Sub(), c0, p0);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Abs);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p1, p0, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(kMachFloat32, 2), p0, p1, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat32Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
- Reduction r1 =
- Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge),
- MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r1.Changed());
- EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
- Reduction r2 =
- Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge),
- MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+ Node* phi = graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge);
+ Reduction r = Reduce(phi, MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
}
}
-TEST_F(CommonOperatorReducerTest, SelectToFloat64MaxOrFloat64Min) {
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float32Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat32), check, p0,
+ graph()->NewNode(machine()->Float32Sub(), c0, p0));
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Abs);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float64Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat64), check, p0,
+ graph()->NewNode(machine()->Float64Sub(), c0, p0));
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Abs);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat32), check, p1, p0);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat32), check, p0, p1);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
- Reduction r1 =
- Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0),
- MachineOperatorBuilder::kFloat64Max);
- ASSERT_TRUE(r1.Changed());
- EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
- Reduction r2 =
- Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1),
- MachineOperatorBuilder::kFloat64Min);
- ASSERT_TRUE(r2.Changed());
- EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+ Node* select =
+ graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
}
} // namespace compiler
// Floating point operations.
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
PURE(Float32Add, 2, 0, 1), PURE(Float32Sub, 2, 0, 1),
PURE(Float32Mul, 2, 0, 1), PURE(Float32Div, 2, 0, 1),
- PURE(Float32Sqrt, 1, 0, 1), PURE(Float32Equal, 2, 0, 1),
- PURE(Float32LessThan, 2, 0, 1), PURE(Float32LessThanOrEqual, 2, 0, 1),
- PURE(Float32Max, 2, 0, 1), PURE(Float32Min, 2, 0, 1),
- PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
- PURE(Float64Mul, 2, 0, 1), PURE(Float64Div, 2, 0, 1),
- PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
+ PURE(Float32Abs, 1, 0, 1), PURE(Float32Sqrt, 1, 0, 1),
+ PURE(Float32Equal, 2, 0, 1), PURE(Float32LessThan, 2, 0, 1),
+ PURE(Float32LessThanOrEqual, 2, 0, 1), PURE(Float32Max, 2, 0, 1),
+ PURE(Float32Min, 2, 0, 1), PURE(Float64Add, 2, 0, 1),
+ PURE(Float64Sub, 2, 0, 1), PURE(Float64Mul, 2, 0, 1),
+ PURE(Float64Div, 2, 0, 1), PURE(Float64Mod, 2, 0, 1),
+ PURE(Float64Abs, 1, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(Float64Max, 2, 0, 1),
PURE(Float64Min, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Float32Max)
+IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float64Max)
IS_BINOP_MATCHER(Float64Min)
IS_BINOP_MATCHER(Float64Sub)
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float32Abs)
+IS_UNOP_MATCHER(Float64Abs)
IS_UNOP_MATCHER(Float64Sqrt)
IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
// Floating point operations.
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
}
+TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
StreamBuilder m(this, kMachFloat64, kMachFloat64);
Node* const p0 = m.Parameter(0);