}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
VisitRRFloat64(this, kArm64Float64Sqrt, node);
}
#include "src/compiler/common-operator-reducer.h"
+#include <algorithm>
+
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kEffectPhi:
- case IrOpcode::kPhi: {
- int const input_count = node->InputCount();
- if (input_count > 1) {
- Node* const replacement = node->InputAt(0);
- for (int i = 1; i < input_count - 1; ++i) {
- if (node->InputAt(i) != replacement) return NoChange();
- }
- return Replace(replacement);
- }
+ return ReduceEffectPhi(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
+ default:
break;
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ int const input_count = node->InputCount();
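+  // The last input of an EffectPhi is the control input (the merge), so
+  // only the effect inputs are compared against each other below.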
+ if (input_count > 1) {
+ Node* const replacement = node->InputAt(0);
+ for (int i = 1; i < input_count - 1; ++i) {
+ if (node->InputAt(i) != replacement) return NoChange();
}
- case IrOpcode::kSelect: {
- if (node->InputAt(1) == node->InputAt(2)) {
- return Replace(node->InputAt(1));
+ return Replace(replacement);
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReducePhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ int const input_count = node->InputCount();
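+  // First try to match a two-value phi over a diamond (two value inputs
+  // plus the merge control input) against Float64Min/Float64Max.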
+ if (input_count == 3) {
+ Node* vtrue = NodeProperties::GetValueInput(node, 0);
+ Node* vfalse = NodeProperties::GetValueInput(node, 1);
+ Node* merge = NodeProperties::GetControlInput(node);
+ Node* if_true = NodeProperties::GetControlInput(merge, 0);
+ Node* if_false = NodeProperties::GetControlInput(merge, 1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) {
+ std::swap(if_true, if_false);
+ std::swap(vtrue, vfalse);
+ }
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0)) {
+ Node* branch = if_true->InputAt(0);
+ Node* cond = branch->InputAt(0);
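+      // Match "a < b ? a : b" to Float64Min and "b < a ? a : b" to
+      // Float64Max. Both the diamond and Intel's minsd/maxsd yield the
+      // second operand when the comparison is false or unordered, so NaN
+      // and -0 behavior is preserved.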
+ if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ machine()->HasFloat64Min()) {
+ node->set_op(machine()->Float64Min());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ machine()->HasFloat64Max()) {
+ node->set_op(machine()->Float64Max());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ }
}
- break;
}
- default:
- break;
+ }
+ if (input_count > 1) {
+ Node* const replacement = node->InputAt(0);
+ for (int i = 1; i < input_count - 1; ++i) {
+ if (node->InputAt(i) != replacement) return NoChange();
+ }
+ return Replace(replacement);
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* cond = NodeProperties::GetValueInput(node, 0);
+ Node* vtrue = NodeProperties::GetValueInput(node, 1);
+ Node* vfalse = NodeProperties::GetValueInput(node, 2);
+ if (vtrue == vfalse) return Replace(vtrue);
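+  // Same pattern match as in ReducePhi: "a < b ? a : b" becomes Float64Min
+  // and "b < a ? a : b" becomes Float64Max.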
+ if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ machine()->HasFloat64Min()) {
+ node->set_op(machine()->Float64Min());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ machine()->HasFloat64Max()) {
+ node->set_op(machine()->Float64Max());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ }
}
return NoChange();
}
+
+CommonOperatorBuilder* CommonOperatorReducer::common() const {
+ return jsgraph()->common();
+}
+
+
+Graph* CommonOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+
+MachineOperatorBuilder* CommonOperatorReducer::machine() const {
+ return jsgraph()->machine();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
namespace internal {
namespace compiler {
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
// Performs strength reduction on nodes that have common operators.
class CommonOperatorReducer FINAL : public Reducer {
public:
- CommonOperatorReducer() {}
+ explicit CommonOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
~CommonOperatorReducer() FINAL {}
Reduction Reduce(Node* node) FINAL;
+
+ private:
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReducePhi(Node* node);
+ Reduction ReduceSelect(Node* node);
+
+ CommonOperatorBuilder* common() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* const jsgraph_;
};
} // namespace compiler
case kSSEFloat64Div:
__ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
+ case kSSEFloat64Max:
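+      // maxsd/minsd overwrite their first operand; the instruction selector
+      // pinned the output to input 0 via DefineSameAsFirst.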
+ __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat64Min:
+ __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
case kSSEFloat64Mod: {
// TODO(dcarney): alignment is wrong.
__ sub(esp, Immediate(kDoubleSize));
i.InputOperand(1));
break;
}
+ case kAVXFloat64Max: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat64Min: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSECvtss2sd) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
}
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ IA32OperandGenerator g(this);
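+  // The AVX form is non-destructive (three operands), so the result can be
+  // placed in any register; the SSE form overwrites its first operand and
+  // therefore needs DefineSameAsFirst.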
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ IA32OperandGenerator g(this);
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
IA32OperandGenerator g(this);
Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Max:
+ case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Sqrt:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
return MarkAsDouble(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Min:
+ return MarkAsDouble(node), VisitFloat64Min(node);
+ case IrOpcode::kFloat64Max:
+ return MarkAsDouble(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsDouble(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- kFloat64RoundDown = 1u << 0,
- kFloat64RoundTruncate = 1u << 1,
- kFloat64RoundTiesAway = 1u << 2,
- kInt32DivIsSafe = 1u << 3,
- kUint32DivIsSafe = 1u << 4,
- kWord32ShiftIsSafe = 1u << 5
+ kFloat64Max = 1u << 0,
+ kFloat64Min = 1u << 1,
+ kFloat64RoundDown = 1u << 2,
+ kFloat64RoundTruncate = 1u << 3,
+ kFloat64RoundTiesAway = 1u << 4,
+ kInt32DivIsSafe = 1u << 5,
+ kUint32DivIsSafe = 1u << 6,
+ kWord32ShiftIsSafe = 1u << 7
};
typedef base::Flags<Flag, unsigned> Flags;
const Operator* Float64LessThan();
const Operator* Float64LessThanOrEqual();
+  // Floating point min/max complying with IEEE 754.
+ const Operator* Float64Max();
+ const Operator* Float64Min();
+ bool HasFloat64Max() { return flags_ & kFloat64Max; }
+ bool HasFloat64Min() { return flags_ & kFloat64Min; }
+
// Floating point rounding.
const Operator* Float64RoundDown();
const Operator* Float64RoundTruncate();
V(Float64Mul) \
V(Float64Div) \
V(Float64Mod) \
+ V(Float64Max) \
+ V(Float64Min) \
V(Float64Sqrt) \
V(Float64RoundDown) \
V(Float64RoundTruncate) \
JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
JSIntrinsicLowering intrinsic_lowering(data->jsgraph());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &builtin_reducer);
ValueNumberingReducer vn_reducer(temp_zone);
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &simple_reducer);
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &simple_reducer);
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+    case IrOpcode::kFloat64Max:
+    case IrOpcode::kFloat64Min:
return VisitFloat64Binop(node);
case IrOpcode::kFloat64Sqrt:
case IrOpcode::kFloat64RoundDown:
}
+Bounds Typer::Visitor::TypeFloat64Max(Node* node) {
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Min(Node* node) {
+ return Bounds(Type::Number());
+}
+
+
Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
return Bounds(Type::Number());
}
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Max:
+ case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Sqrt:
case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
__ addq(rsp, Immediate(kDoubleSize));
break;
}
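+    // ASSEMBLE_DOUBLE_BINOP (defined above in this file) picks the reg,reg
+    // or reg,mem encoding depending on whether input 1 is in a register.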
+ case kSSEFloat64Max:
+ ASSEMBLE_DOUBLE_BINOP(maxsd);
+ break;
+ case kSSEFloat64Min:
+ ASSEMBLE_DOUBLE_BINOP(minsd);
+ break;
case kSSEFloat64Sqrt:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
case kAVXFloat64Div:
ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
break;
+ case kAVXFloat64Max:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vmaxsd);
+ break;
+ case kAVXFloat64Min:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vminsd);
+ break;
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
}
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ X64OperandGenerator g(this);
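+  // As on ia32: the AVX form is three-operand, while the SSE form
+  // overwrites its first operand and needs DefineSameAsFirst.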
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ X64OperandGenerator g(this);
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
flags |= MachineOperatorBuilder::kFloat64RoundDown |
}
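+// SSE2 scalar double min/max: MAXSD is encoded as F2 0F 5F /r and MINSD as
+// F2 0F 5D /r.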
+void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckhdq(XMMRegister dst, XMMRegister src);
+ void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
+ void maxsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
+ void minsd(XMMRegister dst, const Operand& src);
+
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5e, dst, src1, src2);
}
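+  // vmaxsd/vminsd reuse the VEX.F2 scalar-double helper vsd() with the same
+  // 0x5f/0x5d opcodes as their SSE2 counterparts.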
+ void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vmaxsd(dst, src1, Operand(src2));
+ }
+ void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vminsd(dst, src1, Operand(src2));
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
// Prefetch src position into cache level.
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5d:
+ AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5e:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5f:
+ AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
case 0x5C: mnem = "subsd"; break;
+    case 0x5D: mnem = "minsd"; break;
case 0x5E: mnem = "divsd"; break;
+    case 0x5F: mnem = "maxsd"; break;
}
data += 3;
int mod, regop, rm;
}
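+// SSE2 scalar double min/max (F2 0F 5F /r and F2 0F 5D /r); a REX prefix
+// is emitted only when xmm8-xmm15 operands require it.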
+void Assembler::maxsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckhdq(XMMRegister dst, XMMRegister src);
+ void maxsd(XMMRegister dst, XMMRegister src);
+ void maxsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, XMMRegister src);
+ void minsd(XMMRegister dst, const Operand& src);
+
// SSE 4.1 instruction
void extractps(Register dst, XMMRegister src, byte imm8);
void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5e, dst, src1, src2);
}
+ void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5d:
+ AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5e:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5f:
+ AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
return "mulsd";
case 0x5A: // F2 prefix.
return "cvtsd2ss";
case 0x5C: // F2 prefix.
return "subsd";
+    case 0x5D: // F2 prefix.
+      return "minsd";
case 0x5E: // F2 prefix.
return "divsd";
+ case 0x5F: // F2 prefix.
+ return "maxsd";
case 0xA2:
return "cpuid";
case 0xA5:
__ subsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ divsd(xmm1, xmm0);
__ divsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ minsd(xmm1, xmm0);
+ __ minsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ maxsd(xmm1, xmm0);
+ __ maxsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
__ vsubsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vdivsd(xmm0, xmm1, xmm2);
__ vdivsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vminsd(xmm0, xmm1, xmm2);
+ __ vminsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vmaxsd(xmm0, xmm1, xmm2);
+ __ vmaxsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
}
}
__ subsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ divsd(xmm1, xmm0);
__ divsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ minsd(xmm1, xmm0);
+ __ minsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ maxsd(xmm1, xmm0);
+ __ maxsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ andpd(xmm0, xmm1);
__ vsubsd(xmm0, xmm1, xmm2);
__ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vdivsd(xmm0, xmm1, xmm2);
- __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_2, 10000));
+ __ vminsd(xmm8, xmm1, xmm2);
+ __ vminsd(xmm9, xmm1, Operand(rbx, rcx, times_8, 10000));
+ __ vmaxsd(xmm8, xmm1, xmm2);
+ __ vmaxsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
}
}
#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
namespace internal {
class CommonOperatorReducerTest : public GraphTest {
public:
explicit CommonOperatorReducerTest(int num_parameters = 1)
- : GraphTest(num_parameters) {}
+ : GraphTest(num_parameters), machine_(zone()) {}
~CommonOperatorReducerTest() OVERRIDE {}
protected:
- Reduction Reduce(Node* node) {
- CommonOperatorReducer reducer;
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+ CommonOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
};
TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
int const value_input_count = input_count - 1;
TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ for (int i = 0; i < value_input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ Node* merge = graph()->NewNode(common()->Merge(value_input_count),
+ value_input_count, inputs);
for (int i = 0; i < value_input_count; ++i) {
inputs[i] = input;
}
- inputs[value_input_count] = graph()->start();
+ inputs[value_input_count] = merge;
Reduction r = Reduce(graph()->NewNode(
common()->Phi(type, value_input_count), input_count, inputs));
ASSERT_TRUE(r.Changed());
}
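+// A kMachFloat64 phi over a diamond that branches on Float64LessThan should
+// strength-reduce to Float64Max or Float64Min, but only when the backend
+// advertises the corresponding MachineOperatorBuilder flag.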
+TEST_F(CommonOperatorReducerTest, PhiToFloat64MaxOrFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Reduction r1 =
+ Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge),
+ MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
+ Reduction r2 =
+ Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge),
+ MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+}
+
+
// -----------------------------------------------------------------------------
// Select
}
}
+
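+// The same reduction applies directly to Select nodes, no diamond required.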
+TEST_F(CommonOperatorReducerTest, SelectToFloat64MaxOrFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Reduction r1 =
+ Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0),
+ MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
+ Reduction r2 =
+ Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1),
+ MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
PURE(Float64RoundTiesAway, 1, 0, 1), PURE(Float64ExtractLowWord32, 1, 0, 1),
PURE(Float64ExtractHighWord32, 1, 0, 1),
PURE(Float64InsertLowWord32, 2, 0, 1),
- PURE(Float64InsertHighWord32, 2, 0, 1)
+ PURE(Float64InsertHighWord32, 2, 0, 1), PURE(Float64Max, 2, 0, 1),
+ PURE(Float64Min, 2, 0, 1)
#undef PURE
};
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Float64Max)
+IS_BINOP_MATCHER(Float64Min)
IS_BINOP_MATCHER(Float64Sub)
IS_BINOP_MATCHER(Float64InsertLowWord32)
IS_BINOP_MATCHER(Float64InsertHighWord32)
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);