break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
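// pcmpeqd with identical source and destination yields all ones; the
// psrlq below leaves 0x7fffffff in the low 32 bits of each quadword,
// which clears the sign bit of the scalar float32 input under andps.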
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
__ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
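// Shifting all ones left by 31 leaves 0x80000000 in the low 32 bits of
// each quadword, so the xorps below flips the sign of the scalar input.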
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
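// All ones shifted right by 1 gives 0x7fffffffffffffff, which clears
// the float64 sign bit under andpd.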
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
__ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
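// All ones shifted left by 63 leaves only the float64 sign bit set, so
// xorpd flips the sign of the input.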
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
__ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
+ case kAVXFloat32Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat32Neg: {
+ // TODO(bmeurer): Use 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat64Abs: {
+ // TODO(bmeurer): Use 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
+ case kAVXFloat64Neg: {
+ // TODO(bmeurer): Use 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
+ break;
+ }
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
V(AVXFloat64Div) \
V(AVXFloat64Max) \
V(AVXFloat64Min) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
}
}
+
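+// Shared helper for the float Abs/Neg operations: the three-operand VEX
+// encodings are non-destructive, so with AVX the output register need
+// not alias the input and the input may even be a memory operand. The
+// SSE fallback overwrites its first operand, which forces the output to
+// be the same register as the input.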
+void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ IA32OperandGenerator g(selector);
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ }
+}
+
+
} // namespace
IA32OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
- Emit(kSSEFloat32Neg, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()));
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
+ kSSEFloat32Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
}
}
}
- Emit(kSSEFloat64Neg, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()));
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
+ kSSEFloat64Neg);
return;
}
VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
void InstructionSelector::VisitFloat32Abs(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
__ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
__ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- // TODO(turbofan): Add AVX version with relaxed register constraints.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
__ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kAVXFloat64Min:
ASSEMBLE_AVX_BINOP(vminsd);
break;
+ case kAVXFloat32Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 33);
+ CpuFeatureScope avx_scope(masm(), AVX);
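+ // The input may have been spilled; the VEX encoding can read it
+ // directly from memory, so dispatch on the operand kind instead of
+ // forcing a load into a register.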
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kAVXFloat32Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 31);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kAVXFloat64Abs: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psrlq(kScratchDoubleReg, 1);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
+ case kAVXFloat64Neg: {
+ // TODO(bmeurer): Use RIP relative 128-bit constants.
+ __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ psllq(kScratchDoubleReg, 63);
+ CpuFeatureScope avx_scope(masm(), AVX);
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputDoubleRegister(0));
+ } else {
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ }
+ break;
+ }
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
V(AVXFloat64Div) \
V(AVXFloat64Max) \
V(AVXFloat64Min) \
+ V(AVXFloat64Abs) \
+ V(AVXFloat64Neg) \
+ V(AVXFloat32Abs) \
+ V(AVXFloat32Neg) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
}
}
+
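+// As on ia32: prefer the non-destructive three-operand AVX encoding
+// when available, and fall back to the destructive SSE form, which pins
+// the output to the first input.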
+void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
+ ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ X64OperandGenerator g(selector);
+ if (selector->IsSupported(AVX)) {
+ selector->Emit(avx_opcode, g.DefineAsRegister(node), g.Use(input));
+ } else {
+ selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
+ }
+}
+
+
} // namespace
X64OperandGenerator g(this);
Float32BinopMatcher m(node);
if (m.left().IsMinusZero()) {
- Emit(kSSEFloat32Neg, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()));
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat32Neg,
+ kSSEFloat32Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
void InstructionSelector::VisitFloat32Abs(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEFloat32Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
}
}
}
}
- Emit(kSSEFloat64Neg, g.DefineSameAsFirst(node),
- g.UseRegister(m.right().node()));
+ VisitFloatUnop(this, node, m.right().node(), kAVXFloat64Neg,
+ kSSEFloat64Neg);
return;
}
VisitFloatBinop(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
void InstructionSelector::VisitFloat64Abs(Node* node) {
X64OperandGenerator g(this);
- Emit(kSSEFloat64Abs, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
}
}
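+// Generic emitters for VEX-encoded packed-float operations in the 0F
+// opcode map: the "ps" forms carry no SIMD prefix, the "pd" forms carry
+// the 0x66 prefix, and the caller supplies the opcode byte.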
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
+ const Operand& src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src1, kL128, kNone, k0F, kWIG);
+ EMIT(op);
+ emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
+ const Operand& src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src1, kL128, k66, k0F, kWIG);
+ EMIT(op);
+ emit_sse_operand(dst, src2);
+}
+
+
void Assembler::bmi1(byte op, Register reg, Register vreg, const Operand& rm) {
DCHECK(IsEnabled(BMI1));
EnsureSpace ensure_space(this);
}
void rorx(Register dst, const Operand& src, byte imm8);
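+// 0x54 and 0x57 are the 0F-map opcode bytes for ANDPS/ANDPD and
+// XORPS/XORPD; the macro below expands vandps/vandpd/vxorps/vxorpd
+// wrappers for both register and memory sources.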
+#define PACKED_OP_LIST(V) \
+ V(and, 0x54) \
+ V(xor, 0x57)
+
+#define AVX_PACKED_OP_DECLARE(name, opcode) \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vps(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vps(opcode, dst, src1, src2); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vpd(opcode, dst, src1, Operand(src2)); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vpd(opcode, dst, src1, src2); \
+ }
+
+ PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
default:
UnimplementedInstruction();
}
+ } else if (vex_none() && vex_0f()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x54:
+ AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x57:
+ AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ } else if (vex_66() && vex_0f()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x54:
+ AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x57:
+ AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
} else {
UnimplementedInstruction();
}
}
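+// VEX-encoded packed-float emitters for the 0F opcode map; vps uses no
+// SIMD prefix, vpd uses 0x66, and both encode 128-bit vector length.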
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
+ const Operand& src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
+ XMMRegister src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
+ const Operand& src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL128, k66, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
+
void Assembler::vucomiss(XMMRegister dst, XMMRegister src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
void rorxl(Register dst, Register src, byte imm8);
void rorxl(Register dst, const Operand& src, byte imm8);
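+// Opcode bytes in the 0F map: 0x54 is ANDPS/ANDPD, 0x57 is XORPS/XORPD.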
+#define PACKED_OP_LIST(V) \
+ V(and, 0x54) \
+ V(xor, 0x57)
+
+#define AVX_PACKED_OP_DECLARE(name, opcode) \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vps(opcode, dst, src1, src2); \
+ } \
+ void v##name##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vps(opcode, dst, src1, src2); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vpd(opcode, dst, src1, src2); \
+ } \
+ void v##name##pd(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vpd(opcode, dst, src1, src2); \
+ }
+
+ PACKED_OP_LIST(AVX_PACKED_OP_DECLARE);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
// Debugging
void Print();
int DisassemblerX64::AVXInstruction(byte* data) {
byte opcode = *data;
byte* current = data + 1;
- if (vex_0f() && opcode == 0x2e) {
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("vucomis%c %s,", vex_66() ? 'd' : 's',
- NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
- } else if (vex_66() && vex_0f38()) {
+ if (vex_66() && vex_0f38()) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
default:
UnimplementedInstruction();
}
+ } else if (vex_none() && vex_0f()) {
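+ // A VEX encoding with no SIMD prefix selects the packed-single
+ // ("ps") forms; the vex_66() branch below handles the packed-double
+ // ("pd") forms.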
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x2e:
+ AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x54:
+ AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x57:
+ AppendToBuffer("vxorps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
+ } else if (vex_66() && vex_0f()) {
+ int mod, regop, rm, vvvv = vex_vreg();
+ get_modrm(*current, &mod, &regop, &rm);
+ switch (opcode) {
+ case 0x2e:
+ AppendToBuffer("vucomisd %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x54:
+ AppendToBuffer("vandpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x57:
+ AppendToBuffer("vxorpd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
+ default:
+ UnimplementedInstruction();
+ }
} else {
UnimplementedInstruction();
}
__ vminss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmaxss(xmm0, xmm1, xmm2);
__ vmaxss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+ __ vandps(xmm0, xmm1, xmm2);
+ __ vandps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vxorps(xmm0, xmm1, xmm2);
+ __ vxorps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+ __ vandpd(xmm0, xmm1, xmm2);
+ __ vandpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vxorpd(xmm0, xmm1, xmm2);
+ __ vxorpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
}
}
__ vmaxsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
__ vucomisd(xmm9, xmm1);
__ vucomisd(xmm8, Operand(rbx, rdx, times_2, 10981));
+
+ __ vandps(xmm0, xmm9, xmm2);
+ __ vandps(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vxorps(xmm0, xmm1, xmm9);
+ __ vxorps(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+ __ vandpd(xmm0, xmm9, xmm2);
+ __ vandpd(xmm9, xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vxorpd(xmm0, xmm1, xmm9);
+ __ vxorpd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
}
}
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Abs(p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Abs(p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float32Abs) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Abs(p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float64Abs) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Abs(p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat32);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
- Node* const p0 = m.Parameter(0);
- Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
- m.Return(n);
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
- EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
- ASSERT_EQ(1U, s[0]->OutputCount());
- EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
- EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
}