__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
+ case kMipsAbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
case kMipsSqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
+ case kMipsAbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
V(MipsMulS) \
V(MipsDivS) \
V(MipsModS) \
+ V(MipsAbsS) \
V(MipsSqrtS) \
V(MipsCmpD) \
V(MipsAddD) \
V(MipsMulD) \
V(MipsDivD) \
V(MipsModD) \
+ V(MipsAbsD) \
V(MipsSqrtD) \
V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMipsAbsS, node);
+}
-void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMipsAbsD, node);
+}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  // Float32Abs/Float64Abs are reported unconditionally (abs.s/abs.d are
  // selected for every supported MIPS variant).
  MachineOperatorBuilder::Flags flags =
      MachineOperatorBuilder::kFloat32Abs | MachineOperatorBuilder::kFloat64Abs;

  // The float64 round-down/truncate operators are only offered on r2/r6.
  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
    flags |= MachineOperatorBuilder::kFloat64RoundDown |
             MachineOperatorBuilder::kFloat64RoundTruncate;
  }
  return flags;
}
} // namespace compiler
__ MovFromFloatResult(i.OutputSingleRegister());
break;
}
+ case kMips64AbsS:
+ __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
case kMips64SqrtS: {
__ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
+ case kMips64AbsD:
+ __ abs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
V(Mips64MulS) \
V(Mips64DivS) \
V(Mips64ModS) \
+ V(Mips64AbsS) \
V(Mips64SqrtS) \
V(Mips64CmpD) \
V(Mips64AddD) \
V(Mips64MulD) \
V(Mips64DivD) \
V(Mips64ModD) \
+ V(Mips64AbsD) \
V(Mips64SqrtD) \
V(Mips64Float64RoundDown) \
V(Mips64Float64RoundTruncate) \
void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitFloat32Abs(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kMips64AbsS, node);
+}
-void InstructionSelector::VisitFloat64Abs(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kMips64AbsD, node);
+}
void InstructionSelector::VisitFloat32Sqrt(Node* node) {
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  // Abs and the float64 round-down/truncate operators are reported
  // unconditionally on MIPS64 (no architecture-variant check needed here).
  return MachineOperatorBuilder::kFloat32Abs |
         MachineOperatorBuilder::kFloat64Abs |
         MachineOperatorBuilder::kFloat64RoundDown |
         MachineOperatorBuilder::kFloat64RoundTruncate;
}
}
// abs.s: fd = |fs| (single precision).
// NOTE(review): reuses the ABS_D funct constant, matching abs_d below; the
// abs.fmt funct field appears to be shared across formats with the S format
// operand selecting single precision — confirm against the ISA constant
// definitions.
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}
+
+
// abs.d: fd = |fs| (double precision).
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
}
// abs.s: fd = |fs| (single precision).
// NOTE(review): reuses the ABS_D funct constant, matching abs_d below; the
// abs.fmt funct field appears to be shared across formats with the S format
// operand selecting single precision — confirm against the ISA constant
// definitions.
void Assembler::abs_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ABS_D);
}
+
+
// abs.d: fd = |fs| (double precision).
void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}
void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_s(FPURegister fd, FPURegister fs);
void abs_d(FPURegister fd, FPURegister fs);
void mov_d(FPURegister fd, FPURegister fs);
void neg_s(FPURegister fd, FPURegister fs);
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsAbsS, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsAbsD, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, kMachFloat32, kMachFloat32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64AbsS, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64AbsD, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8