// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
#define kCompareReg kLithiumScratchReg2
+#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble
Register const result_;
};
+
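+// Out-of-line code entered when the rounding instruction produces a zero
+// result: it copies the sign bit of the original input into the result so
+// that negative inputs round to -0.0 rather than +0.0.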
+class OutOfLineRound : public OutOfLineCode {
+ public:
+ OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ // Handle the rounding-to-zero case, where the sign of the input has to be
+ // preserved: the high bits of the double input are already in kScratchReg,
+ // so isolate the sign bit and copy it into the result.
+ __ srl(at, kScratchReg, 31);
+ __ sll(at, at, 31);
+ __ Mthc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+ OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+ OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+ OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
} // namespace
} while (0)
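+// Rounds the double in InputDoubleRegister(0) with |asm_instr|, which
+// produces a 64-bit integer, and converts the result back to a double.
+// Inputs with no fractional part (|x| >= 2^52, NaN, infinity) are passed
+// through unchanged. If the rounded integer is zero, OutOfLine##operation
+// restores the sign of the input so that negative inputs yield -0.0.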
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
+ do { \
+ auto ool = \
+ new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ Mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
+ HeapNumber::kExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+ __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ Move(at, kScratchReg2, i.OutputDoubleRegister()); \
+ __ or_(at, at, kScratchReg2); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ } while (0)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
+ case kMipsFloat64Floor: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+ break;
+ }
+ case kMipsFloat64Ceil: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ break;
+ }
+ case kMipsFloat64RoundTruncate: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+ break;
+ }
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
V(MipsDivD) \
V(MipsModD) \
V(MipsSqrtD) \
+ V(MipsFloat64Floor) \
+ V(MipsFloat64Ceil) \
+ V(MipsFloat64RoundTruncate) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
}
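+// Emits an instruction that takes a single register input and produces a
+// single register output.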
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ MipsOperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
Node* node) {
MipsOperandGenerator g(selector);
}
-void InstructionSelector::VisitFloat64Floor(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+ VisitRR(this, kMipsFloat64Floor, node);
+}
-void InstructionSelector::VisitFloat64Ceil(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+ VisitRR(this, kMipsFloat64Ceil, node);
+}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- UNREACHABLE();
+ VisitRR(this, kMipsFloat64RoundTruncate, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
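+ // The 64-bit rounding instructions used by the code generator (floor_l_d,
+ // ceil_l_d, trunc_l_d) need a 64-bit FPU, which pre-R2 MIPS32 variants do
+ // not provide.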
+ if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
+ }
return MachineOperatorBuilder::kNoFlags;
}
Register const result_;
};
+
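+// Out-of-line code entered when the rounded result is zero; it restores the
+// sign bit of the original input so that negative inputs produce -0.0
+// rather than +0.0.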
+class OutOfLineRound : public OutOfLineCode {
+ public:
+ OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineCode(gen), result_(result) {}
+
+ void Generate() FINAL {
+ // Handle the rounding-to-zero case, where the sign of the input has to be
+ // preserved: the high bits of the double input are already in kScratchReg,
+ // so isolate the sign bit and copy it into the result.
+ __ dsrl(at, kScratchReg, 31);
+ __ dsll(at, at, 31);
+ __ mthc1(at, result_);
+ }
+
+ private:
+ DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+ OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+ OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+ OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+ : OutOfLineRound(gen, result) {}
+};
+
+
} // namespace
} while (0)
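+// Rounds the double in InputDoubleRegister(0) to a 64-bit integer with
+// |asm_instr| and converts it back to a double. Inputs with no fractional
+// part (|x| >= 2^52, NaN, infinity) are passed through unchanged; a zero
+// result goes through OutOfLine##operation to preserve the sign of the input.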
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation) \
+ do { \
+ auto ool = \
+ new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister()); \
+ Label done; \
+ __ mfhc1(kScratchReg, i.InputDoubleRegister(0)); \
+ __ Ext(at, kScratchReg, HeapNumber::kExponentShift, \
+ HeapNumber::kExponentBits); \
+ __ Branch(USE_DELAY_SLOT, &done, hs, at, \
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+ __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
+ __ dmfc1(at, i.OutputDoubleRegister()); \
+ __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg)); \
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister()); \
+ __ bind(ool->exit()); \
+ __ bind(&done); \
+ } while (0)
+
+
// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
MipsOperandConverter i(this, instr);
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMips64FloorD: {
- __ floor_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kMips64Float64Floor: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
}
- case kMips64CeilD: {
- __ ceil_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kMips64Float64Ceil: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
break;
}
- case kMips64RoundTruncateD: {
- __ trunc_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ case kMips64Float64RoundTruncate: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
break;
}
case kMips64SqrtD: {
V(Mips64DivD) \
V(Mips64ModD) \
V(Mips64SqrtD) \
- V(Mips64FloorD) \
- V(Mips64CeilD) \
- V(Mips64RoundTruncateD) \
+ V(Mips64Float64Floor) \
+ V(Mips64Float64Ceil) \
+ V(Mips64Float64RoundTruncate) \
V(Mips64CvtSD) \
V(Mips64CvtDS) \
V(Mips64TruncWD) \
void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMips64FloorD, node);
+ VisitRR(this, kMips64Float64Floor, node);
}
void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMips64CeilD, node);
+ VisitRR(this, kMips64Float64Ceil, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- VisitRR(this, kMips64RoundTruncateD, node);
+ VisitRR(this, kMips64Float64RoundTruncate, node);
}
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kNoFlags;
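+ // All MIPS64 variants provide the 64-bit FPU instructions that the
+ // rounding code generation relies on.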
+ return MachineOperatorBuilder::kFloat64Floor |
+ MachineOperatorBuilder::kFloat64Ceil |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
}
} // namespace compiler