__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSECvtss2sd:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSECvtsd2ss:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  IA32OperandGenerator g(this);
  // g.Use() lets the register allocator supply either a register or a
  // memory operand, so a spilled float32 input can be converted directly
  // from its stack slot instead of forcing a reload into a register.
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
}
break;
case kSSECvtss2sd:
- __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ if (instr->InputAt(0)->IsDoubleRegister()) {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ } else {
+ __ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
break;
case kSSECvtsd2ss:
if (instr->InputAt(0)->IsDoubleRegister()) {
void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  // g.Use() permits a register or a memory operand; the code generator's
  // IsDoubleRegister() check selects the matching cvtss2sd encoding, so a
  // spilled input no longer needs a reload.
  Emit(kSSECvtss2sd, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF3);
EMIT(0x0F);
void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
void cvtsi2sd(XMMRegister dst, const Operand& src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
+ void cvtss2sd(XMMRegister dst, const Operand& src);
+ void cvtss2sd(XMMRegister dst, XMMRegister src) {
+ cvtss2sd(dst, Operand(src));
+ }
void cvtsd2ss(XMMRegister dst, const Operand& src);
void cvtsd2ss(XMMRegister dst, XMMRegister src) {
cvtsd2ss(dst, Operand(src));
}
-
void addsd(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
}
+TEST(RunChangeFloat32ToFloat64_spilled) {
+ RawMachineAssemblerTester<int32_t> m;
+ const int kNumInputs = 32;
+ int32_t magic = 0x786234;
+ float input[kNumInputs];
+ double result[kNumInputs];
+ Node* input_node[kNumInputs];
+
+ for (int i = 0; i < kNumInputs; i++) {
+ input_node[i] =
+ m.Load(kMachFloat32, m.PointerConstant(&input), m.Int32Constant(i * 4));
+ }
+
+ for (int i = 0; i < kNumInputs; i++) {
+ m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(i * 8),
+ m.ChangeFloat32ToFloat64(input_node[i]));
+ }
+
+ m.Return(m.Int32Constant(magic));
+
+ for (int i = 0; i < kNumInputs; i++) {
+ input[i] = 100.9f + i;
+ }
+
+ CHECK_EQ(magic, m.Call());
+
+ for (int i = 0; i < kNumInputs; i++) {
+ CHECK_EQ(result[i], static_cast<double>(input[i]));
+ }
+}
+
+
TEST(RunTruncateFloat64ToFloat32) {
float actual = 0.0f;
double input = 0.0;
{
__ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtss2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvtss2sd(xmm1, xmm0);
__ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
// 128 bit move instructions.