switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
__ str(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst =
destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
- Constant src = g.ToConstant(source);
switch (src.type()) {
case Constant::kInt32:
__ mov(dst, Operand(src.ToInt32()));
case Constant::kInt64:
UNREACHABLE();
break;
+ case Constant::kFloat32:
+ __ Move(dst,
+ isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+ break;
case Constant::kFloat64:
__ Move(dst,
isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
break;
}
if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
- } else if (destination->IsDoubleRegister()) {
- DwVfpRegister result = g.ToDoubleRegister(destination);
- __ vmov(result, g.ToDouble(source));
+ } else if (src.type() == Constant::kFloat32) {
+ SwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat32Register(destination)
+ : kScratchDoubleReg.low();
+ // TODO(turbofan): Can we do better here?
+ __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ vmov(dst, ip);
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
} else {
- DCHECK(destination->IsDoubleStackSlot());
- DwVfpRegister temp = kScratchDoubleReg;
- __ vmov(temp, g.ToDouble(source));
- __ vstr(temp, g.ToMemOperand(destination));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DwVfpRegister dst = destination->IsDoubleRegister()
+ ? g.ToFloat64Register(destination)
+ : kScratchDoubleReg;
+ __ vmov(dst, src.ToFloat64());
+ if (destination->IsDoubleStackSlot()) {
+ __ vstr(dst, g.ToMemOperand(destination));
+ }
}
} else if (source->IsDoubleRegister()) {
DwVfpRegister src = g.ToDoubleRegister(source);
return Operand(constant.ToInt32());
case Constant::kInt64:
return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
__ Str(temp, g.ToMemOperand(destination, masm()));
}
} else if (source->IsConstant()) {
- ConstantOperand* constant_source = ConstantOperand::cast(source);
+ Constant src = g.ToConstant(ConstantOperand::cast(source));
if (destination->IsRegister() || destination->IsStackSlot()) {
UseScratchRegisterScope scope(masm());
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: scope.AcquireX();
- Constant src = g.ToConstant(source);
if (src.type() == Constant::kHeapObject) {
__ LoadObject(dst, src.ToHeapObject());
} else {
if (destination->IsStackSlot()) {
__ Str(dst, g.ToMemOperand(destination, masm()));
}
- } else if (destination->IsDoubleRegister()) {
- FPRegister result = g.ToDoubleRegister(destination);
- __ Fmov(result, g.ToDouble(constant_source));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination).S();
+ __ Fmov(dst, src.ToFloat32());
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireS();
+ __ Fmov(temp, src.ToFloat32());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
} else {
- DCHECK(destination->IsDoubleStackSlot());
- UseScratchRegisterScope scope(masm());
- FPRegister temp = scope.AcquireD();
- __ Fmov(temp, g.ToDouble(constant_source));
- __ Str(temp, g.ToMemOperand(destination, masm()));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ if (destination->IsDoubleRegister()) {
+ FPRegister dst = g.ToDoubleRegister(destination);
+ __ Fmov(dst, src.ToFloat64());
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ UseScratchRegisterScope scope(masm());
+ FPRegister temp = scope.AcquireD();
+ __ Fmov(temp, src.ToFloat64());
+ __ Str(temp, g.ToMemOperand(destination, masm()));
+ }
}
} else if (source->IsDoubleRegister()) {
FPRegister src = g.ToDoubleRegister(source);
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
+ case Constant::kFloat32:
+ return Immediate(
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Immediate(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
} else if (destination->IsStackSlot()) {
Operand dst = g.ToOperand(destination);
__ mov(dst, g.ToImmediate(source));
+ } else if (src_constant.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ Immediate src(bit_cast<int32_t>(src_constant.ToFloat32()));
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ push(Immediate(src));
+ __ movss(dst, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize / 2));
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ mov(dst, src);
+ }
} else {
- double v = g.ToDouble(source);
+ DCHECK_EQ(Constant::kFloat64, src_constant.type());
+ double v = src_constant.ToFloat64();
uint64_t int_val = bit_cast<uint64_t, double>(v);
int32_t lower = static_cast<int32_t>(int_val);
int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
return Constant(OpParameter<int32_t>(node));
case IrOpcode::kInt64Constant:
return Constant(OpParameter<int64_t>(node));
- case IrOpcode::kNumberConstant:
+ case IrOpcode::kFloat32Constant:
+ return Constant(OpParameter<float>(node));
case IrOpcode::kFloat64Constant:
+ case IrOpcode::kNumberConstant:
return Constant(OpParameter<double>(node));
case IrOpcode::kExternalConstant:
return Constant(OpParameter<ExternalReference>(node));
// Return.
+// Checks instruction selection for returning a float32 constant: expects a
+// kArchNop that defines a CONSTANT output carrying the float32 value,
+// followed by a kArchRet consuming a single input.
+TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
+  const float kValue = 4.2f;
+  StreamBuilder m(this, kMachFloat32);
+  m.Return(m.Float32Constant(kValue));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+  EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
+  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
StreamBuilder m(this, kMachInt32, kMachInt32);
m.Return(m.Parameter(0));
// -----------------------------------------------------------------------------
// Calls with deoptimization.
+
+
TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
kMachAnyTagged);
return references_.find(virtual_register) != references_.end();
}
+  // Decodes the constant bound to |operand| and returns its float32 payload
+  // (Constant::ToFloat32 DCHECKs the constant's type is kFloat32).
+  float ToFloat32(const InstructionOperand* operand) const {
+    return ToConstant(operand).ToFloat32();
+  }
+
  // Decodes the constant bound to |operand| and returns its int32 payload.
  int32_t ToInt32(const InstructionOperand* operand) const {
    return ToConstant(operand).ToInt32();
  }
case IrOpcode::kInt64Constant:
case IrOpcode::kExternalConstant:
return VisitConstant(node);
+ case IrOpcode::kFloat32Constant:
+ return MarkAsDouble(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
return MarkAsDouble(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return os << constant.ToInt32();
case Constant::kInt64:
return os << constant.ToInt64() << "l";
+ case Constant::kFloat32:
+ return os << constant.ToFloat32() << "f";
case Constant::kFloat64:
return os << constant.ToFloat64();
case Constant::kExternalReference:
class Constant FINAL {
public:
- enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+ enum Type {
+ kInt32,
+ kInt64,
+ kFloat32,
+ kFloat64,
+ kExternalReference,
+ kHeapObject
+ };
explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+ explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
explicit Constant(ExternalReference ref)
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
return value_;
}
+  // Returns the float32 payload; only valid for kFloat32 constants. The
+  // value is stored bit-cast into the low 32 bits of |value_| (see the
+  // float constructor), so it is recovered with the inverse bit_cast.
+  float ToFloat32() const {
+    DCHECK_EQ(kFloat32, type());
+    return bit_cast<float>(static_cast<int32_t>(value_));
+  }
+
double ToFloat64() const {
if (type() == kInt32) return ToInt32();
DCHECK_EQ(kFloat64, type());
  // Builds a NumberConstant node via the common operator builder.
  Node* NumberConstant(double value) {
    return NewNode(common()->NumberConstant(value));
  }
+  // Builds a Float32Constant node via the common operator builder.
+  Node* Float32Constant(float value) {
+    return NewNode(common()->Float32Constant(value));
+  }
  // Builds a Float64Constant node via the common operator builder.
  Node* Float64Constant(double value) {
    return NewNode(common()->Float64Constant(value));
  }
case Constant::kInt64:
immediate.value = constant.ToInt64();
return immediate;
+ case Constant::kFloat32:
+ immediate.type = kImm64Handle;
+ immediate.handle =
+ isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED);
+ return immediate;
case Constant::kFloat64:
immediate.type = kImm64Handle;
immediate.handle =
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kInt64:
+ case Constant::kFloat32:
case Constant::kFloat64:
case Constant::kExternalReference:
case Constant::kHeapObject:
}
} else if (source->IsConstant()) {
ConstantOperand* constant_source = ConstantOperand::cast(source);
+ Constant src = g.ToConstant(constant_source);
if (destination->IsRegister() || destination->IsStackSlot()) {
Register dst = destination->IsRegister() ? g.ToRegister(destination)
: kScratchRegister;
if (destination->IsStackSlot()) {
__ movq(g.ToOperand(destination), kScratchRegister);
}
+ } else if (src.type() == Constant::kFloat32) {
+ // TODO(turbofan): Can we do better here?
+ __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32())));
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = g.ToDoubleRegister(destination);
+ __ movq(dst, kScratchRegister);
+ } else {
+ DCHECK(destination->IsDoubleStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ movl(dst, kScratchRegister);
+ }
} else {
- __ movq(kScratchRegister,
- bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ __ movq(kScratchRegister, bit_cast<int64_t>(src.ToFloat64()));
if (destination->IsDoubleRegister()) {
__ movq(g.ToDoubleRegister(destination), kScratchRegister);
} else {
}
}
+
+// Runs generated code that stores a Float32Constant through a pointer and
+// checks that the stored value round-trips for every float32 test input.
+TEST(RunFloat32Constant) {
+  FOR_FLOAT32_INPUTS(i) {
+    float expected = *i;
+    float actual = *i;
+    RawMachineAssemblerTester<int32_t> m;
+    m.StoreToPointer(&actual, kMachFloat32, m.Float32Constant(expected));
+    m.Return(m.Int32Constant(0));
+    CHECK_EQ(0, m.Call());
+    // NOTE(review): CHECK_EQ compares floats exactly; this assumes the
+    // input set contains no NaNs -- confirm FOR_FLOAT32_INPUTS excludes NaN.
+    CHECK_EQ(expected, actual);
+  }
+}
+
#endif // V8_TURBOFAN_TARGET