#include "src/code-factory.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph-inl.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/representation-change.h"
#include "src/compiler/simplified-lowering.h"
return changer_->Float64OperatorFor(node->opcode());
}
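+
+ // Lowering a Number operation to an int32/uint32 machine operation is only
+ // safe when no use of the node can observe a value outside the target
+ // 32-bit range, or a -0 that the integer operation cannot produce. The
+ // predicates below express those checks in terms of the use types.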
+ bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
+ return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
+ }
+
+ bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
+ return BothInputsAre(node, Type::Unsigned32()) && !CanObserveNonUint32(use);
+ }
+
+ bool CanObserveNonInt32(MachineTypeUnion use) {
+ return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
+ }
+
+ bool CanObserveMinusZero(MachineTypeUnion use) {
+ // TODO(turbofan): technically Uint32 cannot observe minus zero either.
+ return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
+ }
+
+ bool CanObserveNonUint32(MachineTypeUnion use) {
+ return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
+ }
+
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, MachineTypeUnion use,
case IrOpcode::kNumberSubtract: {
// Add and subtract reduce to Int32Add/Sub if the inputs
// are already integers and all uses are truncating.
- if (BothInputsAre(node, Type::Signed32()) &&
- (use & (kTypeUint32 | kTypeNumber | kTypeAny)) == 0) {
+ if (CanLowerToInt32Binop(node, use)) {
// => signed Int32Add/Sub
VisitInt32Binop(node);
if (lower()) node->set_op(Int32Op(node));
- } else if (BothInputsAre(node, Type::Unsigned32()) &&
- (use & (kTypeInt32 | kTypeNumber | kTypeAny)) == 0) {
+ } else if (CanLowerToUint32Binop(node, use)) {
// => unsigned Int32Add/Sub
VisitUint32Binop(node);
if (lower()) node->set_op(Uint32Op(node));
}
break;
}
- case IrOpcode::kNumberMultiply:
- case IrOpcode::kNumberDivide:
+ case IrOpcode::kNumberMultiply: {
+ NumberMatcher right(node->InputAt(1));
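+ // With |constant| <= 2^20, the product with any int32 is at most
+ // 2^31 * 2^20 = 2^51 in magnitude and therefore stays exact in a double.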
+ if (right.IsInRange(-1048576, 1048576)) { // must fit double mantissa.
+ if (CanLowerToInt32Binop(node, use)) {
+ // => signed Int32Mul
+ VisitInt32Binop(node);
+ if (lower()) node->set_op(Int32Op(node));
+ break;
+ }
+ }
+ // => Float64Mul
+ VisitFloat64Binop(node);
+ if (lower()) node->set_op(Float64Op(node));
+ break;
+ }
+ case IrOpcode::kNumberDivide: {
+ NumberMatcher right(node->InputAt(1));
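+ // Divisors 0 and -1 stay on the float64 path: x / 0 yields Infinity or
+ // NaN, and kMinInt / -1 overflows int32.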
+ if (right.HasValue() && !right.Is(0) && !right.Is(-1)) {
+ if (CanLowerToInt32Binop(node, use)) {
+ // => signed Int32Div
+ VisitInt32Binop(node);
+ if (lower()) node->set_op(Int32Op(node));
+ break;
+ } else if (CanLowerToUint32Binop(node, use)) {
+ // => unsigned Uint32Div
+ VisitUint32Binop(node);
+ if (lower()) node->set_op(Uint32Op(node));
+ break;
+ }
+ }
+ // => Float64Div
+ VisitFloat64Binop(node);
+ if (lower()) node->set_op(Float64Op(node));
+ break;
+ }
case IrOpcode::kNumberModulus: {
- // Float64Mul/Div/Mod
+ NumberMatcher right(node->InputAt(1));
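+ // Divisors 0 and -1 stay on the float64 path: x % 0 is NaN, and
+ // kMinInt % -1 can fault at the machine level. The signed case must also
+ // not observe -0, which JS requires for e.g. -2 % 2.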
+ if (right.HasValue() && !right.Is(0) && !right.Is(-1)) {
+ if (BothInputsAre(node, Type::Signed32()) &&
+ !CanObserveMinusZero(use)) {
+ // => signed Int32Mod
+ VisitInt32Binop(node);
+ if (lower()) node->set_op(Int32Op(node));
+ break;
+ } else if (BothInputsAre(node, Type::Unsigned32())) {
+ // => unsigned Uint32Mod
+ VisitUint32Binop(node);
+ if (lower()) node->set_op(Uint32Op(node));
+ break;
+ }
+ }
+ // => Float64Mod
VisitFloat64Binop(node);
if (lower()) node->set_op(Float64Op(node));
break;
MachineTypeUnion use_rep = use & kRepMask;
Node* input = node->InputAt(0);
MachineTypeUnion in = GetInfo(input)->output;
- if (NodeProperties::GetBounds(input).upper->Is(Type::Signed32()) ||
- (in & kTypeMask) == kTypeInt32 || (in & kRepMask) == kRepWord32) {
- // If the input has type int32, or is already a word32, just change
- // representation if necessary.
+ if (NodeProperties::GetBounds(input).upper->Is(Type::Signed32())) {
+ // If the input has type int32, pass through representation.
VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if ((in & kTypeMask) == kTypeUint32 ||
+ (in & kTypeMask) == kTypeInt32 ||
+ (in & kRepMask) == kRepWord32) {
+ // Same bits either way: int32 and uint32 share the word32 representation,
+ // so just change representation if necessary.
+ VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format and perform truncation.
// TODO(turbofan): avoid a truncation with a smi check.
MachineTypeUnion use_rep = use & kRepMask;
Node* input = node->InputAt(0);
MachineTypeUnion in = GetInfo(input)->output;
- if (NodeProperties::GetBounds(input).upper->Is(Type::Unsigned32()) ||
- (in & kTypeMask) == kTypeUint32) {
- // If the input has type uint32, just change representation.
+ if (NodeProperties::GetBounds(input).upper->Is(Type::Unsigned32())) {
+ // If the input has type uint32, pass through representation.
VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if ((in & kTypeMask) == kTypeUint32 ||
+ (in & kTypeMask) == kTypeInt32 ||
+ (in & kRepMask) == kRepWord32) {
+ // Just change representation if necessary.
+ VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format and perform truncation.
// TODO(turbofan): avoid a truncation with a smi check.
}
void DeferReplacement(Node* node, Node* replacement) {
+ if (FLAG_trace_representation) {
+ TRACE(("defer replacement #%d:%s with #%d:%s\n", node->id(),
+ node->op()->mnemonic(), replacement->id(),
+ replacement->op()->mnemonic()));
+ }
if (replacement->id() < count_) {
// Replace with a previously existing node eagerly.
node->ReplaceUses(replacement);
Verifier::Run(this->graph());
}
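+
+ // Runs the generated code on {input}, boxed as a Number, and checks that
+ // the result has the JS value {expected}. Expected values outside the Smi
+ // range are skipped; see the TODO below.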
+ void CheckNumberCall(double expected, double input) {
+ // TODO(titzer): make calls to NewNumber work in cctests.
+ if (expected <= Smi::kMinValue) return;
+ if (expected >= Smi::kMaxValue) return;
+ Handle<Object> num = factory()->NewNumber(input);
+ Object* result = this->Call(*num);
+ CHECK(factory()->NewNumber(expected)->SameValue(result));
+ }
+
Factory* factory() { return this->isolate()->factory(); }
Heap* heap() { return this->isolate()->heap(); }
};
t.GenerateCode();
FOR_INT32_INPUTS(i) {
- Handle<HeapNumber> num = t.factory()->NewHeapNumber(*i);
int32_t x = 0 - *i;
- // TODO(titzer): make calls to NewHeapNumber work in cctests.
- if (x <= Smi::kMinValue) continue;
- if (x >= Smi::kMaxValue) continue;
- Handle<HeapNumber> expected = t.factory()->NewHeapNumber(x);
- Object* result = t.Call(*num);
- CHECK(expected->SameValue(result));
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+}
+
+
+TEST(NumberMultiply_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kInt32Mul, mul->opcode());
+ }
+}
+
+
+TEST(RunNumberMultiply_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000, 3000999};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ double k = static_cast<double>(constants[i]);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToInt32(t.Parameter(0));
+ Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
+ Node* trunc = t.NumberToInt32(mul);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_INT32_INPUTS(i) {
+ int32_t x = DoubleToInt32(static_cast<double>(*i) * k);
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+ }
+}
+
+
+TEST(RunNumberMultiply_TruncatingToUint32) {
+ uint32_t constants[] = {0, 1, 2, 3, 4, 100, 1000, 1024, 2048, 3000999};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ double k = static_cast<double>(constants[i]);
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToUint32(t.Parameter(0));
+ Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
+ Node* trunc = t.NumberToUint32(mul);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_UINT32_INPUTS(i) {
+ uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
}
}
}
if (Pipeline::SupportedTarget()) {
t.LowerAllNodesAndLowerChanges();
- {
- FILE* dot_file = fopen("/tmp/test.dot", "w+");
- OFStream dot_of(dot_file);
- dot_of << AsDOT(*t.jsgraph.graph());
- fclose(dot_file);
- }
t.GenerateCode();
FOR_UINT32_INPUTS(i) {
- Handle<HeapNumber> num =
- t.factory()->NewHeapNumber(static_cast<double>(*i));
- uint32_t x = *i / 2;
- // TODO(titzer): make calls to NewHeapNumber work in cctests.
- if (x >= static_cast<uint32_t>(Smi::kMaxValue)) continue;
- Handle<HeapNumber> expected =
- t.factory()->NewHeapNumber(static_cast<double>(x));
- Object* result = t.Call(*num);
- CHECK(expected->SameValue(result));
+ uint32_t x = DoubleToUint32(*i / 2.0);
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+}
+
+
+TEST(NumberMultiply_ConstantOutOfRange) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(1000000023);
+ Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
+ t.Return(trunc);
+ t.Lower();
+
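+ // 1000000023 exceeds the 2^20 bound checked by the lowering (the exact
+ // product could leave the 53-bit mantissa), so the multiply stays float64.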
+ CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
+}
+
+
+TEST(NumberMultiply_NonTruncating) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(111);
+ Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
+ t.Return(mul);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
+}
+
+
+TEST(NumberDivide_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), div);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kInt32Div, div->opcode());
+ }
+}
+
+
+TEST(RunNumberDivide_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int32_t k = constants[i];
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToInt32(t.Parameter(0));
+ Node* div = t.NumberDivide(num, t.jsgraph.Constant(k));
+ Node* trunc = t.NumberToInt32(div);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_INT32_INPUTS(i) {
+ if (*i == INT_MAX) continue; // exclude max int.
+ int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+ }
+}
+
+
+TEST(NumberDivide_TruncatingToUint32) {
+ double constants[] = {1, 3, 100, 1000, 100998348};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Unsigned32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), div);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kUint32Div, div->opcode());
+ }
+}
+
+
+TEST(RunNumberDivide_TruncatingToUint32) {
+ uint32_t constants[] = {100, 10, 1, 1, 2, 4, 1000, 1024, 2048};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ uint32_t k = constants[i];
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToUint32(t.Parameter(0));
+ Node* div = t.NumberDivide(num, t.jsgraph.Constant(static_cast<double>(k)));
+ Node* trunc = t.NumberToUint32(div);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_UINT32_INPUTS(i) {
+ uint32_t x = *i / k;
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
}
}
}
+
+
+TEST(NumberDivide_BadConstants) {
+ int32_t constants[] = {-1, 0};
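+ // These divisors are rejected by the NumberDivide lowering (0 yields
+ // Infinity/NaN, kMinInt / -1 overflows), so both graphs keep Float64Div.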
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), div);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Div, div->opcode());
+ }
+
+ {
+ TestingGraph t(Type::Unsigned32());
+ Node* k = t.jsgraph.Constant(0);
+ Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), div);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Div, div->opcode());
+ }
+}
+
+
+TEST(NumberModulus_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mod);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kInt32Mod, mod->opcode());
+ }
+}
+
+
+TEST(RunNumberModulus_TruncatingToInt32) {
+ int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int32_t k = constants[i];
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToInt32(t.Parameter(0));
+ Node* mod = t.NumberModulus(num, t.jsgraph.Constant(k));
+ Node* trunc = t.NumberToInt32(mod);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_INT32_INPUTS(i) {
+ if (*i == INT_MAX) continue; // exclude max int.
+ int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+ }
+}
+
+
+TEST(NumberModulus_TruncatingToUint32) {
+ double constants[] = {1, 3, 100, 1000, 100998348};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Unsigned32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kUint32Mod, mod->opcode());
+ }
+}
+
+
+TEST(RunNumberModulus_TruncatingToUint32) {
+ uint32_t constants[] = {1, 2, 100, 1000, 1024, 2048};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ uint32_t k = constants[i];
+ SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+ Node* num = t.NumberToUint32(t.Parameter(0));
+ Node* mod =
+ t.NumberModulus(num, t.jsgraph.Constant(static_cast<double>(k)));
+ Node* trunc = t.NumberToUint32(mod);
+ t.Return(trunc);
+
+ if (Pipeline::SupportedTarget()) {
+ t.LowerAllNodesAndLowerChanges();
+ t.GenerateCode();
+
+ FOR_UINT32_INPUTS(i) {
+ uint32_t x = *i % k;
+ t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
+ }
+ }
+ }
+}
+
+
+TEST(NumberModulus_Int32) {
+ int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ t.Return(mod);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());  // -0 must stay observable.
+ }
+}
+
+
+TEST(NumberModulus_Uint32) {
+ double constants[] = {1, 3, 100, 1000, 100998348};
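+ // Unsigned modulus is exact and cannot produce -0, so it lowers to
+ // Uint32Mod even though the result flows into a non-truncating use.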
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Unsigned32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ t.Return(mod);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kUint32Mod, mod->opcode());
+ }
+}
+
+
+TEST(NumberModulus_BadConstants) {
+ int32_t constants[] = {-1, 0};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ TestingGraph t(Type::Signed32());
+ Node* k = t.jsgraph.Constant(constants[i]);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mod);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());
+ }
+
+ {
+ TestingGraph t(Type::Unsigned32());
+ Node* k = t.jsgraph.Constant(0);
+ Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
+ t.Return(trunc);
+ t.Lower();
+
+ CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode());
+ }
+}