1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/compiler/access-builder.h"
8 #include "src/compiler/change-lowering.h"
9 #include "src/compiler/control-builders.h"
10 #include "src/compiler/graph-reducer.h"
11 #include "src/compiler/graph-visualizer.h"
12 #include "src/compiler/node-properties.h"
13 #include "src/compiler/pipeline.h"
14 #include "src/compiler/representation-change.h"
15 #include "src/compiler/simplified-lowering.h"
16 #include "src/compiler/source-position.h"
17 #include "src/compiler/typer.h"
18 #include "src/compiler/verifier.h"
19 #include "src/execution.h"
20 #include "src/parser.h"
21 #include "src/rewriter.h"
22 #include "src/scopes.h"
23 #include "test/cctest/cctest.h"
24 #include "test/cctest/compiler/codegen-tester.h"
25 #include "test/cctest/compiler/graph-builder-tester.h"
26 #include "test/cctest/compiler/value-helper.h"
28 using namespace v8::internal;
29 using namespace v8::internal::compiler;
// Test harness: a GraphBuilderTester wired up with a Typer, JSGraph, source
// positions, and a SimplifiedLowering instance, so tests can build a graph,
// lower it, and call the generated code.
// NOTE(review): this extraction has interior lines elided (e.g. the machine()
// argument of the jsgraph initializer and several closing braces are missing).
31 template <typename ReturnType>
32 class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
34 SimplifiedLoweringTester(MachineType p0 = kMachNone,
35 MachineType p1 = kMachNone,
36 MachineType p2 = kMachNone,
37 MachineType p3 = kMachNone,
38 MachineType p4 = kMachNone)
39 : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
40 typer(this->isolate(), this->graph(), MaybeHandle<Context>()),
41 javascript(this->zone()),
42 jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
44 source_positions(jsgraph.graph()),
45 lowering(&jsgraph, this->zone(), &source_positions) {}
48 JSOperatorBuilder javascript;
50 SourcePositionTable source_positions;
51 SimplifiedLowering lowering;
// Runs only the simplified-lowering phase over the graph.
53 void LowerAllNodes() {
56 lowering.LowerAllNodes();
// Runs simplified lowering, then reduces the resulting Change* nodes with
// ChangeLowering and verifies the final graph.
59 void LowerAllNodesAndLowerChanges() {
62 lowering.LowerAllNodes();
64 ChangeLowering lowering(&jsgraph);
65 GraphReducer reducer(this->graph(), this->zone());
66 reducer.AddReducer(&lowering);
67 reducer.ReduceGraph();
68 Verifier::Run(this->graph());
// Calls the generated code with a heap number built from {input} and checks
// the result equals a heap number built from {expected}. Values outside the
// Smi range are skipped (see TODO below).
71 void CheckNumberCall(double expected, double input) {
72 // TODO(titzer): make calls to NewNumber work in cctests.
73 if (expected <= Smi::kMinValue) return;
74 if (expected >= Smi::kMaxValue) return;
75 Handle<Object> num = factory()->NewNumber(input);
76 Object* result = this->Call(*num);
77 CHECK(factory()->NewNumber(expected)->SameValue(result));
80 Factory* factory() { return this->isolate()->factory(); }
81 Heap* heap() { return this->isolate()->heap(); }
85 // TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
86 // TODO(titzer): test tagged representation for input to NumberToInt32.
// Builds a graph that loads a float64 from raw memory, applies NumberToInt32,
// stores the int32 result back, and checks it against DoubleToInt32 for a
// range of float64 inputs.
// NOTE(review): the declarations of {input}/{result} and some closing braces
// are elided in this extraction.
87 TEST(RunNumberToInt32_float64) {
88 // TODO(titzer): explicit load/stores here are only because of representations
91 SimplifiedLoweringTester<Object*> t;
92 FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
94 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
95 NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
96 Node* convert = t.NumberToInt32(loaded);
97 FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
99 t.StoreField(store, t.PointerConstant(&result), convert);
100 t.Return(t.jsgraph.TrueConstant());
104 if (Pipeline::SupportedTarget()) {
105 FOR_FLOAT64_INPUTS(i) {
107 int32_t expected = DoubleToInt32(*i);
109 CHECK_EQ(expected, result);
115 // TODO(titzer): test tagged representation for input to NumberToUint32.
// Same shape as RunNumberToInt32_float64, but for NumberToUint32 checked
// against DoubleToUint32.
116 TEST(RunNumberToUint32_float64) {
117 // TODO(titzer): explicit load/stores here are only because of representations
120 SimplifiedLoweringTester<Object*> t;
121 FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
123 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
124 NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
125 Node* convert = t.NumberToUint32(loaded);
126 FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
128 t.StoreField(store, t.PointerConstant(&result), convert);
129 t.Return(t.jsgraph.TrueConstant());
133 if (Pipeline::SupportedTarget()) {
134 FOR_FLOAT64_INPUTS(i) {
136 uint32_t expected = DoubleToUint32(*i);
// Compared as int32 so CHECK_EQ's signed comparison matches bit patterns.
138 CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
144 // Create a simple JSObject with a unique map.
// Each call compiles an object literal with a fresh property name, so every
// returned object gets its own distinct map.
// NOTE(review): the declaration of {buffer} is elided in this extraction.
145 static Handle<JSObject> TestObject() {
146 static int index = 0;
148 v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
149 return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
// Loads the map of the argument object and returns it; the result is checked
// against the object's map.
// NOTE(review): the TEST(...) header line (presumably RunLoadMap) is elided.
154 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
155 FieldAccess access = AccessBuilder::ForMap();
156 Node* load = t.LoadField(access, t.Parameter(0));
162 if (Pipeline::SupportedTarget()) {
163 Handle<JSObject> src = TestObject();
164 Handle<Map> src_map(src->map());
165 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
166 CHECK_EQ(*src_map, result);
// Stores parameter 0 (a map) into the map slot of parameter 1 (an object),
// then checks the destination object's map changed accordingly.
// NOTE(review): the TEST(...) header line (presumably RunStoreMap) is elided.
172 SimplifiedLoweringTester<int32_t> t(kMachAnyTagged, kMachAnyTagged);
173 FieldAccess access = AccessBuilder::ForMap();
174 t.StoreField(access, t.Parameter(1), t.Parameter(0));
175 t.Return(t.jsgraph.TrueConstant());
180 if (Pipeline::SupportedTarget()) {
181 Handle<JSObject> src = TestObject();
182 Handle<Map> src_map(src->map());
183 Handle<JSObject> dst = TestObject();
184 CHECK(src->map() != dst->map());
185 t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
186 CHECK(*src_map == dst->map());
// Loads the properties backing store of the argument object and checks the
// returned value is that FixedArray.
191 TEST(RunLoadProperties) {
192 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
193 FieldAccess access = AccessBuilder::ForJSObjectProperties();
194 Node* load = t.LoadField(access, t.Parameter(0));
200 if (Pipeline::SupportedTarget()) {
201 Handle<JSObject> src = TestObject();
202 Handle<FixedArray> src_props(src->properties());
203 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
204 CHECK_EQ(*src_props, result);
// Copies the map from parameter 0 into parameter 1 and checks both the
// returned map and the destination object's new map.
209 TEST(RunLoadStoreMap) {
210 SimplifiedLoweringTester<Object*> t(kMachAnyTagged, kMachAnyTagged);
211 FieldAccess access = AccessBuilder::ForMap();
212 Node* load = t.LoadField(access, t.Parameter(0));
213 t.StoreField(access, t.Parameter(1), load);
219 if (Pipeline::SupportedTarget()) {
220 Handle<JSObject> src = TestObject();
221 Handle<Map> src_map(src->map());
222 Handle<JSObject> dst = TestObject();
223 CHECK(src->map() != dst->map());
224 Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
225 CHECK(result->IsMap());
226 CHECK_EQ(*src_map, result);
227 CHECK(*src_map == dst->map());
// Copies element 0 of a FixedArray to element 1 and checks both slots hold
// the same object afterwards.
// NOTE(review): lines seeding the array (and the use of {dst}) are elided.
232 TEST(RunLoadStoreFixedArrayIndex) {
233 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
234 ElementAccess access = AccessBuilder::ForFixedArrayElement();
235 Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
236 t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
242 if (Pipeline::SupportedTarget()) {
243 Handle<FixedArray> array = t.factory()->NewFixedArray(2);
244 Handle<JSObject> src = TestObject();
245 Handle<JSObject> dst = TestObject();
248 Object* result = t.Call(*array);
249 CHECK_EQ(*src, result);
250 CHECK_EQ(*src, array->get(0));
251 CHECK_EQ(*src, array->get(1));
// Copies byte {index} of an ArrayBuffer's backing store to byte {index + 1}
// via untagged element accesses, then verifies only that one byte changed.
256 TEST(RunLoadStoreArrayBuffer) {
257 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
258 const int index = 12;
259 const int array_length = 2 * index;
260 ElementAccess buffer_access =
261 AccessBuilder::ForTypedArrayElement(v8::kExternalInt8Array, true);
262 Node* backing_store = t.LoadField(
263 AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
265 t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
266 t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
268 t.Return(t.jsgraph.TrueConstant());
273 if (Pipeline::SupportedTarget()) {
274 Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
275 Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
276 uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
// Seed the buffer; the loop body initializing data[i] is elided here.
277 for (int i = 0; i < array_length; i++) {
281 // TODO(titzer): raw pointers in call
282 Object* result = t.Call(*array);
283 CHECK_EQ(t.isolate()->heap()->true_value(), result);
284 for (int i = 0; i < array_length; i++) {
285 uint8_t expected = i;
286 if (i == (index + 1)) expected = index;
287 CHECK_EQ(data[i], expected);
// Loads a tagged (Smi) field from a raw (untagged) base pointer at several
// byte offsets and checks the loaded value tracks updates to the C array.
293 TEST(RunLoadFieldFromUntaggedBase) {
294 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
296 for (size_t i = 0; i < arraysize(smis); i++) {
297 int offset = static_cast<int>(i * sizeof(Smi*));
298 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
299 Type::Integral32(), kMachAnyTagged};
301 SimplifiedLoweringTester<Object*> t;
302 Node* load = t.LoadField(access, t.PointerConstant(smis));
306 if (!Pipeline::SupportedTarget()) continue;
308 for (int j = -5; j <= 5; j++) {
309 Smi* expected = Smi::FromInt(j);
// The line writing {expected} into smis[i] before the call is elided.
311 CHECK_EQ(expected, t.Call());
// Stores a tagged (Smi) parameter through an untagged base pointer at several
// byte offsets and checks the C array slot was overwritten.
317 TEST(RunStoreFieldToUntaggedBase) {
318 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
320 for (size_t i = 0; i < arraysize(smis); i++) {
321 int offset = static_cast<int>(i * sizeof(Smi*));
322 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
323 Type::Integral32(), kMachAnyTagged};
325 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
326 Node* p0 = t.Parameter(0);
327 t.StoreField(access, t.PointerConstant(smis), p0);
331 if (!Pipeline::SupportedTarget()) continue;
333 for (int j = -5; j <= 5; j++) {
334 Smi* expected = Smi::FromInt(j);
// Poison the slot first so a successful store is observable.
335 smis[i] = Smi::FromInt(-100);
336 CHECK_EQ(expected, t.Call(expected));
337 CHECK_EQ(expected, smis[i]);
// Loads a tagged element from an untagged base, sweeping both the header
// offset (i) and the element index (j), and checks the loaded value.
343 TEST(RunLoadElementFromUntaggedBase) {
344 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
345 Smi::FromInt(4), Smi::FromInt(5)};
347 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
348 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
349 int offset = static_cast<int>(i * sizeof(Smi*));
350 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
353 SimplifiedLoweringTester<Object*> t;
354 Node* load = t.LoadElement(access, t.PointerConstant(smis),
355 t.Int32Constant(static_cast<int>(j)));
359 if (!Pipeline::SupportedTarget()) continue;
361 for (int k = -5; k <= 5; k++) {
362 Smi* expected = Smi::FromInt(k);
363 smis[i + j] = expected;
364 CHECK_EQ(expected, t.Call());
// Stores a tagged parameter as an element through an untagged base, sweeping
// header offset (i) and element index (j), and checks the C array slot.
371 TEST(RunStoreElementFromUntaggedBase) {
372 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
373 Smi::FromInt(4), Smi::FromInt(5)};
375 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
376 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
377 int offset = static_cast<int>(i * sizeof(Smi*));
378 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
381 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
382 Node* p0 = t.Parameter(0);
383 t.StoreElement(access, t.PointerConstant(smis),
384 t.Int32Constant(static_cast<int>(j)), p0);
388 if (!Pipeline::SupportedTarget()) continue;
390 for (int k = -5; k <= 5; k++) {
391 Smi* expected = Smi::FromInt(k);
// Poison the slot first so a successful store is observable.
392 smis[i + j] = Smi::FromInt(-100);
393 CHECK_EQ(expected, t.Call(expected));
394 CHECK_EQ(expected, smis[i + j]);
397 // TODO(titzer): assert the contents of the array.
403 // A helper class for accessing fields and elements of various types, on both
404 // tagged and untagged base pointers. Contains both tagged and untagged buffers
405 // for testing direct memory access from generated code.
// NOTE(review): several member declarations (e.g. {tagged}, {rep},
// {num_elements}, {untagged_array}) and closing braces are elided in this
// extraction.
406 template <typename E>
407 class AccessTester : public HandleAndZoneScope {
411 E* original_elements;
414 Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
// Allocates both an untagged malloc'd buffer and an on-heap ByteArray of the
// same byte size; {t} selects tagged vs untagged mode, {r} the element rep.
416 AccessTester(bool t, MachineType r, E* orig, size_t num)
419 original_elements(orig),
421 untagged_array(static_cast<E*>(malloc(ByteSize()))),
422 tagged_array(main_isolate()->factory()->NewByteArray(
423 static_cast<int>(ByteSize()))) {
// Frees only the untagged buffer; {tagged_array} is GC-managed.
427 ~AccessTester() { free(untagged_array); }
429 size_t ByteSize() { return num_elements * sizeof(E); }
431 // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
432 void Reinitialize() {
433 memcpy(untagged_array, original_elements, ByteSize());
434 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
435 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
436 memcpy(raw, original_elements, ByteSize());
439 // Create and run code that copies the element in either {untagged_array}
440 // or {tagged_array} at index {from_index} to index {to_index}.
441 void RunCopyElement(int from_index, int to_index) {
442 // TODO(titzer): test element and field accesses where the base is not
443 // a constant in the code.
444 BoundsCheck(from_index);
445 BoundsCheck(to_index);
446 ElementAccess access = GetElementAccess();
448 SimplifiedLoweringTester<Object*> t;
449 Node* ptr = GetBaseNode(&t);
450 Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
451 t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
452 t.Return(t.jsgraph.TrueConstant());
456 if (Pipeline::SupportedTarget()) {
457 Object* result = t.Call();
458 CHECK_EQ(t.isolate()->heap()->true_value(), result);
462 // Create and run code that copies the field in either {untagged_array}
463 // or {tagged_array} at index {from_index} to index {to_index}.
464 void RunCopyField(int from_index, int to_index) {
465 BoundsCheck(from_index);
466 BoundsCheck(to_index);
467 FieldAccess from_access = GetFieldAccess(from_index);
468 FieldAccess to_access = GetFieldAccess(to_index);
470 SimplifiedLoweringTester<Object*> t;
471 Node* ptr = GetBaseNode(&t);
472 Node* load = t.LoadField(from_access, ptr);
473 t.StoreField(to_access, ptr, load);
474 t.Return(t.jsgraph.TrueConstant());
478 if (Pipeline::SupportedTarget()) {
479 Object* result = t.Call();
480 CHECK_EQ(t.isolate()->heap()->true_value(), result);
484 // Create and run code that copies the elements from {this} to {that}.
485 void RunCopyElements(AccessTester<E>* that) {
486 // TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
488 SimplifiedLoweringTester<Object*> t;
490 Node* one = t.Int32Constant(1);
491 Node* index = t.Int32Constant(0);
492 Node* limit = t.Int32Constant(static_cast<int>(num_elements));
493 t.environment()->Push(index);
494 Node* src = this->GetBaseNode(&t);
495 Node* dst = that->GetBaseNode(&t);
497 LoopBuilder loop(&t);
499 // Loop exit condition
500 index = t.environment()->Top();
501 Node* condition = t.Int32LessThan(index, limit);
502 loop.BreakUnless(condition);
503 // dst[index] = src[index]
504 index = t.environment()->Pop();
505 Node* load = t.LoadElement(this->GetElementAccess(), src, index);
506 t.StoreElement(that->GetElementAccess(), dst, index, load);
508 index = t.Int32Add(index, one);
509 t.environment()->Push(index);
514 index = t.environment()->Pop();
515 t.Return(t.jsgraph.TrueConstant());
519 if (Pipeline::SupportedTarget()) {
520 Object* result = t.Call();
521 CHECK_EQ(t.isolate()->heap()->true_value(), result);
// Reads element {index} from whichever buffer is active (tagged path lines
// are partially elided here).
526 E GetElement(int index) {
529 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
532 return untagged_array[index];
// Builds an ElementAccess matching the tagged/untagged mode of this tester.
537 ElementAccess GetElementAccess() {
538 ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
539 tagged ? FixedArrayBase::kHeaderSize : 0,
// Builds a FieldAccess for field slot {field} in the active buffer.
544 FieldAccess GetFieldAccess(int field) {
545 int offset = field * sizeof(E);
546 FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
547 offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
548 Handle<Name>(), Type::Any(), rep};
// Returns the base node for generated code: a HeapConstant for the tagged
// ByteArray, or a raw PointerConstant for the malloc'd buffer.
552 template <typename T>
553 Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
554 return tagged ? t->HeapConstant(tagged_array)
555 : t->PointerConstant(untagged_array);
558 void BoundsCheck(int index) {
560 CHECK_LT(index, static_cast<int>(num_elements));
561 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
// Driver: exercises AccessTester over both taggedness modes, both field and
// element accesses, and all adjacent copy pairs; then cross-copies whole
// arrays between every (tagged, untagged) source/destination combination.
566 template <typename E>
567 static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
568 int num_elements = static_cast<int>(num);
570 for (int taggedness = 0; taggedness < 2; taggedness++) {
571 AccessTester<E> a(taggedness == 1, rep, original_elements, num);
572 for (int field = 0; field < 2; field++) {
573 for (int i = 0; i < num_elements - 1; i++) {
576 a.RunCopyField(i, i + 1); // Test field read/write.
578 a.RunCopyElement(i, i + 1); // Test element read/write.
580 if (Pipeline::SupportedTarget()) { // verify.
581 for (int j = 0; j < num_elements; j++) {
// Slot i+1 should now hold element i's original value; others unchanged.
583 j == (i + 1) ? original_elements[i] : original_elements[j];
584 CHECK_EQ(expect, a.GetElement(j));
591 for (int tf = 0; tf < 2; tf++) {
592 for (int tt = 0; tt < 2; tt++) {
593 AccessTester<E> a(tf == 1, rep, original_elements, num);
594 AccessTester<E> b(tt == 1, rep, original_elements, num);
595 a.RunCopyElements(&b);
596 if (Pipeline::SupportedTarget()) { // verify.
597 for (int i = 0; i < num_elements; i++) {
598 CHECK_EQ(a.GetElement(i), b.GetElement(i));
// Field/element access round-trips for 8-bit data.
606 TEST(RunAccessTests_uint8) {
607 uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
608 0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
609 RunAccessTest<uint8_t>(kMachInt8, data, arraysize(data));
// Field/element access round-trips for 16-bit data.
613 TEST(RunAccessTests_uint16) {
614 uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
615 RunAccessTest<uint16_t>(kMachInt16, data, arraysize(data));
// Field/element access round-trips for 32-bit data.
619 TEST(RunAccessTests_int32) {
620 int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
621 RunAccessTest<int32_t>(kMachInt32, data, arraysize(data));
// Combines a high 32-bit part (shifted) with a hex low part into an int64.
625 #define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
// Field/element access round-trips for 64-bit data; only runs on 64-bit
// targets (kPointerSize check).
628 TEST(RunAccessTests_int64) {
629 if (kPointerSize != 8) return;
630 int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
631 V8_2PART_INT64(0x20212223, 24252627),
632 V8_2PART_INT64(0x30313233, 34353637),
633 V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
634 V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
635 RunAccessTest<int64_t>(kMachInt64, data, arraysize(data));
// Field/element access round-trips for float64 data.
639 TEST(RunAccessTests_float64) {
640 double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
641 RunAccessTest<double>(kMachFloat64, data, arraysize(data));
// Field/element access round-trips for tagged (Smi) data.
645 TEST(RunAccessTests_Smi) {
646 Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
647 Smi::FromInt(0), Smi::FromInt(666),
648 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
649 RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
653 // Fills in most of the nodes of the graph in order to make tests shorter.
// Pre-builds start/end/return and three typed parameters (p0..p2); provides
// helpers to lower the graph and to produce example producers/consumers of
// specific machine representations.
// NOTE(review): several member declarations (typer, jsgraph, start, end, ret,
// p0..p2), a Lower() header, and closing braces are elided in this extraction.
654 class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
657 JSOperatorBuilder javascript;
666 explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
667 Type* p2_type = Type::None())
668 : GraphAndBuilders(main_zone()),
669 typer(main_isolate(), graph(), MaybeHandle<Context>()),
670 javascript(main_zone()),
671 jsgraph(main_isolate(), graph(), common(), &javascript, machine()) {
672 start = graph()->NewNode(common()->Start(2));
673 graph()->SetStart(start);
675 graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
676 end = graph()->NewNode(common()->End(), ret);
677 graph()->SetEnd(end);
678 p0 = graph()->NewNode(common()->Parameter(0), start);
679 p1 = graph()->NewNode(common()->Parameter(1), start);
680 p2 = graph()->NewNode(common()->Parameter(2), start);
682 NodeProperties::SetBounds(p0, Bounds(p0_type));
683 NodeProperties::SetBounds(p1, Bounds(p1_type));
684 NodeProperties::SetBounds(p2, Bounds(p2_type));
// Builds op(p0, p1), lowers the graph, and checks the node was lowered to
// the {expected} opcode.
687 void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
688 Node* node = Return(graph()->NewNode(op, p0, p1));
690 CHECK_EQ(expected, node->opcode());
// Same, but the binop result flows through a truncation before the return.
693 void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
694 const Operator* trunc) {
695 Node* node = graph()->NewNode(op, p0, p1);
696 Return(graph()->NewNode(trunc, node));
698 CHECK_EQ(expected, node->opcode());
// Runs simplified lowering over the whole graph.
702 SourcePositionTable table(jsgraph.graph());
703 SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
706 // Inserts the node as the return value of the graph.
707 Node* Return(Node* node) {
708 ret->ReplaceInput(0, node);
712 // Inserts the node as the effect input to the return of the graph.
713 void Effect(Node* node) { ret->ReplaceInput(1, node); }
// Returns a node guaranteed to produce a value of the given machine
// type/representation, built from machine-level operators.
715 Node* ExampleWithOutput(MachineType type) {
716 // TODO(titzer): use parameters with guaranteed representations.
717 if (type & kTypeInt32) {
718 return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
719 jsgraph.Int32Constant(1));
720 } else if (type & kTypeUint32) {
721 return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
722 jsgraph.Int32Constant(1));
723 } else if (type & kRepFloat64) {
724 return graph()->NewNode(machine()->Float64Add(),
725 jsgraph.Float64Constant(1),
726 jsgraph.Float64Constant(1));
727 } else if (type & kRepBit) {
728 return graph()->NewNode(machine()->Word32Equal(),
729 jsgraph.Int32Constant(1),
730 jsgraph.Int32Constant(1));
731 } else if (type & kRepWord64) {
732 return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
735 CHECK(type & kRepTagged);
// Returns a node with both the given static type and machine representation
// by loading through an untagged FieldAccess of that representation.
740 Node* ExampleWithTypeAndRep(Type* type, MachineType mach_type) {
741 FieldAccess access = {kUntaggedBase, 0, Handle<Name>::null(), type,
743 // TODO(titzer): using loads here just to force the representation is ugly.
744 Node* node = graph()->NewNode(simplified()->LoadField(access),
745 jsgraph.IntPtrConstant(0), graph()->start(),
747 NodeProperties::SetBounds(node, Bounds(type));
// Returns a node that consumes {node} at the given machine representation,
// so lowering must insert any needed representation change.
751 Node* Use(Node* node, MachineType type) {
752 if (type & kTypeInt32) {
753 return graph()->NewNode(machine()->Int32LessThan(), node,
754 jsgraph.Int32Constant(1));
755 } else if (type & kTypeUint32) {
756 return graph()->NewNode(machine()->Uint32LessThan(), node,
757 jsgraph.Int32Constant(1));
758 } else if (type & kRepFloat64) {
759 return graph()->NewNode(machine()->Float64Add(), node,
760 jsgraph.Float64Constant(1));
761 } else if (type & kRepWord64) {
762 return graph()->NewNode(machine()->Int64LessThan(), node,
764 } else if (type & kRepWord32) {
765 return graph()->NewNode(machine()->Word32Equal(), node,
766 jsgraph.Int32Constant(1));
768 return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
769 jsgraph.TrueConstant());
// Branches on {cond} and reroutes the return's control through the merge,
// creating a kRepBit use of {cond}.
773 Node* Branch(Node* cond) {
774 Node* br = graph()->NewNode(common()->Branch(), cond, start);
775 Node* tb = graph()->NewNode(common()->IfTrue(), br);
776 Node* fb = graph()->NewNode(common()->IfFalse(), br);
777 Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
778 NodeProperties::ReplaceControlInput(ret, m);
782 Node* Int64Constant(int64_t v) {
783 return graph()->NewNode(common()->Int64Constant(v));
786 SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
787 MachineOperatorBuilder* machine() { return &main_machine_; }
788 CommonOperatorBuilder* common() { return &main_common_; }
789 Graph* graph() { return main_graph_; }
// BooleanNot of a bit value used as a bit should lower to Word32Equal(b, 0).
793 TEST(LowerBooleanNot_bit_bit) {
794 // BooleanNot(x: kRepBit) used as kRepBit
795 TestingGraph t(Type::Boolean());
796 Node* b = t.ExampleWithOutput(kRepBit);
797 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
798 Node* use = t.Branch(inv);
800 Node* cmp = use->InputAt(0);
801 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
802 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
803 Node* f = t.jsgraph.Int32Constant(0);
804 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// BooleanNot of a bit used as tagged: expect ChangeBitToBool over
// Word32Equal(b, 0).
808 TEST(LowerBooleanNot_bit_tagged) {
809 // BooleanNot(x: kRepBit) used as kRepTagged
810 TestingGraph t(Type::Boolean());
811 Node* b = t.ExampleWithOutput(kRepBit);
812 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
813 Node* use = t.Use(inv, kRepTagged);
816 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
817 Node* cmp = use->InputAt(0)->InputAt(0);
818 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
819 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
820 Node* f = t.jsgraph.Int32Constant(0);
821 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// BooleanNot of a tagged boolean used as a bit should lower to
// WordEqual(b, false).
825 TEST(LowerBooleanNot_tagged_bit) {
826 // BooleanNot(x: kRepTagged) used as kRepBit
827 TestingGraph t(Type::Boolean());
829 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
830 Node* use = t.Branch(inv);
832 Node* cmp = use->InputAt(0);
833 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
834 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
835 Node* f = t.jsgraph.FalseConstant();
836 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// BooleanNot of a tagged boolean used as tagged: ChangeBitToBool over
// WordEqual(b, false).
840 TEST(LowerBooleanNot_tagged_tagged) {
841 // BooleanNot(x: kRepTagged) used as kRepTagged
842 TestingGraph t(Type::Boolean());
844 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
845 Node* use = t.Use(inv, kRepTagged);
848 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
849 Node* cmp = use->InputAt(0)->InputAt(0);
850 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
851 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
852 Node* f = t.jsgraph.FalseConstant();
853 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// BooleanToNumber of a bit used as int32 is a no-op: the bit feeds the use
// directly.
857 TEST(LowerBooleanToNumber_bit_int32) {
858 // BooleanToNumber(x: kRepBit) used as kMachInt32
859 TestingGraph t(Type::Boolean());
860 Node* b = t.ExampleWithOutput(kRepBit);
861 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
862 Node* use = t.Use(cnv, kMachInt32);
865 CHECK_EQ(b, use->InputAt(0));
// BooleanToNumber of a tagged boolean used as int32 lowers to
// WordEqual(b, true).
869 TEST(LowerBooleanToNumber_tagged_int32) {
870 // BooleanToNumber(x: kRepTagged) used as kMachInt32
871 TestingGraph t(Type::Boolean());
873 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
874 Node* use = t.Use(cnv, kMachInt32);
877 CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
878 CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
879 Node* c = t.jsgraph.TrueConstant();
880 CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
// BooleanToNumber of a bit used as tagged: the bit is re-tagged via
// ChangeInt32ToTagged.
884 TEST(LowerBooleanToNumber_bit_tagged) {
885 // BooleanToNumber(x: kRepBit) used as kMachAnyTagged
886 TestingGraph t(Type::Boolean());
887 Node* b = t.ExampleWithOutput(kRepBit);
888 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
889 Node* use = t.Use(cnv, kMachAnyTagged);
892 CHECK_EQ(b, use->InputAt(0)->InputAt(0));
893 CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
// BooleanToNumber of a tagged boolean used as tagged: WordEqual(b, true)
// wrapped in ChangeInt32ToTagged.
897 TEST(LowerBooleanToNumber_tagged_tagged) {
898 // BooleanToNumber(x: kRepTagged) used as kMachAnyTagged
899 TestingGraph t(Type::Boolean());
901 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
902 Node* use = t.Use(cnv, kMachAnyTagged);
905 CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
906 CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
907 CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
908 CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
909 Node* c = t.jsgraph.TrueConstant();
910 CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
// Shared set of parameter types used by the Lower* tests below.
914 static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
915 Type::Number(), Type::Any()};
// With Signed32 inputs, number comparisons lower to int32 machine compares.
918 TEST(LowerNumberCmp_to_int32) {
919 TestingGraph t(Type::Signed32(), Type::Signed32());
921 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
922 t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
923 t.simplified()->NumberLessThan());
924 t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
925 t.simplified()->NumberLessThanOrEqual());
// With Unsigned32 inputs, number comparisons lower to uint32 machine compares.
929 TEST(LowerNumberCmp_to_uint32) {
930 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
932 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
933 t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
934 t.simplified()->NumberLessThan());
935 t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
936 t.simplified()->NumberLessThanOrEqual());
// With general Number/Any inputs, number comparisons lower to float64
// machine compares.
940 TEST(LowerNumberCmp_to_float64) {
941 static Type* types[] = {Type::Number(), Type::Any()};
943 for (size_t i = 0; i < arraysize(types); i++) {
944 TestingGraph t(types[i], types[i]);
946 t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
947 t.simplified()->NumberEqual());
948 t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
949 t.simplified()->NumberLessThan());
950 t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
951 t.simplified()->NumberLessThanOrEqual());
// NumberAdd/Subtract truncated via NumberToInt32 lower to Int32Add/Sub for
// all combinations of integral-ish input types.
956 TEST(LowerNumberAddSub_to_int32) {
957 HandleAndZoneScope scope;
958 Type* small_range = Type::Range(1, 10, scope.main_zone());
959 Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
960 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
963 for (size_t i = 0; i < arraysize(types); i++) {
964 for (size_t j = 0; j < arraysize(types); j++) {
965 TestingGraph t(types[i], types[j]);
966 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
967 t.simplified()->NumberAdd(),
968 t.simplified()->NumberToInt32());
969 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
970 t.simplified()->NumberSubtract(),
971 t.simplified()->NumberToInt32());
// Same as the int32 variant, but the truncation is NumberToUint32; the
// machine ops are still Int32Add/Sub (add/sub are sign-agnostic).
977 TEST(LowerNumberAddSub_to_uint32) {
978 HandleAndZoneScope scope;
979 Type* small_range = Type::Range(1, 10, scope.main_zone());
980 Type* large_range = Type::Range(-1e+13, 1e+14, scope.main_zone());
981 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
984 for (size_t i = 0; i < arraysize(types); i++) {
985 for (size_t j = 0; j < arraysize(types); j++) {
986 TestingGraph t(types[i], types[j]);
987 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
988 t.simplified()->NumberAdd(),
989 t.simplified()->NumberToUint32());
990 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
991 t.simplified()->NumberSubtract(),
992 t.simplified()->NumberToUint32());
// Without a truncating use, NumberAdd/Subtract lower to float64 arithmetic.
998 TEST(LowerNumberAddSub_to_float64) {
999 for (size_t i = 0; i < arraysize(test_types); i++) {
1000 TestingGraph t(test_types[i], test_types[i]);
1002 t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
1003 t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
1004 t.simplified()->NumberSubtract());
// NumberDivide/Modulus lower to float64 ops; the modulus check is skipped
// for Unsigned32 inputs.
1009 TEST(LowerNumberDivMod_to_float64) {
1010 for (size_t i = 0; i < arraysize(test_types); i++) {
1011 TestingGraph t(test_types[i], test_types[i]);
1013 t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
1014 if (!test_types[i]->Is(Type::Unsigned32())) {
1015 t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
1016 t.simplified()->NumberModulus());
// Asserts {node} is a {change}-opcode node whose first input is {of}.
1022 static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
1023 CHECK_EQ(change, node->opcode());
1024 CHECK_EQ(of, node->InputAt(0));
// NumberToInt32 of an already-Signed32 tagged value used as tagged is
// removed entirely.
1028 TEST(LowerNumberToInt32_to_nop) {
1029 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepTagged
1030 TestingGraph t(Type::Signed32());
1031 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1032 Node* use = t.Use(trunc, kRepTagged);
1035 CHECK_EQ(t.p0, use->InputAt(0));
// NumberToInt32 of a Signed32 tagged value used as float64 becomes
// ChangeTaggedToFloat64.
1039 TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
1040 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepFloat64
1041 TestingGraph t(Type::Signed32());
1042 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1043 Node* use = t.Use(trunc, kRepFloat64);
1046 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
// NumberToInt32 of a Signed32 tagged value used as word32 becomes
// ChangeTaggedToInt32.
1050 TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
1051 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
1052 TestingGraph t(Type::Signed32());
1053 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1054 Node* use = t.Use(trunc, kTypeInt32);
1057 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
// NumberToInt32 of a float64-representation Number used as int32 becomes
// TruncateFloat64ToInt32.
1061 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
1062 // NumberToInt32(x: kRepFloat64) used as kMachInt32
1063 TestingGraph t(Type::Number());
1064 Node* p0 = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
1065 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
1066 Node* use = t.Use(trunc, kMachInt32);
1069 CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
// NumberToInt32 of a tagged Number used as int32: expect
// TruncateFloat64ToInt32(ChangeTaggedToFloat64(p0)).
1073 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
1074 // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachInt32
1075 TestingGraph t(Type::Number());
1076 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1077 Node* use = t.Use(trunc, kMachInt32);
1080 Node* node = use->InputAt(0);
1081 CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
1082 Node* of = node->InputAt(0);
1083 CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
1084 CHECK_EQ(t.p0, of->InputAt(0));
// Unsigned analogue of LowerNumberToInt32_to_nop: NumberToUint32 of an
// Unsigned32-typed tagged value used as tagged is a no-op.
1088 TEST(LowerNumberToUint32_to_nop) {
1089 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
1090 TestingGraph t(Type::Unsigned32());
1091 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1092 Node* use = t.Use(trunc, kRepTagged);
1095 CHECK_EQ(t.p0, use->InputAt(0));
// A float64 use of NumberToUint32(unsigned32 tagged value) lowers to a
// single ChangeTaggedToFloat64 of the parameter.
1099 TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
1100 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepFloat64
1101 TestingGraph t(Type::Unsigned32());
1102 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1103 Node* use = t.Use(trunc, kRepFloat64);
1106 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
// A word32 use of NumberToUint32(unsigned32 tagged value) lowers to a
// single ChangeTaggedToUint32 of the parameter.
1110 TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
1111 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
1112 TestingGraph t(Type::Unsigned32());
1113 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1114 Node* use = t.Use(trunc, kTypeUint32);
1117 CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
// A float64 input used as uint32 truncates with TruncateFloat64ToInt32;
// the same machine instruction serves both signednesses.
1121 TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
1122 // NumberToUint32(x: kRepFloat64) used as kMachUint32
1123 TestingGraph t(Type::Number());
1124 Node* p0 = t.ExampleWithOutput(kMachFloat64);
1125 // TODO(titzer): run the typer here, or attach machine type to param.
1126 NodeProperties::SetBounds(p0, Bounds(Type::Number()));
1127 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
1128 Node* use = t.Use(trunc, kMachUint32);
1131 CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
// A tagged general Number used as uint32 needs ChangeTaggedToFloat64
// followed by TruncateFloat64ToInt32.
1135 TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
1136 // NumberToUint32(x: kTypeNumber | kRepTagged) used as kMachUint32
1137 TestingGraph t(Type::Number());
1138 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1139 Node* use = t.Use(trunc, kMachUint32);
1142 Node* node = use->InputAt(0);
1143 CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
1144 Node* of = node->InputAt(0);
1145 CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
1146 CHECK_EQ(t.p0, of->InputAt(0));
// Even when the graph's parameter type is Unsigned32, a float64-represented
// input used as word32 still goes through TruncateFloat64ToInt32.
1150 TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
1151 // NumberToUint32(x: kRepFloat64) used as kRepWord32
1152 TestingGraph t(Type::Unsigned32());
1153 Node* input = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
1154 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
1155 Node* use = t.Use(trunc, kRepWord32);
1158 CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
// Cross-product check: for both NumberToInt32 and NumberToUint32 on a
// float64-represented input, a word32 use (with any int32/uint32/none type
// flavor) lowers to the matching ChangeFloat64To(Int,Uint)32.
1162 TEST(LowerNumberToUI32_of_Float64_used_as_word32) {
1163 // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
1164 // kType(Int,Uint)32 | kRepWord32
1165 Type* types[] = {Type::Signed32(), Type::Unsigned32()};
1166 MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
1168 for (int i = 0; i < 2; i++) {
1169 for (int u = 0; u < 3; u++) {
1170 TestingGraph t(types[i]);
1171 Node* input = t.ExampleWithTypeAndRep(
1172 types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
1173 const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
1174 : t.simplified()->NumberToUint32();
1175 Node* trunc = t.graph()->NewNode(op, input);
1176 Node* use = t.Use(trunc, static_cast<MachineType>(kRepWord32 | mach[u]));
1179 IrOpcode::Value opcode = i == 0 ? IrOpcode::kChangeFloat64ToInt32
1180 : IrOpcode::kChangeFloat64ToUint32;
1181 CheckChangeOf(opcode, input, use->InputAt(0));
// Like the _used_as_word32 variant, but the truncation result feeds a
// tagged StoreField, so lowering must insert ChangeFloat64ToTagged on the
// stored value.
1187 TEST(LowerNumberToUI32_of_Float64_used_as_tagged) {
1188 // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
1189 // kType(Int,Uint)32 | kRepTagged
1190 Type* types[] = {Type::Signed32(), Type::Unsigned32(), Type::Any()};
1191 MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
1193 for (int i = 0; i < 2; i++) {
1194 for (int u = 0; u < 3; u++) {
1195 TestingGraph t(types[i]);
1196 Node* input = t.ExampleWithTypeAndRep(
1197 types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
1198 const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
1199 : t.simplified()->NumberToUint32();
1200 Node* trunc = t.graph()->NewNode(op, input);
1201 // TODO(titzer): we use the store here to force the representation.
1202 FieldAccess access = {kTaggedBase, 0, Handle<Name>(), types[u],
1203 static_cast<MachineType>(mach[u] | kRepTagged)};
1204 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1205 trunc, t.start, t.start);
1208 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, input, store->InputAt(2));
// ReferenceEqual compares object identity, so it lowers to the machine's
// pointer-width WordEqual.
1214 TEST(LowerReferenceEqual_to_wordeq) {
1215 TestingGraph t(Type::Any(), Type::Any());
1216 IrOpcode::Value opcode =
1217 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
1218 t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
// String comparisons lower to a runtime call whose result is compared
// against zero with the machine integer comparison; StringAdd lowers to a
// plain call. Requires a supported target because calls need linkage.
1222 TEST(LowerStringOps_to_call_and_compare) {
1223 if (Pipeline::SupportedTarget()) {
1224 // These tests need linkage for the calls.
1225 TestingGraph t(Type::String(), Type::String());
1226 IrOpcode::Value compare_eq =
1227 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
1228 IrOpcode::Value compare_lt =
1229 static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
1230 IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
1231 t.machine()->IntLessThanOrEqual()->opcode());
1232 t.CheckLoweringBinop(compare_eq, t.simplified()->StringEqual());
1233 t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
1234 t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
1235 t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringAdd());
// Builds a producer with representation |from| and a consumer wanting |to|,
// then asserts that lowering inserted exactly the |expected| change node
// between them.
1240 void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
1242 TestingGraph t(Type::Any());
1243 Node* in = t.ExampleWithOutput(from);
1244 Node* use = t.Use(in, to);
1247 CHECK_EQ(expected, use->InputAt(0)->opcode());
1248 CHECK_EQ(in, use->InputAt(0)->InputAt(0));
// Exercises every elementary representation change the lowering can insert
// between a producer and a consumer (float64/tagged/int32/uint32 pairs).
1252 TEST(InsertBasicChanges) {
1253 CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, kRepFloat64,
1255 CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, kRepFloat64,
1257 CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, kRepTagged, kTypeInt32);
1258 CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, kRepTagged,
1261 CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, kRepFloat64,
1263 CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, kRepTagged,
1266 CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, kTypeInt32,
1268 CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, kTypeInt32, kRepTagged);
1270 CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, kTypeUint32,
1272 CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, kTypeUint32,
// Builds p0 <op> p1, lowers, and asserts both operands were wrapped in
// |input_change| nodes and the result in an |output_change| node feeding
// the return.
1277 static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
1278 IrOpcode::Value input_change,
1279 IrOpcode::Value output_change) {
1280 Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
1283 CHECK_EQ(input_change, binop->InputAt(0)->opcode());
1284 CHECK_EQ(input_change, binop->InputAt(1)->opcode());
1285 CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
1286 CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
1287 CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
1288 CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
// Int32 machine binops on tagged Signed32 inputs get ChangeTaggedToInt32 on
// the inputs and ChangeInt32ToTagged on the output.
1292 TEST(InsertChangesAroundInt32Binops) {
1293 TestingGraph t(Type::Signed32(), Type::Signed32());
1295 const Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
1296 t.machine()->Int32Mul(), t.machine()->Int32Div(),
1297 t.machine()->Int32Mod(), t.machine()->Word32And(),
1298 t.machine()->Word32Or(), t.machine()->Word32Xor(),
1299 t.machine()->Word32Shl(), t.machine()->Word32Sar()};
1301 for (size_t i = 0; i < arraysize(ops); i++) {
1302 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1303 IrOpcode::kChangeInt32ToTagged);
// Int32 comparisons produce a bit, so the output change is ChangeBitToBool
// rather than an int32-to-tagged change.
1308 TEST(InsertChangesAroundInt32Cmp) {
1309 TestingGraph t(Type::Signed32(), Type::Signed32());
1311 const Operator* ops[] = {t.machine()->Int32LessThan(),
1312 t.machine()->Int32LessThanOrEqual()};
1314 for (size_t i = 0; i < arraysize(ops); i++) {
1315 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1316 IrOpcode::kChangeBitToBool);
// Unsigned analogue of InsertChangesAroundInt32Cmp: inputs are changed with
// ChangeTaggedToUint32, output with ChangeBitToBool.
1321 TEST(InsertChangesAroundUint32Cmp) {
1322 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
1324 const Operator* ops[] = {t.machine()->Uint32LessThan(),
1325 t.machine()->Uint32LessThanOrEqual()};
1327 for (size_t i = 0; i < arraysize(ops); i++) {
1328 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
1329 IrOpcode::kChangeBitToBool);
// Float64 machine binops on tagged Number inputs get ChangeTaggedToFloat64
// on the inputs and ChangeFloat64ToTagged on the output.
1334 TEST(InsertChangesAroundFloat64Binops) {
1335 TestingGraph t(Type::Number(), Type::Number());
1337 const Operator* ops[] = {
1338 t.machine()->Float64Add(), t.machine()->Float64Sub(),
1339 t.machine()->Float64Mul(), t.machine()->Float64Div(),
1340 t.machine()->Float64Mod(),
1343 for (size_t i = 0; i < arraysize(ops); i++) {
1344 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1345 IrOpcode::kChangeFloat64ToTagged);
// Float64 comparisons: float64 input changes plus a ChangeBitToBool on the
// boolean result.
1350 TEST(InsertChangesAroundFloat64Cmp) {
1351 TestingGraph t(Type::Number(), Type::Number());
1353 const Operator* ops[] = {t.machine()->Float64Equal(),
1354 t.machine()->Float64LessThan(),
1355 t.machine()->Float64LessThanOrEqual()};
1357 for (size_t i = 0; i < arraysize(ops); i++) {
1358 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1359 IrOpcode::kChangeBitToBool);
// Verifies the index input of a lowered field access is the constant
// offset with the heap-object tag subtracted.
1366 void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
1367 IntPtrMatcher mindex(load_or_store->InputAt(1));
1368 CHECK(mindex.Is(access.offset - access.tag()));
// Verifies the index computation of a lowered element access:
// (index << log2(elem_size)) + (header_size - tag), widened to 64 bits on
// 64-bit targets. Returns the node that produced the raw element index.
1372 Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
1373 Node* index = load_or_store->InputAt(1);
1374 if (kPointerSize == 8) {
1375 CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
1376 index = index->InputAt(0);
1379 Int32BinopMatcher mindex(index);
1380 CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
1381 CHECK(mindex.right().Is(access.header_size - access.tag()));
1383 const int element_size_shift = ElementSizeLog2Of(access.machine_type);
1384 if (element_size_shift) {
1385 Int32BinopMatcher shl(mindex.left().node());
1386 CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
1387 CHECK(shl.right().Is(element_size_shift));
1388 return shl.left().node();
1390 return mindex.left().node();
// Machine representations exercised by the Load/Store field and element
// lowering tests below.
// NOTE(review): the tail of this initializer (presumably kMachAnyTagged and
// the closing brace) is elided in this excerpt — confirm against the file.
1395 const MachineType kMachineReps[] = {kRepBit, kMachInt8, kMachInt16,
1396 kMachInt32, kMachInt64, kMachFloat64,
// LoadField must lower to a machine Load carrying the access's machine
// type, with the object as base and the usual offset arithmetic.
1402 TEST(LowerLoadField_to_load) {
1403 TestingGraph t(Type::Any(), Type::Signed32());
1405 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1406 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1407 Handle<Name>::null(), Type::Any(), kMachineReps[i]};
1410 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1411 Node* use = t.Use(load, kMachineReps[i]);
1414 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1415 CHECK_EQ(t.p0, load->InputAt(0));
1416 CheckFieldAccessArithmetic(access, load);
1418 MachineType rep = OpParameter<MachineType>(load);
1419 CHECK_EQ(kMachineReps[i], rep);
// StoreField must lower to a machine Store with the right write barrier:
// a full barrier for tagged representations, and no barrier when the
// stored value is statically known to be a Smi (second scope).
1424 TEST(LowerStoreField_to_store) {
1426 TestingGraph t(Type::Any(), Type::Signed32());
1428 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1429 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1430 Handle<Name>::null(), Type::Any(), kMachineReps[i]};
1433 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1434 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1435 val, t.start, t.start);
1438 CHECK_EQ(IrOpcode::kStore, store->opcode());
1439 CHECK_EQ(val, store->InputAt(2));
1440 CheckFieldAccessArithmetic(access, store);
1442 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1443 if (kMachineReps[i] & kRepTagged) {
1444 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
1446 CHECK_EQ(kMachineReps[i], rep.machine_type());
1450 TestingGraph t(Type::Any(),
1451 Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
1452 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1453 Handle<Name>::null(), Type::Any(), kMachAnyTagged};
1454 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1455 t.p1, t.start, t.start);
1458 CHECK_EQ(IrOpcode::kStore, store->opcode());
1459 CHECK_EQ(t.p1, store->InputAt(2));
1460 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1461 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
// LoadElement must lower to a machine Load with the element-index
// arithmetic checked by CheckElementAccessArithmetic.
1466 TEST(LowerLoadElement_to_load) {
1467 TestingGraph t(Type::Any(), Type::Signed32());
1469 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1470 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1471 Type::Any(), kMachineReps[i]};
1473 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1474 t.p1, t.start, t.start);
1475 Node* use = t.Use(load, kMachineReps[i]);
1478 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1479 CHECK_EQ(t.p0, load->InputAt(0));
1480 CheckElementAccessArithmetic(access, load);
1482 MachineType rep = OpParameter<MachineType>(load);
1483 CHECK_EQ(kMachineReps[i], rep);
// StoreElement must lower to a machine Store; tagged representations get a
// full write barrier, and a statically-Smi value (second scope) gets none.
1488 TEST(LowerStoreElement_to_store) {
1490 TestingGraph t(Type::Any(), Type::Signed32());
1492 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1493 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1494 Type::Any(), kMachineReps[i]};
1496 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1497 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
1498 t.p0, t.p1, val, t.start, t.start);
1501 CHECK_EQ(IrOpcode::kStore, store->opcode());
1502 CHECK_EQ(val, store->InputAt(2));
1503 CheckElementAccessArithmetic(access, store);
1505 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1506 if (kMachineReps[i] & kRepTagged) {
1507 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
1509 CHECK_EQ(kMachineReps[i], rep.machine_type());
1513 TestingGraph t(Type::Any(), Type::Signed32(),
1514 Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
1515 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1516 Type::Any(), kMachAnyTagged};
1517 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1518 t.p1, t.p2, t.start, t.start);
1521 CHECK_EQ(IrOpcode::kStore, store->opcode());
1522 CHECK_EQ(t.p2, store->InputAt(2));
1523 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1524 CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
// A tagged index feeding LoadElement must first be changed to int32 before
// the element-offset arithmetic.
1529 TEST(InsertChangeForLoadElementIndex) {
1530 // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
1531 // Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
1532 TestingGraph t(Type::Any(), Type::Signed32());
1533 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1536 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1537 t.p1, t.start, t.start);
1540 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1541 CHECK_EQ(t.p0, load->InputAt(0));
1543 Node* index = CheckElementAccessArithmetic(access, load);
1544 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
// Store analogue of InsertChangeForLoadElementIndex: the tagged index gets
// a ChangeTaggedToInt32 before the offset arithmetic.
1548 TEST(InsertChangeForStoreElementIndex) {
1549 // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
1550 // Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
1551 TestingGraph t(Type::Any(), Type::Signed32());
1552 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1556 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
1557 t.jsgraph.TrueConstant(), t.start, t.start);
1560 CHECK_EQ(IrOpcode::kStore, store->opcode());
1561 CHECK_EQ(t.p0, store->InputAt(0));
1563 Node* index = CheckElementAccessArithmetic(access, store);
1564 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
// A float64-typed element load whose result is returned tagged requires a
// ChangeFloat64ToTagged on the loaded value.
1568 TEST(InsertChangeForLoadElement) {
1569 // TODO(titzer): test all load/store representation change insertions.
1570 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1571 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1574 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1575 t.p1, t.start, t.start);
1578 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1579 CHECK_EQ(t.p0, load->InputAt(0));
1580 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
// Field analogue of InsertChangeForLoadElement: a float64 field load used
// as tagged gets a ChangeFloat64ToTagged on the result.
1584 TEST(InsertChangeForLoadField) {
1585 // TODO(titzer): test all load/store representation change insertions.
1586 TestingGraph t(Type::Any(), Type::Signed32());
1587 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1588 Handle<Name>::null(), Type::Any(), kMachFloat64};
1591 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1594 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1595 CHECK_EQ(t.p0, load->InputAt(0));
1596 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
// Storing a tagged value into a float64 element slot requires a
// ChangeTaggedToFloat64 on the stored value.
1600 TEST(InsertChangeForStoreElement) {
1601 // TODO(titzer): test all load/store representation change insertions.
1602 TestingGraph t(Type::Any(), Type::Signed32());
1603 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1607 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1608 t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
1612 CHECK_EQ(IrOpcode::kStore, store->opcode());
1613 CHECK_EQ(t.p0, store->InputAt(0));
1614 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
// Field analogue of InsertChangeForStoreElement: storing tagged into a
// float64 field requires ChangeTaggedToFloat64 on the value.
1618 TEST(InsertChangeForStoreField) {
1619 // TODO(titzer): test all load/store representation change insertions.
1620 TestingGraph t(Type::Any(), Type::Signed32());
1621 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1622 Handle<Name>::null(), Type::Any(), kMachFloat64};
1624 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1625 t.p1, t.start, t.start);
1629 CHECK_EQ(IrOpcode::kStore, store->opcode());
1630 CHECK_EQ(t.p0, store->InputAt(0));
1631 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
// NOTE(review): the enclosing TEST(...) header for this body is not visible
// in this excerpt — confirm against the full file. The body checks that a
// Phi over two field loads is retyped to the representation demanded by
// the use, instead of staying kMachAnyTagged.
1636 TestingGraph t(Type::Any(), Type::Signed32());
1637 static const MachineType kMachineTypes[] = {kMachInt32, kMachUint32,
1639 Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
1641 for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
1642 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1643 Handle<Name>::null(), kTypes[i], kMachineTypes[i]};
1646 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1648 t.graph()->NewNode(t.simplified()->LoadField(access), t.p1, t.start);
1649 Node* phi = t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), load0,
1651 t.Return(t.Use(phi, kMachineTypes[i]));
1654 CHECK_EQ(IrOpcode::kPhi, phi->opcode());
1655 CHECK_EQ(RepresentationOf(kMachineTypes[i]),
1656 RepresentationOf(OpParameter<MachineType>(phi)));
// End-to-end execution test: divide by -1 with a truncating use must match
// JavaScript ToInt32(x / -1) for all int32 inputs.
1661 TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
1662 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1663 Node* num = t.NumberToInt32(t.Parameter(0));
1664 Node* div = t.NumberDivide(num, t.jsgraph.Constant(-1));
1665 Node* trunc = t.NumberToInt32(div);
1668 if (Pipeline::SupportedTarget()) {
1669 t.LowerAllNodesAndLowerChanges();
1672 FOR_INT32_INPUTS(i) {
1674 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// With a truncating int32 use, NumberMultiply by a small constant lowers to
// the machine Int32Mul.
1680 TEST(NumberMultiply_TruncatingToInt32) {
1681 int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000};
1683 for (size_t i = 0; i < arraysize(constants); i++) {
1684 TestingGraph t(Type::Signed32());
1685 Node* k = t.jsgraph.Constant(constants[i]);
1686 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1687 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
1691 CHECK_EQ(IrOpcode::kInt32Mul, mul->opcode());
// Executes the truncating-multiply lowering and compares against
// DoubleToInt32 of the exact double product for all int32 inputs.
1696 TEST(RunNumberMultiply_TruncatingToInt32) {
1697 int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000, 3000999};
1699 for (size_t i = 0; i < arraysize(constants); i++) {
1700 double k = static_cast<double>(constants[i]);
1701 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1702 Node* num = t.NumberToInt32(t.Parameter(0));
1703 Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
1704 Node* trunc = t.NumberToInt32(mul);
1707 if (Pipeline::SupportedTarget()) {
1708 t.LowerAllNodesAndLowerChanges();
1711 FOR_INT32_INPUTS(i) {
1712 int32_t x = DoubleToInt32(static_cast<double>(*i) * k);
1713 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Unsigned analogue of RunNumberMultiply_TruncatingToInt32, checked against
// DoubleToUint32 of the exact double product.
1720 TEST(RunNumberMultiply_TruncatingToUint32) {
1721 uint32_t constants[] = {0, 1, 2, 3, 4, 100, 1000, 1024, 2048, 3000999};
1723 for (size_t i = 0; i < arraysize(constants); i++) {
1724 double k = static_cast<double>(constants[i]);
1725 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1726 Node* num = t.NumberToUint32(t.Parameter(0));
1727 Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
1728 Node* trunc = t.NumberToUint32(mul);
1731 if (Pipeline::SupportedTarget()) {
1732 t.LowerAllNodesAndLowerChanges();
1735 FOR_UINT32_INPUTS(i) {
1736 uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
1737 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Executes unsigned division by 2 with a truncating use across all uint32
// inputs.
1744 TEST(RunNumberDivide_2_TruncatingToUint32) {
1745 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1746 Node* num = t.NumberToUint32(t.Parameter(0));
1747 Node* div = t.NumberDivide(num, t.jsgraph.Constant(2));
1748 Node* trunc = t.NumberToUint32(div);
1751 if (Pipeline::SupportedTarget()) {
1752 t.LowerAllNodesAndLowerChanges();
1755 FOR_UINT32_INPUTS(i) {
1756 uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0));
1757 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// A multiplier too large for safe int32 strength reduction must keep the
// multiply in float64 (Float64Mul) even under a truncating use.
1763 TEST(NumberMultiply_ConstantOutOfRange) {
1764 TestingGraph t(Type::Signed32());
1765 Node* k = t.jsgraph.Constant(1000000023);
1766 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1767 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
1771 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
// Without a truncating use the multiply cannot become Int32Mul (overflow
// would be observable), so it stays Float64Mul.
1775 TEST(NumberMultiply_NonTruncating) {
1776 TestingGraph t(Type::Signed32());
1777 Node* k = t.jsgraph.Constant(111);
1778 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1782 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
// With a truncating int32 use and a nonzero constant divisor,
// NumberDivide lowers to the machine Int32Div.
1786 TEST(NumberDivide_TruncatingToInt32) {
1787 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1789 for (size_t i = 0; i < arraysize(constants); i++) {
1790 TestingGraph t(Type::Signed32());
1791 Node* k = t.jsgraph.Constant(constants[i]);
1792 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1793 Node* use = t.Use(div, kMachInt32);
1797 CHECK_EQ(IrOpcode::kInt32Div, use->InputAt(0)->opcode());
// Executes truncating signed division for a set of constant divisors,
// comparing against DoubleToInt32 of the exact quotient. INT_MAX is
// excluded by the loop body.
1802 TEST(RunNumberDivide_TruncatingToInt32) {
1803 int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
1805 for (size_t i = 0; i < arraysize(constants); i++) {
1806 int32_t k = constants[i];
1807 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1808 Node* num = t.NumberToInt32(t.Parameter(0));
1809 Node* div = t.NumberDivide(num, t.jsgraph.Constant(k));
1810 Node* trunc = t.NumberToInt32(div);
1813 if (Pipeline::SupportedTarget()) {
1814 t.LowerAllNodesAndLowerChanges();
1817 FOR_INT32_INPUTS(i) {
1818 if (*i == INT_MAX) continue; // exclude max int.
1819 int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
1820 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Unsigned analogue: a truncating uint32 use with a positive constant
// divisor lowers NumberDivide to Uint32Div.
1827 TEST(NumberDivide_TruncatingToUint32) {
1828 double constants[] = {1, 3, 100, 1000, 100998348};
1830 for (size_t i = 0; i < arraysize(constants); i++) {
1831 TestingGraph t(Type::Unsigned32());
1832 Node* k = t.jsgraph.Constant(constants[i]);
1833 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1834 Node* use = t.Use(div, kMachUint32);
1838 CHECK_EQ(IrOpcode::kUint32Div, use->InputAt(0)->opcode());
// Executes truncating unsigned division for a set of constant divisors;
// the expected value is plain C++ uint32 division.
1843 TEST(RunNumberDivide_TruncatingToUint32) {
1844 uint32_t constants[] = {100, 10, 1, 1, 2, 4, 1000, 1024, 2048};
1846 for (size_t i = 0; i < arraysize(constants); i++) {
1847 uint32_t k = constants[i];
1848 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1849 Node* num = t.NumberToUint32(t.Parameter(0));
1850 Node* div = t.NumberDivide(num, t.jsgraph.Constant(static_cast<double>(k)));
1851 Node* trunc = t.NumberToUint32(div);
1854 if (Pipeline::SupportedTarget()) {
1855 t.LowerAllNodesAndLowerChanges();
1858 FOR_UINT32_INPUTS(i) {
1859 uint32_t x = *i / k;
1860 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Divisors that cannot use the plain machine divide: -1 (INT_MIN / -1
// overflows, so it becomes 0 - x via Int32Sub) and 0 (division by zero
// folds to the constant 0 for both signed and unsigned cases).
1867 TEST(NumberDivide_BadConstants) {
1869 TestingGraph t(Type::Signed32());
1870 Node* k = t.jsgraph.Constant(-1);
1871 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1872 Node* use = t.Use(div, kMachInt32);
1876 CHECK_EQ(IrOpcode::kInt32Sub, use->InputAt(0)->opcode());
1880 TestingGraph t(Type::Signed32());
1881 Node* k = t.jsgraph.Constant(0);
1882 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1883 Node* use = t.Use(div, kMachInt32);
1887 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1888 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
1892 TestingGraph t(Type::Unsigned32());
1893 Node* k = t.jsgraph.Constant(0);
1894 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1895 Node* use = t.Use(div, kMachUint32);
1899 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1900 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
// With a truncating int32 use and a nonzero constant divisor,
// NumberModulus lowers to the machine Int32Mod.
1905 TEST(NumberModulus_TruncatingToInt32) {
1906 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1908 for (size_t i = 0; i < arraysize(constants); i++) {
1909 TestingGraph t(Type::Signed32());
1910 Node* k = t.jsgraph.Constant(constants[i]);
1911 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1912 Node* use = t.Use(mod, kMachInt32);
1916 CHECK_EQ(IrOpcode::kInt32Mod, use->InputAt(0)->opcode());
// Executes truncating signed modulus; expected value is DoubleToInt32 of
// std::fmod, which matches JavaScript % semantics on these inputs.
1921 TEST(RunNumberModulus_TruncatingToInt32) {
1922 int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
1924 for (size_t i = 0; i < arraysize(constants); i++) {
1925 int32_t k = constants[i];
1926 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1927 Node* num = t.NumberToInt32(t.Parameter(0));
1928 Node* mod = t.NumberModulus(num, t.jsgraph.Constant(k));
1929 Node* trunc = t.NumberToInt32(mod);
1932 if (Pipeline::SupportedTarget()) {
1933 t.LowerAllNodesAndLowerChanges();
1936 FOR_INT32_INPUTS(i) {
1937 if (*i == INT_MAX) continue; // exclude max int.
1938 int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
1939 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Unsigned analogue: a truncating uint32 use lowers NumberModulus to
// Uint32Mod.
1946 TEST(NumberModulus_TruncatingToUint32) {
1947 double constants[] = {1, 3, 100, 1000, 100998348};
1949 for (size_t i = 0; i < arraysize(constants); i++) {
1950 TestingGraph t(Type::Unsigned32());
1951 Node* k = t.jsgraph.Constant(constants[i]);
1952 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1953 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
1957 CHECK_EQ(IrOpcode::kUint32Mod, t.ret->InputAt(0)->InputAt(0)->opcode());
// Executes truncating unsigned modulus; expected value is plain C++
// uint32 remainder.
1962 TEST(RunNumberModulus_TruncatingToUint32) {
1963 uint32_t constants[] = {1, 2, 100, 1000, 1024, 2048};
1965 for (size_t i = 0; i < arraysize(constants); i++) {
1966 uint32_t k = constants[i];
1967 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1968 Node* num = t.NumberToUint32(t.Parameter(0));
1970 t.NumberModulus(num, t.jsgraph.Constant(static_cast<double>(k)));
1971 Node* trunc = t.NumberToUint32(mod);
1974 if (Pipeline::SupportedTarget()) {
1975 t.LowerAllNodesAndLowerChanges();
1978 FOR_UINT32_INPUTS(i) {
1979 uint32_t x = *i % k;
1980 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Without a truncating use, signed modulus must stay in float64 because
// Int32Mod could not produce the required -0 result.
1987 TEST(NumberModulus_Int32) {
1988 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1990 for (size_t i = 0; i < arraysize(constants); i++) {
1991 TestingGraph t(Type::Signed32());
1992 Node* k = t.jsgraph.Constant(constants[i]);
1993 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1997 CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode()); // Pesky -0 behavior.
// Unsigned32 inputs have no -0 hazard, so NumberModulus lowers to
// Uint32Mod for both int32- and uint32-flavored uses.
2002 TEST(NumberModulus_Uint32) {
2003 const double kConstants[] = {2, 100, 1000, 1024, 2048};
2004 const MachineType kTypes[] = {kMachInt32, kMachUint32};
2006 for (auto const type : kTypes) {
2007 for (auto const c : kConstants) {
2008 TestingGraph t(Type::Unsigned32());
2009 Node* k = t.jsgraph.Constant(c);
2010 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
2011 Node* use = t.Use(mod, type);
2015 CHECK_EQ(IrOpcode::kUint32Mod, use->InputAt(0)->opcode());
2021 TEST(PhiRepresentation) {
2022 HandleAndZoneScope scope;
2023 Zone* z = scope.main_zone();
2029 MachineTypeUnion expected;
2032 TestData test_data[] = {
2033 {Type::Signed32(), Type::Unsigned32(), kMachInt32,
2034 kRepWord32 | kTypeNumber},
2035 {Type::Signed32(), Type::Unsigned32(), kMachUint32,
2036 kRepWord32 | kTypeNumber},
2037 {Type::Signed32(), Type::Signed32(), kMachInt32, kMachInt32},
2038 {Type::Unsigned32(), Type::Unsigned32(), kMachInt32, kMachUint32},
2039 {Type::Number(), Type::Signed32(), kMachInt32, kMachFloat64},
2040 {Type::Signed32(), Type::String(), kMachInt32, kMachAnyTagged}};
2042 for (auto const d : test_data) {
2043 TestingGraph t(d.arg1, d.arg2, Type::Boolean());
2045 Node* br = t.graph()->NewNode(t.common()->Branch(), t.p2, t.start);
2046 Node* tb = t.graph()->NewNode(t.common()->IfTrue(), br);
2047 Node* fb = t.graph()->NewNode(t.common()->IfFalse(), br);
2048 Node* m = t.graph()->NewNode(t.common()->Merge(2), tb, fb);
2051 t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), t.p0, t.p1, m);
2053 Bounds phi_bounds = Bounds::Either(Bounds(d.arg1), Bounds(d.arg2), z);
2054 NodeProperties::SetBounds(phi, phi_bounds);
2056 Node* use = t.Use(phi, d.use);
2060 CHECK_EQ(d.expected, OpParameter<MachineType>(phi));