1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/compiler/access-builder.h"
8 #include "src/compiler/change-lowering.h"
9 #include "src/compiler/control-builders.h"
10 #include "src/compiler/generic-node-inl.h"
11 #include "src/compiler/graph-reducer.h"
12 #include "src/compiler/graph-visualizer.h"
13 #include "src/compiler/node-properties-inl.h"
14 #include "src/compiler/pipeline.h"
15 #include "src/compiler/representation-change.h"
16 #include "src/compiler/simplified-lowering.h"
17 #include "src/compiler/typer.h"
18 #include "src/compiler/verifier.h"
19 #include "src/execution.h"
20 #include "src/parser.h"
21 #include "src/rewriter.h"
22 #include "src/scopes.h"
23 #include "test/cctest/cctest.h"
24 #include "test/cctest/compiler/codegen-tester.h"
25 #include "test/cctest/compiler/graph-builder-tester.h"
26 #include "test/cctest/compiler/value-helper.h"
28 using namespace v8::internal;
29 using namespace v8::internal::compiler;
// Test harness that builds a graph via GraphBuilderTester, runs the
// SimplifiedLowering phase over it, and can execute the resulting code.
// NOTE(review): this capture elides interior lines (member decls, braces);
// comments below describe only what the visible lines establish.
31 template <typename ReturnType>
32 class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
34 SimplifiedLoweringTester(MachineType p0 = kMachNone,
35 MachineType p1 = kMachNone,
36 MachineType p2 = kMachNone,
37 MachineType p3 = kMachNone,
38 MachineType p4 = kMachNone)
39 : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
40 typer(this->graph(), MaybeHandle<Context>()),
41 javascript(this->zone()),
42 jsgraph(this->graph(), this->common(), &javascript, this->machine()),
46 JSOperatorBuilder javascript;
48 SimplifiedLowering lowering;
// Run only the simplified-lowering phase on the built graph.
50 void LowerAllNodes() {
53 lowering.LowerAllNodes();
// Run simplified lowering, then additionally reduce the graph with
// ChangeLowering (eliminating Change* nodes) and verify the result.
56 void LowerAllNodesAndLowerChanges() {
59 lowering.LowerAllNodes();
61 Zone* zone = this->zone();
62 CompilationInfo info(zone->isolate(), zone);
64 zone, Linkage::GetSimplifiedCDescriptor(zone, this->machine_sig_));
// Shadows the member `lowering` intentionally: this one is a ChangeLowering.
65 ChangeLowering lowering(&jsgraph, &linkage);
66 GraphReducer reducer(this->graph());
67 reducer.AddReducer(&lowering);
68 reducer.ReduceGraph();
69 Verifier::Run(this->graph());
// Call the generated code with a freshly allocated heap number {input} and
// check the result equals {expected}. Out-of-Smi-range values are skipped
// (see TODO below).
72 void CheckNumberCall(double expected, double input) {
73 // TODO(titzer): make calls to NewNumber work in cctests.
74 if (expected <= Smi::kMinValue) return;
75 if (expected >= Smi::kMaxValue) return;
76 Handle<Object> num = factory()->NewNumber(input);
77 Object* result = this->Call(*num);
78 CHECK(factory()->NewNumber(expected)->SameValue(result));
81 Factory* factory() { return this->isolate()->factory(); }
82 Heap* heap() { return this->isolate()->heap(); }
86 // TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
87 // TODO(titzer): test tagged representation for input to NumberToInt32.
// Builds load -> NumberToInt32 -> store over untagged float64/int32 memory,
// lowers it, and checks the stored result against DoubleToInt32 for all
// float64 test inputs.
88 TEST(RunNumberToInt32_float64) {
89 // TODO(titzer): explicit load/stores here are only because of representations
92 SimplifiedLoweringTester<Object*> t;
93 FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
95 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
96 NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
97 Node* convert = t.NumberToInt32(loaded);
98 FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
100 t.StoreField(store, t.PointerConstant(&result), convert);
101 t.Return(t.jsgraph.TrueConstant());
105 if (Pipeline::SupportedTarget()) {
106 FOR_FLOAT64_INPUTS(i) {
108 int32_t expected = DoubleToInt32(*i);
110 CHECK_EQ(expected, result);
116 // TODO(titzer): test tagged representation for input to NumberToUint32.
// Same shape as RunNumberToInt32_float64 but for NumberToUint32: result is
// compared against DoubleToUint32 (both sides cast to int32_t for CHECK_EQ).
117 TEST(RunNumberToUint32_float64) {
118 // TODO(titzer): explicit load/stores here are only because of representations
121 SimplifiedLoweringTester<Object*> t;
122 FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
124 Node* loaded = t.LoadField(load, t.PointerConstant(&input));
125 NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
126 Node* convert = t.NumberToUint32(loaded);
127 FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
129 t.StoreField(store, t.PointerConstant(&result), convert);
130 t.Return(t.jsgraph.TrueConstant());
134 if (Pipeline::SupportedTarget()) {
135 FOR_FLOAT64_INPUTS(i) {
137 uint32_t expected = DoubleToUint32(*i);
139 CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
145 // Create a simple JSObject with a unique map.
// Each call compiles an object literal with a distinct property name
// ("a_<index>"), so every returned object gets its own map.
146 static Handle<JSObject> TestObject() {
147 static int index = 0;
149 v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
150 return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
// Fragment of a test (the TEST(...) header line is elided in this capture;
// presumably RunLoadMap): loads the map field of the argument object and
// checks the returned value is the object's map.
155 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
156 FieldAccess access = AccessBuilder::ForMap();
157 Node* load = t.LoadField(access, t.Parameter(0));
163 if (Pipeline::SupportedTarget()) {
164 Handle<JSObject> src = TestObject();
165 Handle<Map> src_map(src->map());
166 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
167 CHECK_EQ(*src_map, result);
// Fragment of a test (header elided; presumably RunStoreMap): stores
// parameter 0 (a map) into the map field of parameter 1, then verifies the
// destination object's map changed to the source map.
173 SimplifiedLoweringTester<int32_t> t(kMachAnyTagged, kMachAnyTagged);
174 FieldAccess access = AccessBuilder::ForMap();
175 t.StoreField(access, t.Parameter(1), t.Parameter(0));
176 t.Return(t.jsgraph.TrueConstant());
181 if (Pipeline::SupportedTarget()) {
182 Handle<JSObject> src = TestObject();
183 Handle<Map> src_map(src->map());
184 Handle<JSObject> dst = TestObject();
185 CHECK(src->map() != dst->map());
186 t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
187 CHECK(*src_map == dst->map());
// Loads the JSObject properties backing store of the argument and checks the
// returned value equals the object's properties array.
192 TEST(RunLoadProperties) {
193 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
194 FieldAccess access = AccessBuilder::ForJSObjectProperties();
195 Node* load = t.LoadField(access, t.Parameter(0));
201 if (Pipeline::SupportedTarget()) {
202 Handle<JSObject> src = TestObject();
203 Handle<FixedArray> src_props(src->properties());
204 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
205 CHECK_EQ(*src_props, result);
// Loads the map of parameter 0 and stores it into parameter 1, verifying the
// loaded value is returned and the destination's map is overwritten.
210 TEST(RunLoadStoreMap) {
211 SimplifiedLoweringTester<Object*> t(kMachAnyTagged, kMachAnyTagged);
212 FieldAccess access = AccessBuilder::ForMap();
213 Node* load = t.LoadField(access, t.Parameter(0));
214 t.StoreField(access, t.Parameter(1), load);
220 if (Pipeline::SupportedTarget()) {
221 Handle<JSObject> src = TestObject();
222 Handle<Map> src_map(src->map());
223 Handle<JSObject> dst = TestObject();
224 CHECK(src->map() != dst->map());
225 Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
226 CHECK(result->IsMap());
227 CHECK_EQ(*src_map, result);
228 CHECK(*src_map == dst->map());
// Loads element 0 of a FixedArray argument and stores it into element 1;
// afterwards both slots must hold the same object.
233 TEST(RunLoadStoreFixedArrayIndex) {
234 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
235 ElementAccess access = AccessBuilder::ForFixedArrayElement();
236 Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
238 t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
245 if (Pipeline::SupportedTarget()) {
246 Handle<FixedArray> array = t.factory()->NewFixedArray(2);
247 Handle<JSObject> src = TestObject();
248 Handle<JSObject> dst = TestObject();
251 Object* result = t.Call(*array);
252 CHECK_EQ(*src, result);
253 CHECK_EQ(*src, array->get(0));
254 CHECK_EQ(*src, array->get(1));
// Loads a byte from an ArrayBuffer's backing store at {index} and stores it
// at {index + 1}; then verifies only that one byte changed.
259 TEST(RunLoadStoreArrayBuffer) {
260 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
261 const int index = 12;
262 const int array_length = 2 * index;
263 ElementAccess buffer_access =
264 AccessBuilder::ForTypedArrayElement(v8::kExternalInt8Array, true);
// The backing store pointer is itself loaded from the JSArrayBuffer object.
265 Node* backing_store = t.LoadField(
266 AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
268 t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
269 t.Int32Constant(array_length));
270 t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
271 t.Int32Constant(array_length), load);
272 t.Return(t.jsgraph.TrueConstant());
277 if (Pipeline::SupportedTarget()) {
278 Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
279 Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
280 uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
281 for (int i = 0; i < array_length; i++) {
285 // TODO(titzer): raw pointers in call
286 Object* result = t.Call(*array);
287 CHECK_EQ(t.isolate()->heap()->true_value(), result);
// Every byte keeps its initialized value except slot index+1, which now
// holds the byte copied from slot {index}.
288 for (int i = 0; i < array_length; i++) {
289 uint8_t expected = i;
290 if (i == (index + 1)) expected = index;
291 CHECK_EQ(data[i], expected);
// For each offset into a raw Smi array, compiles a field load from the
// untagged base pointer and checks it observes values written directly to
// the array from C++.
297 TEST(RunLoadFieldFromUntaggedBase) {
298 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
300 for (size_t i = 0; i < arraysize(smis); i++) {
301 int offset = static_cast<int>(i * sizeof(Smi*));
302 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
303 Type::Integral32(), kMachAnyTagged};
305 SimplifiedLoweringTester<Object*> t;
306 Node* load = t.LoadField(access, t.PointerConstant(smis));
310 if (!Pipeline::SupportedTarget()) continue;
312 for (int j = -5; j <= 5; j++) {
313 Smi* expected = Smi::FromInt(j);
315 CHECK_EQ(expected, t.Call());
// Mirror of RunLoadFieldFromUntaggedBase: compiles a field store through an
// untagged base pointer and checks the raw array slot is overwritten.
321 TEST(RunStoreFieldToUntaggedBase) {
322 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
324 for (size_t i = 0; i < arraysize(smis); i++) {
325 int offset = static_cast<int>(i * sizeof(Smi*));
326 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
327 Type::Integral32(), kMachAnyTagged};
329 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
330 Node* p0 = t.Parameter(0);
331 t.StoreField(access, t.PointerConstant(smis), p0);
335 if (!Pipeline::SupportedTarget()) continue;
337 for (int j = -5; j <= 5; j++) {
338 Smi* expected = Smi::FromInt(j);
// Poison the slot first so the CHECK below proves the store happened.
339 smis[i] = Smi::FromInt(-100);
340 CHECK_EQ(expected, t.Call(expected));
341 CHECK_EQ(expected, smis[i]);
// Exercises element loads from an untagged base over all combinations of
// header offset {i} and element index {j} within the Smi array.
347 TEST(RunLoadElementFromUntaggedBase) {
348 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
349 Smi::FromInt(4), Smi::FromInt(5)};
351 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
352 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
353 int offset = static_cast<int>(i * sizeof(Smi*));
354 ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
355 Type::Integral32(), kMachAnyTagged};
357 SimplifiedLoweringTester<Object*> t;
358 Node* load = t.LoadElement(
359 access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
360 t.Int32Constant(static_cast<int>(arraysize(smis))));
364 if (!Pipeline::SupportedTarget()) continue;
366 for (int k = -5; k <= 5; k++) {
367 Smi* expected = Smi::FromInt(k);
368 smis[i + j] = expected;
369 CHECK_EQ(expected, t.Call());
// Mirror of RunLoadElementFromUntaggedBase for stores: writes the parameter
// into smis[i + j] via generated code and checks the slot was updated.
376 TEST(RunStoreElementFromUntaggedBase) {
377 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
378 Smi::FromInt(4), Smi::FromInt(5)};
380 for (size_t i = 0; i < arraysize(smis); i++) { // for header sizes
381 for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
382 int offset = static_cast<int>(i * sizeof(Smi*));
383 ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
384 Type::Integral32(), kMachAnyTagged};
386 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
387 Node* p0 = t.Parameter(0);
388 t.StoreElement(access, t.PointerConstant(smis),
389 t.Int32Constant(static_cast<int>(j)),
390 t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
394 if (!Pipeline::SupportedTarget()) continue;
396 for (int k = -5; k <= 5; k++) {
397 Smi* expected = Smi::FromInt(k);
// Poison the slot first so the CHECK below proves the store happened.
398 smis[i + j] = Smi::FromInt(-100);
399 CHECK_EQ(expected, t.Call(expected));
400 CHECK_EQ(expected, smis[i + j]);
403 // TODO(titzer): assert the contents of the array.
409 // A helper class for accessing fields and elements of various types, on both
410 // tagged and untagged base pointers. Contains both tagged and untagged buffers
411 // for testing direct memory access from generated code.
// NOTE(review): interior lines (member decls such as {tagged}, {rep},
// {num_elements}, {untagged_array}) are elided in this capture.
412 template <typename E>
413 class AccessTester : public HandleAndZoneScope {
417 E* original_elements;
420 Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
// {t}: use tagged base; {r}: machine representation of E; {orig}/{num}:
// reference data copied into both buffers.
422 AccessTester(bool t, MachineType r, E* orig, size_t num)
425 original_elements(orig),
427 untagged_array(static_cast<E*>(malloc(ByteSize()))),
428 tagged_array(main_isolate()->factory()->NewByteArray(
429 static_cast<int>(ByteSize()))) {
// Owns only the malloc'ed untagged buffer; tagged_array is GC-managed.
433 ~AccessTester() { free(untagged_array); }
435 size_t ByteSize() { return num_elements * sizeof(E); }
437 // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
438 void Reinitialize() {
439 memcpy(untagged_array, original_elements, ByteSize());
440 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
441 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
442 memcpy(raw, original_elements, ByteSize());
445 // Create and run code that copies the element in either {untagged_array}
446 // or {tagged_array} at index {from_index} to index {to_index}.
447 void RunCopyElement(int from_index, int to_index) {
448 // TODO(titzer): test element and field accesses where the base is not
449 // a constant in the code.
450 BoundsCheck(from_index);
451 BoundsCheck(to_index);
452 ElementAccess access = GetElementAccess();
454 SimplifiedLoweringTester<Object*> t;
455 Node* ptr = GetBaseNode(&t);
456 Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
457 t.Int32Constant(static_cast<int>(num_elements)));
458 t.StoreElement(access, ptr, t.Int32Constant(to_index),
459 t.Int32Constant(static_cast<int>(num_elements)), load);
460 t.Return(t.jsgraph.TrueConstant());
464 if (Pipeline::SupportedTarget()) {
465 Object* result = t.Call();
466 CHECK_EQ(t.isolate()->heap()->true_value(), result);
470 // Create and run code that copies the field in either {untagged_array}
471 // or {tagged_array} at index {from_index} to index {to_index}.
472 void RunCopyField(int from_index, int to_index) {
473 BoundsCheck(from_index);
474 BoundsCheck(to_index);
475 FieldAccess from_access = GetFieldAccess(from_index);
476 FieldAccess to_access = GetFieldAccess(to_index);
478 SimplifiedLoweringTester<Object*> t;
479 Node* ptr = GetBaseNode(&t);
480 Node* load = t.LoadField(from_access, ptr);
481 t.StoreField(to_access, ptr, load);
482 t.Return(t.jsgraph.TrueConstant());
486 if (Pipeline::SupportedTarget()) {
487 Object* result = t.Call();
488 CHECK_EQ(t.isolate()->heap()->true_value(), result);
492 // Create and run code that copies the elements from {this} to {that}.
// Builds an explicit loop (LoopBuilder) over [0, num_elements).
493 void RunCopyElements(AccessTester<E>* that) {
494 // TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
496 SimplifiedLoweringTester<Object*> t;
498 Node* one = t.Int32Constant(1);
499 Node* index = t.Int32Constant(0);
500 Node* limit = t.Int32Constant(static_cast<int>(num_elements));
501 t.environment()->Push(index);
502 Node* src = this->GetBaseNode(&t);
503 Node* dst = that->GetBaseNode(&t);
505 LoopBuilder loop(&t);
507 // Loop exit condition
508 index = t.environment()->Top();
509 Node* condition = t.Int32LessThan(index, limit);
510 loop.BreakUnless(condition);
511 // dst[index] = src[index]
512 index = t.environment()->Pop();
513 Node* load = t.LoadElement(this->GetElementAccess(), src, index);
514 t.StoreElement(that->GetElementAccess(), dst, index, load);
516 index = t.Int32Add(index, one);
517 t.environment()->Push(index);
522 index = t.environment()->Pop();
523 t.Return(t.jsgraph.TrueConstant());
527 if (Pipeline::SupportedTarget()) {
528 Object* result = t.Call();
529 CHECK_EQ(t.isolate()->heap()->true_value(), result);
// Read element {index} back from whichever buffer this tester targets.
534 E GetElement(int index) {
537 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
540 return untagged_array[index];
// Element access descriptor matching the tagged/untagged configuration.
545 ElementAccess GetElementAccess() {
546 ElementAccess access = {
547 kNoBoundsCheck, tagged ? kTaggedBase : kUntaggedBase,
548 tagged ? FixedArrayBase::kHeaderSize : 0, Type::Any(), rep};
// Field access descriptor for element {field}, offset by the header size
// when the base is tagged.
552 FieldAccess GetFieldAccess(int field) {
553 int offset = field * sizeof(E);
554 FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
555 offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
556 Handle<Name>(), Type::Any(), rep};
// Base-pointer node: a HeapConstant for the tagged buffer, a raw
// PointerConstant for the untagged one.
560 template <typename T>
561 Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
562 return tagged ? t->HeapConstant(tagged_array)
563 : t->PointerConstant(untagged_array);
566 void BoundsCheck(int index) {
568 CHECK_LT(index, static_cast<int>(num_elements));
569 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
// Driver: exercises AccessTester<E> for both taggedness settings, testing
// field and element copy-one-slot codegen, then cross-buffer element copies
// for all four (src-taggedness x dst-taggedness) combinations.
574 template <typename E>
575 static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
576 int num_elements = static_cast<int>(num);
578 for (int taggedness = 0; taggedness < 2; taggedness++) {
579 AccessTester<E> a(taggedness == 1, rep, original_elements, num);
580 for (int field = 0; field < 2; field++) {
581 for (int i = 0; i < num_elements - 1; i++) {
584 a.RunCopyField(i, i + 1); // Test field read/write.
586 a.RunCopyElement(i, i + 1); // Test element read/write.
588 if (Pipeline::SupportedTarget()) { // verify.
// After copying slot i to i+1, slot i+1 must mirror slot i's original
// value; all other slots are unchanged.
589 for (int j = 0; j < num_elements; j++) {
591 j == (i + 1) ? original_elements[i] : original_elements[j];
592 CHECK_EQ(expect, a.GetElement(j));
// Cross-buffer copies: source taggedness {tf} x destination taggedness {tt}.
599 for (int tf = 0; tf < 2; tf++) {
600 for (int tt = 0; tt < 2; tt++) {
601 AccessTester<E> a(tf == 1, rep, original_elements, num);
602 AccessTester<E> b(tt == 1, rep, original_elements, num);
603 a.RunCopyElements(&b);
604 if (Pipeline::SupportedTarget()) { // verify.
605 for (int i = 0; i < num_elements; i++) {
606 CHECK_EQ(a.GetElement(i), b.GetElement(i));
// Access tests over 8-bit data.
614 TEST(RunAccessTests_uint8) {
615 uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
616 0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
617 RunAccessTest<uint8_t>(kMachInt8, data, arraysize(data));
// Access tests over 16-bit data.
621 TEST(RunAccessTests_uint16) {
622 uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
623 RunAccessTest<uint16_t>(kMachInt16, data, arraysize(data));
// Access tests over 32-bit data, including INT32_MIN/MAX-adjacent values.
627 TEST(RunAccessTests_int32) {
628 int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
629 RunAccessTest<int32_t>(kMachInt32, data, arraysize(data));
// Composes a 64-bit constant from two 32-bit halves (hex literal suffix form).
633 #define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
// Access tests over 64-bit data; only meaningful on 64-bit targets.
636 TEST(RunAccessTests_int64) {
637 if (kPointerSize != 8) return;
638 int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
639 V8_2PART_INT64(0x20212223, 24252627),
640 V8_2PART_INT64(0x30313233, 34353637),
641 V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
642 V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
643 RunAccessTest<int64_t>(kMachInt64, data, arraysize(data));
// Access tests over float64 data.
647 TEST(RunAccessTests_float64) {
648 double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
649 RunAccessTest<double>(kMachFloat64, data, arraysize(data));
// Access tests over tagged Smi data, including the max Smi value.
653 TEST(RunAccessTests_Smi) {
654 Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
655 Smi::FromInt(0), Smi::FromInt(666),
656 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
657 RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
661 // Fills in most of the nodes of the graph in order to make tests shorter.
// Pre-builds start/end/return and up to three typed parameters (p0..p2), and
// offers helpers to lower the graph and assert resulting opcodes.
// NOTE(review): interior lines (field decls start/end/ret/p0..p2, the Lower()
// helper's signature, some closing braces) are elided in this capture.
662 class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
665 JSOperatorBuilder javascript;
674 explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
675 Type* p2_type = Type::None())
676 : GraphAndBuilders(main_zone()),
677 typer(graph(), MaybeHandle<Context>()),
678 javascript(main_zone()),
679 jsgraph(graph(), common(), &javascript, machine()) {
680 start = graph()->NewNode(common()->Start(2));
681 graph()->SetStart(start);
683 graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
684 end = graph()->NewNode(common()->End(), ret);
685 graph()->SetEnd(end);
686 p0 = graph()->NewNode(common()->Parameter(0), start);
687 p1 = graph()->NewNode(common()->Parameter(1), start);
688 p2 = graph()->NewNode(common()->Parameter(2), start);
690 NodeProperties::SetBounds(p0, Bounds(p0_type));
691 NodeProperties::SetBounds(p1, Bounds(p1_type));
692 NodeProperties::SetBounds(p2, Bounds(p2_type));
// Build op(p0, p1) as the return value, lower, and assert the node was
// rewritten to {expected}.
695 void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
696 Node* node = Return(graph()->NewNode(op, p0, p1));
698 CHECK_EQ(expected, node->opcode());
// Same, but the binop result flows through a truncation {trunc} before the
// return, which can enable a narrower lowering of the binop itself.
701 void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
702 const Operator* trunc) {
703 Node* node = graph()->NewNode(op, p0, p1);
704 Return(graph()->NewNode(trunc, node));
706 CHECK_EQ(expected, node->opcode());
710 SimplifiedLowering(&jsgraph).LowerAllNodes();
713 // Inserts the node as the return value of the graph.
714 Node* Return(Node* node) {
715 ret->ReplaceInput(0, node);
719 // Inserts the node as the effect input to the return of the graph.
720 void Effect(Node* node) { ret->ReplaceInput(1, node); }
// Produce a node whose output representation/type matches {type}, built
// from machine ops whose output representation is unambiguous.
722 Node* ExampleWithOutput(MachineType type) {
723 // TODO(titzer): use parameters with guaranteed representations.
724 if (type & kTypeInt32) {
725 return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
726 jsgraph.Int32Constant(1));
727 } else if (type & kTypeUint32) {
728 return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
729 jsgraph.Int32Constant(1));
730 } else if (type & kRepFloat64) {
731 return graph()->NewNode(machine()->Float64Add(),
732 jsgraph.Float64Constant(1),
733 jsgraph.Float64Constant(1));
734 } else if (type & kRepBit) {
735 return graph()->NewNode(machine()->Word32Equal(),
736 jsgraph.Int32Constant(1),
737 jsgraph.Int32Constant(1));
738 } else if (type & kRepWord64) {
739 return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
742 CHECK(type & kRepTagged);
// Produce a use of {node} that demands the representation/type {type},
// forcing the lowering to insert a representation change if needed.
747 Node* Use(Node* node, MachineType type) {
748 if (type & kTypeInt32) {
749 return graph()->NewNode(machine()->Int32LessThan(), node,
750 jsgraph.Int32Constant(1));
751 } else if (type & kTypeUint32) {
752 return graph()->NewNode(machine()->Uint32LessThan(), node,
753 jsgraph.Int32Constant(1));
754 } else if (type & kRepFloat64) {
755 return graph()->NewNode(machine()->Float64Add(), node,
756 jsgraph.Float64Constant(1));
757 } else if (type & kRepWord64) {
758 return graph()->NewNode(machine()->Int64LessThan(), node,
761 return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
762 jsgraph.TrueConstant());
// Branch on {cond} and reroute the return's control through the merge,
// making {cond} a kRepBit use.
766 Node* Branch(Node* cond) {
767 Node* br = graph()->NewNode(common()->Branch(), cond, start);
768 Node* tb = graph()->NewNode(common()->IfTrue(), br);
769 Node* fb = graph()->NewNode(common()->IfFalse(), br);
770 Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
771 NodeProperties::ReplaceControlInput(ret, m);
775 Node* Int64Constant(int64_t v) {
776 return graph()->NewNode(common()->Int64Constant(v));
779 SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
780 MachineOperatorBuilder* machine() { return &main_machine_; }
781 CommonOperatorBuilder* common() { return &main_common_; }
782 Graph* graph() { return main_graph_; }
// Expects BooleanNot of a bit value, used as a bit, to lower to
// Word32Equal(b, 0).
786 TEST(LowerBooleanNot_bit_bit) {
787 // BooleanNot(x: kRepBit) used as kRepBit
788 TestingGraph t(Type::Boolean());
789 Node* b = t.ExampleWithOutput(kRepBit);
790 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
791 Node* use = t.Branch(inv);
793 Node* cmp = use->InputAt(0);
794 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
795 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
796 Node* f = t.jsgraph.Int32Constant(0);
797 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// Expects BooleanNot of a bit value, used as tagged, to lower to
// ChangeBitToBool(Word32Equal(b, 0)).
801 TEST(LowerBooleanNot_bit_tagged) {
802 // BooleanNot(x: kRepBit) used as kRepTagged
803 TestingGraph t(Type::Boolean());
804 Node* b = t.ExampleWithOutput(kRepBit);
805 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
806 Node* use = t.Use(inv, kRepTagged);
809 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
810 Node* cmp = use->InputAt(0)->InputAt(0);
811 CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
812 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
813 Node* f = t.jsgraph.Int32Constant(0);
814 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// Expects BooleanNot of a tagged value, used as a bit, to lower to
// WordEqual(b, FalseConstant).
818 TEST(LowerBooleanNot_tagged_bit) {
819 // BooleanNot(x: kRepTagged) used as kRepBit
820 TestingGraph t(Type::Boolean());
822 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
823 Node* use = t.Branch(inv);
825 Node* cmp = use->InputAt(0);
826 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
827 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
828 Node* f = t.jsgraph.FalseConstant();
829 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// Expects BooleanNot of a tagged value, used as tagged, to lower to
// ChangeBitToBool(WordEqual(b, FalseConstant)).
833 TEST(LowerBooleanNot_tagged_tagged) {
834 // BooleanNot(x: kRepTagged) used as kRepTagged
835 TestingGraph t(Type::Boolean());
837 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
838 Node* use = t.Use(inv, kRepTagged);
841 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
842 Node* cmp = use->InputAt(0)->InputAt(0);
843 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
844 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
845 Node* f = t.jsgraph.FalseConstant();
846 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// Expects BooleanToNumber of a bit, used as int32, to become a no-op (the
// bit value is used directly).
850 TEST(LowerBooleanToNumber_bit_int32) {
851 // BooleanToNumber(x: kRepBit) used as kMachInt32
852 TestingGraph t(Type::Boolean());
853 Node* b = t.ExampleWithOutput(kRepBit);
854 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
855 Node* use = t.Use(cnv, kMachInt32);
858 CHECK_EQ(b, use->InputAt(0));
// Expects BooleanToNumber of a tagged boolean, used as int32, to lower to
// WordEqual(b, TrueConstant).
862 TEST(LowerBooleanToNumber_tagged_int32) {
863 // BooleanToNumber(x: kRepTagged) used as kMachInt32
864 TestingGraph t(Type::Boolean());
866 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
867 Node* use = t.Use(cnv, kMachInt32);
870 CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
871 CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
872 Node* c = t.jsgraph.TrueConstant();
873 CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
// Expects BooleanToNumber of a bit, used as tagged, to lower to
// ChangeInt32ToTagged(b).
877 TEST(LowerBooleanToNumber_bit_tagged) {
878 // BooleanToNumber(x: kRepBit) used as kMachAnyTagged
879 TestingGraph t(Type::Boolean());
880 Node* b = t.ExampleWithOutput(kRepBit);
881 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
882 Node* use = t.Use(cnv, kMachAnyTagged);
885 CHECK_EQ(b, use->InputAt(0)->InputAt(0));
886 CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
// Expects BooleanToNumber of a tagged boolean, used as tagged, to lower to
// ChangeInt32ToTagged(WordEqual(b, TrueConstant)).
890 TEST(LowerBooleanToNumber_tagged_tagged) {
891 // BooleanToNumber(x: kRepTagged) used as kMachAnyTagged
892 TestingGraph t(Type::Boolean());
894 Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
895 Node* use = t.Use(cnv, kMachAnyTagged);
898 CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
899 CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
900 CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
901 CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
902 Node* c = t.jsgraph.TrueConstant();
903 CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
// Shared parameter types used by several Number-op lowering tests below.
907 static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
908 Type::Number(), Type::Any()};
// With both inputs typed Signed32, Number comparisons lower to int32 ops.
911 TEST(LowerNumberCmp_to_int32) {
912 TestingGraph t(Type::Signed32(), Type::Signed32());
914 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
915 t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
916 t.simplified()->NumberLessThan());
917 t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
918 t.simplified()->NumberLessThanOrEqual());
// With both inputs typed Unsigned32, Number comparisons lower to uint32 ops.
922 TEST(LowerNumberCmp_to_uint32) {
923 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
925 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
926 t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
927 t.simplified()->NumberLessThan());
928 t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
929 t.simplified()->NumberLessThanOrEqual());
// With general Number/Any inputs, Number comparisons lower to float64 ops.
933 TEST(LowerNumberCmp_to_float64) {
934 static Type* types[] = {Type::Number(), Type::Any()};
936 for (size_t i = 0; i < arraysize(types); i++) {
937 TestingGraph t(types[i], types[i]);
939 t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
940 t.simplified()->NumberEqual());
941 t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
942 t.simplified()->NumberLessThan());
943 t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
944 t.simplified()->NumberLessThanOrEqual());
// NumberAdd/Subtract truncated by NumberToInt32 lower to Int32Add/Sub for
// all pairings of integral-ish input types (including Range types).
949 TEST(LowerNumberAddSub_to_int32) {
950 HandleAndZoneScope scope;
951 Factory* f = scope.main_zone()->isolate()->factory();
953 Type::Range(f->NewNumber(1), f->NewNumber(10), scope.main_zone());
955 Type::Range(f->NewNumber(-1e+13), f->NewNumber(1e+14), scope.main_zone());
956 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
959 for (size_t i = 0; i < arraysize(types); i++) {
960 for (size_t j = 0; j < arraysize(types); j++) {
961 TestingGraph t(types[i], types[j]);
962 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
963 t.simplified()->NumberAdd(),
964 t.simplified()->NumberToInt32());
965 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
966 t.simplified()->NumberSubtract(),
967 t.simplified()->NumberToInt32());
// Same as LowerNumberAddSub_to_int32 but truncated via NumberToUint32;
// still expects Int32Add/Sub (add/sub are representation-agnostic on bits).
973 TEST(LowerNumberAddSub_to_uint32) {
974 HandleAndZoneScope scope;
975 Factory* f = scope.main_zone()->isolate()->factory();
977 Type::Range(f->NewNumber(1), f->NewNumber(10), scope.main_zone());
979 Type::Range(f->NewNumber(-1e+13), f->NewNumber(1e+14), scope.main_zone());
980 static Type* types[] = {Type::Signed32(), Type::Integral32(), small_range,
983 for (size_t i = 0; i < arraysize(types); i++) {
984 for (size_t j = 0; j < arraysize(types); j++) {
985 TestingGraph t(types[i], types[j]);
986 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
987 t.simplified()->NumberAdd(),
988 t.simplified()->NumberToUint32());
989 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
990 t.simplified()->NumberSubtract(),
991 t.simplified()->NumberToUint32());
// Without a truncating use, NumberAdd/Subtract lower to float64 arithmetic.
997 TEST(LowerNumberAddSub_to_float64) {
998 for (size_t i = 0; i < arraysize(test_types); i++) {
999 TestingGraph t(test_types[i], test_types[i]);
1001 t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
1002 t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
1003 t.simplified()->NumberSubtract());
// NumberDivide/Modulus lower to float64 ops; the modulus check is skipped
// for Unsigned32 inputs (those get a different lowering).
1008 TEST(LowerNumberDivMod_to_float64) {
1009 for (size_t i = 0; i < arraysize(test_types); i++) {
1010 TestingGraph t(test_types[i], test_types[i]);
1012 t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
1013 if (!test_types[i]->Is(Type::Unsigned32())) {
1014 t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
1015 t.simplified()->NumberModulus());
// Asserts {node} is a {change} node whose sole value input is {of}.
1021 static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
1022 CHECK_EQ(change, node->opcode());
1023 CHECK_EQ(of, node->InputAt(0));
// NumberToInt32 of an already-Signed32 tagged value used as tagged is a nop.
1027 TEST(LowerNumberToInt32_to_nop) {
1028 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepTagged
1029 TestingGraph t(Type::Signed32());
1030 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1031 Node* use = t.Use(trunc, kRepTagged);
1034 CHECK_EQ(t.p0, use->InputAt(0));
// NumberToInt32 of a Signed32 tagged value used as float64 becomes a single
// ChangeTaggedToFloat64 of the input.
1038 TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
1039 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepFloat64
1040 TestingGraph t(Type::Signed32());
1041 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1042 Node* use = t.Use(trunc, kRepFloat64);
1045 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
// NumberToInt32 of a Signed32 tagged value used as int32 becomes a single
// ChangeTaggedToInt32 of the input.
1049 TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
1050 // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
1051 TestingGraph t(Type::Signed32());
1052 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1053 Node* use = t.Use(trunc, kTypeInt32);
1056 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
// NumberToInt32 of a float64-producing input used as int32 becomes a
// TruncateFloat64ToInt32 directly on that input.
1060 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
1061 // NumberToInt32(x: kRepFloat64) used as kMachInt32
1062 TestingGraph t(Type::Number());
1063 Node* p0 = t.ExampleWithOutput(kMachFloat64);
1064 // TODO(titzer): run the typer here, or attach machine type to param.
1065 NodeProperties::SetBounds(p0, Bounds(Type::Number()));
1066 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
1067 Node* use = t.Use(trunc, kMachInt32);
1070 CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
// NumberToInt32 of a general tagged Number used as int32 becomes
// TruncateFloat64ToInt32(ChangeTaggedToFloat64(p0)).
1074 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
1075 // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachInt32
1076 TestingGraph t(Type::Number());
1077 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
1078 Node* use = t.Use(trunc, kMachInt32);
1081 Node* node = use->InputAt(0);
1082 CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
1083 Node* of = node->InputAt(0);
1084 CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
1085 CHECK_EQ(t.p0, of->InputAt(0));
// Placeholder: case not yet implemented/covered.
1089 TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
1090 // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepTagged
// Placeholder: case not yet implemented/covered.
1094 TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
1095 // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepWord32
// A NumberToUint32 of an already-uint32-typed tagged value, used as tagged,
// is redundant: after lowering the use reads the parameter directly.
1100 TEST(LowerNumberToUint32_to_nop) {
1101 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
1102 TestingGraph t(Type::Unsigned32());
1103 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1104 Node* use = t.Use(trunc, kRepTagged);
1107 CHECK_EQ(t.p0, use->InputAt(0));
// NumberToUint32 of a uint32-typed tagged value used in float64 representation
// lowers to a single ChangeTaggedToFloat64 of the parameter.
1111 TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
1112 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepFloat64
1113 TestingGraph t(Type::Unsigned32());
1114 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1115 Node* use = t.Use(trunc, kRepFloat64);
1118 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
// NumberToUint32 of a uint32-typed tagged value used as a word32 lowers to a
// single ChangeTaggedToUint32 of the parameter.
1122 TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
1123 // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
1124 TestingGraph t(Type::Unsigned32());
1125 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
// NOTE(review): as in the int32 variant, the use is created with the type
// (kTypeUint32) rather than the representation — confirm intended.
1126 Node* use = t.Use(trunc, kTypeUint32);
1129 CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
// NumberToUint32 of a float64 input used as uint32 lowers to
// TruncateFloat64ToInt32 — the same machine truncation as the signed case,
// since only the bit pattern matters for a truncating use.
1133 TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
1134 // NumberToUint32(x: kRepFloat64) used as kMachUint32
1135 TestingGraph t(Type::Number());
1136 Node* p0 = t.ExampleWithOutput(kMachFloat64);
1137 // TODO(titzer): run the typer here, or attach machine type to param.
1138 NodeProperties::SetBounds(p0, Bounds(Type::Number()));
1139 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
1140 Node* use = t.Use(trunc, kMachUint32);
1143 CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
// When the input is tagged (and only typed Number), lowering must insert two
// nodes: ChangeTaggedToFloat64 feeding TruncateFloat64ToInt32.
1147 TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
1148 // NumberToUint32(x: kTypeNumber | kRepTagged) used as kMachUint32
1149 TestingGraph t(Type::Number());
1150 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
1151 Node* use = t.Use(trunc, kMachUint32);
// Walk the lowered chain: use <- TruncateFloat64ToInt32 <- ChangeTaggedToFloat64 <- p0.
1154 Node* node = use->InputAt(0);
1155 CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
1156 Node* of = node->InputAt(0);
1157 CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
1158 CHECK_EQ(t.p0, of->InputAt(0));
// Placeholder: case not yet implemented/covered.
1162 TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
1163 // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
// Placeholder: case not yet implemented/covered.
1168 TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
1169 // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
// Placeholder: case not yet implemented/covered.
1174 TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
1175 // TODO(titzer): NumberToUint32(x: kRepFloat64) used as kRepWord32
// ReferenceEqual is pure pointer identity, so it lowers to the machine's
// word-sized equality operator.
1179 TEST(LowerReferenceEqual_to_wordeq) {
1180 TestingGraph t(Type::Any(), Type::Any());
1181 IrOpcode::Value opcode =
1182 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
1183 t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
// String comparisons lower to a runtime call whose result is fed into the
// corresponding machine compare; StringAdd lowers to a plain call.
1187 TEST(LowerStringOps_to_call_and_compare) {
1188 if (Pipeline::SupportedTarget()) {
1189 // These tests need linkage for the calls.
1190 TestingGraph t(Type::String(), Type::String());
1191 IrOpcode::Value compare_eq =
1192 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
1193 IrOpcode::Value compare_lt =
1194 static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
1195 IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
1196 t.machine()->IntLessThanOrEqual()->opcode());
1197 t.CheckLoweringBinop(compare_eq, t.simplified()->StringEqual());
1198 t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
1199 t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
1200 t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringAdd());
// Builds a node producing representation |from|, uses it with a requirement
// described by the (elided) second machine-type parameter, and asserts that
// lowering inserted exactly the |expected| change between producer and use.
1205 void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
1207 TestingGraph t(Type::Any());
1208 Node* in = t.ExampleWithOutput(from);
1209 Node* use = t.Use(in, to);
1212 CHECK_EQ(expected, use->InputAt(0)->opcode());
1213 CHECK_EQ(in, use->InputAt(0)->InputAt(0));
// Exercises the basic representation-change matrix: float64/tagged/int32/
// uint32 sources against the representations and types they must convert to.
1217 TEST(InsertBasicChanges) {
1218 CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, kRepFloat64,
1220 CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, kRepFloat64,
1222 CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, kRepTagged, kTypeInt32);
1223 CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, kRepTagged,
1226 CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, kRepFloat64,
1228 CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, kRepTagged,
1231 CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, kTypeInt32,
1233 CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, kTypeInt32, kRepTagged);
1235 CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, kTypeUint32,
1237 CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, kTypeUint32,
// Wires p0/p1 into |op|, returns the result, and after lowering asserts that
// both inputs were wrapped in |input_change| nodes and the binop's result was
// wrapped in an |output_change| node before reaching the return.
1242 static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
1243 IrOpcode::Value input_change,
1244 IrOpcode::Value output_change) {
1245 Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
1248 CHECK_EQ(input_change, binop->InputAt(0)->opcode());
1249 CHECK_EQ(input_change, binop->InputAt(1)->opcode());
1250 CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
1251 CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
1252 CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
1253 CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
// Int32 machine binops on tagged signed-int32 inputs get tagged->int32
// changes on the inputs and an int32->tagged change on the output.
1257 TEST(InsertChangesAroundInt32Binops) {
1258 TestingGraph t(Type::Signed32(), Type::Signed32());
1260 const Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
1261 t.machine()->Int32Mul(), t.machine()->Int32Div(),
1262 t.machine()->Int32Mod(), t.machine()->Word32And(),
1263 t.machine()->Word32Or(), t.machine()->Word32Xor(),
1264 t.machine()->Word32Shl(), t.machine()->Word32Sar()};
1266 for (size_t i = 0; i < arraysize(ops); i++) {
1267 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1268 IrOpcode::kChangeInt32ToTagged);
// Int32 comparisons produce a bit, so the output change is bit->bool rather
// than int32->tagged.
1273 TEST(InsertChangesAroundInt32Cmp) {
1274 TestingGraph t(Type::Signed32(), Type::Signed32());
1276 const Operator* ops[] = {t.machine()->Int32LessThan(),
1277 t.machine()->Int32LessThanOrEqual()};
1279 for (size_t i = 0; i < arraysize(ops); i++) {
1280 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1281 IrOpcode::kChangeBitToBool);
// Same as the int32 comparison case, but unsigned inputs take the
// tagged->uint32 input change.
1286 TEST(InsertChangesAroundUint32Cmp) {
1287 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
1289 const Operator* ops[] = {t.machine()->Uint32LessThan(),
1290 t.machine()->Uint32LessThanOrEqual()};
1292 for (size_t i = 0; i < arraysize(ops); i++) {
1293 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
1294 IrOpcode::kChangeBitToBool);
// Float64 machine binops on tagged Number inputs get tagged->float64 input
// changes and a float64->tagged output change.
1299 TEST(InsertChangesAroundFloat64Binops) {
1300 TestingGraph t(Type::Number(), Type::Number());
1302 const Operator* ops[] = {
1303 t.machine()->Float64Add(), t.machine()->Float64Sub(),
1304 t.machine()->Float64Mul(), t.machine()->Float64Div(),
1305 t.machine()->Float64Mod(),
1308 for (size_t i = 0; i < arraysize(ops); i++) {
1309 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1310 IrOpcode::kChangeFloat64ToTagged);
// Float64 comparisons produce a bit; output change is bit->bool.
1315 TEST(InsertChangesAroundFloat64Cmp) {
1316 TestingGraph t(Type::Number(), Type::Number());
1318 const Operator* ops[] = {t.machine()->Float64Equal(),
1319 t.machine()->Float64LessThan(),
1320 t.machine()->Float64LessThanOrEqual()};
1322 for (size_t i = 0; i < arraysize(ops); i++) {
1323 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1324 IrOpcode::kChangeBitToBool);
// A lowered field access must carry (offset - tag) as the constant index
// input (input 1) of the machine Load/Store node.
1331 void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
1332 IntPtrMatcher mindex(load_or_store->InputAt(1));
1333 CHECK(mindex.Is(access.offset - access.tag()));
// Verifies the index arithmetic of a lowered element access:
//   index' = (index << log2(element_size)) + (header_size - tag)
// with an extra ChangeUint32ToUint64 wrapper on 64-bit targets.
// Returns the original (unscaled) index node for further checks.
1337 Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
1338 Node* index = load_or_store->InputAt(1);
1339 if (kPointerSize == 8) {
// 64-bit targets widen the 32-bit index computation to a machine word.
1340 CHECK_EQ(IrOpcode::kChangeUint32ToUint64, index->opcode());
1341 index = index->InputAt(0);
1344 Int32BinopMatcher mindex(index);
1345 CHECK_EQ(IrOpcode::kInt32Add, mindex.node()->opcode());
1346 CHECK(mindex.right().Is(access.header_size - access.tag()));
1348 const int element_size_shift = ElementSizeLog2Of(access.machine_type);
1349 if (element_size_shift) {
// Elements wider than one byte are scaled with a left shift.
1350 Int32BinopMatcher shl(mindex.left().node());
1351 CHECK_EQ(IrOpcode::kWord32Shl, shl.node()->opcode());
1352 CHECK(shl.right().Is(element_size_shift));
1353 return shl.left().node();
1355 return mindex.left().node();
// Machine representations exercised by the load/store lowering tests below.
1360 const MachineType kMachineReps[] = {kRepBit, kMachInt8, kMachInt16,
1361 kMachInt32, kMachInt64, kMachFloat64,
// LoadField lowers to a machine Load with the field's offset arithmetic and
// the field's machine type as the load representation.
1367 TEST(LowerLoadField_to_load) {
1368 TestingGraph t(Type::Any(), Type::Signed32());
1370 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1371 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1372 Handle<Name>::null(), Type::Any(), kMachineReps[i]};
1375 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1376 Node* use = t.Use(load, kMachineReps[i]);
1379 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1380 CHECK_EQ(t.p0, load->InputAt(0));
1381 CheckFieldAccessArithmetic(access, load);
// The machine type attached to the Load must match the field's type.
1383 MachineType rep = OpParameter<MachineType>(load);
1384 CHECK_EQ(kMachineReps[i], rep);
// StoreField lowers to a machine Store; tagged representations additionally
// require a full write barrier.
1389 TEST(LowerStoreField_to_store) {
1390 TestingGraph t(Type::Any(), Type::Signed32());
1392 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1393 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1394 Handle<Name>::null(), Type::Any(), kMachineReps[i]};
1397 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1398 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1399 val, t.start, t.start);
1402 CHECK_EQ(IrOpcode::kStore, store->opcode());
1403 CHECK_EQ(val, store->InputAt(2));
1404 CheckFieldAccessArithmetic(access, store);
1406 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1407 if (kMachineReps[i] & kRepTagged) {
// Storing a tagged pointer into a heap object needs a write barrier.
1408 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
1410 CHECK_EQ(kMachineReps[i], rep.machine_type());
// LoadElement lowers to a machine Load with the element index arithmetic and
// the element's machine type as the load representation.
1415 TEST(LowerLoadElement_to_load) {
1416 TestingGraph t(Type::Any(), Type::Signed32());
1418 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1419 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1420 FixedArrayBase::kHeaderSize, Type::Any(),
1424 t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
1425 t.jsgraph.Int32Constant(1024), t.start, t.start);
1426 Node* use = t.Use(load, kMachineReps[i]);
1429 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1430 CHECK_EQ(t.p0, load->InputAt(0));
1431 CheckElementAccessArithmetic(access, load);
1433 MachineType rep = OpParameter<MachineType>(load);
1434 CHECK_EQ(kMachineReps[i], rep);
// StoreElement lowers to a machine Store; tagged representations additionally
// require a full write barrier.
1439 TEST(LowerStoreElement_to_store) {
1440 TestingGraph t(Type::Any(), Type::Signed32());
1442 for (size_t i = 0; i < arraysize(kMachineReps); i++) {
1443 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1444 FixedArrayBase::kHeaderSize, Type::Any(),
1447 Node* val = t.ExampleWithOutput(kMachineReps[i]);
1448 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1449 t.p1, t.jsgraph.Int32Constant(1024), val,
1453 CHECK_EQ(IrOpcode::kStore, store->opcode());
1454 CHECK_EQ(val, store->InputAt(2));
1455 CheckElementAccessArithmetic(access, store);
1457 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1458 if (kMachineReps[i] & kRepTagged) {
// Storing a tagged pointer into a heap object needs a write barrier.
1459 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
1461 CHECK_EQ(kMachineReps[i], rep.machine_type());
// A tagged element index must get a ChangeTaggedToInt32 inserted before it
// feeds the index arithmetic of the lowered Load.
1466 TEST(InsertChangeForLoadElementIndex) {
1467 // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
1468 // Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
1469 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1470 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1471 FixedArrayBase::kHeaderSize, Type::Any(),
1474 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1475 t.p1, t.p2, t.start, t.start);
1478 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1479 CHECK_EQ(t.p0, load->InputAt(0));
1481 Node* index = CheckElementAccessArithmetic(access, load);
1482 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
// Same as the load-index case, but for the index of a lowered Store.
1486 TEST(InsertChangeForStoreElementIndex) {
1487 // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
1488 // Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
1489 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1490 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1491 FixedArrayBase::kHeaderSize, Type::Any(),
1495 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
1496 t.jsgraph.TrueConstant(), t.start, t.start);
1499 CHECK_EQ(IrOpcode::kStore, store->opcode());
1500 CHECK_EQ(t.p0, store->InputAt(0));
1502 Node* index = CheckElementAccessArithmetic(access, store);
1503 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
// A float64 element loaded into a tagged context needs a
// ChangeFloat64ToTagged on the loaded value before the return.
1507 TEST(InsertChangeForLoadElement) {
1508 // TODO(titzer): test all load/store representation change insertions.
1509 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1510 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1511 FixedArrayBase::kHeaderSize, Type::Any(),
1514 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1515 t.p1, t.p1, t.start, t.start);
1518 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1519 CHECK_EQ(t.p0, load->InputAt(0));
1520 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
// A float64 field loaded into a tagged context needs a ChangeFloat64ToTagged
// on the loaded value before the return.
1524 TEST(InsertChangeForLoadField) {
1525 // TODO(titzer): test all load/store representation change insertions.
1526 TestingGraph t(Type::Any(), Type::Signed32());
1527 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1528 Handle<Name>::null(), Type::Any(), kMachFloat64};
1531 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1534 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1535 CHECK_EQ(t.p0, load->InputAt(0));
1536 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
// Storing a tagged value into a float64 element requires a
// ChangeTaggedToFloat64 on the value input of the lowered Store.
1540 TEST(InsertChangeForStoreElement) {
1541 // TODO(titzer): test all load/store representation change insertions.
1542 TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
1543 ElementAccess access = {kNoBoundsCheck, kTaggedBase,
1544 FixedArrayBase::kHeaderSize, Type::Any(),
1547 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1548 t.jsgraph.Int32Constant(0), t.p2, t.p1,
1553 CHECK_EQ(IrOpcode::kStore, store->opcode());
1554 CHECK_EQ(t.p0, store->InputAt(0));
1555 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
// Storing a tagged value into a float64 field requires a
// ChangeTaggedToFloat64 on the value input of the lowered Store.
1559 TEST(InsertChangeForStoreField) {
1560 // TODO(titzer): test all load/store representation change insertions.
1561 TestingGraph t(Type::Any(), Type::Signed32());
1562 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1563 Handle<Name>::null(), Type::Any(), kMachFloat64};
1565 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1566 t.p1, t.start, t.start);
1570 CHECK_EQ(IrOpcode::kStore, store->opcode());
1571 CHECK_EQ(t.p0, store->InputAt(0));
1572 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
// Body of a test (header line elided in this excerpt) that loads the same
// field from two objects, merges the values in a Phi, and checks that the
// Phi's representation is updated to match the field's machine type.
1577 TestingGraph t(Type::Any(), Type::Signed32());
1578 static const MachineType kMachineTypes[] = {kMachInt32, kMachUint32,
1580 Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
1582 for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
1583 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1584 Handle<Name>::null(), kTypes[i], kMachineTypes[i]};
1587 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1589 t.graph()->NewNode(t.simplified()->LoadField(access), t.p1, t.start);
1590 Node* phi = t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), load0,
1592 t.Return(t.Use(phi, kMachineTypes[i]));
// The Phi survives but its machine representation must now agree with the
// representation of the loaded field.
1595 CHECK_EQ(IrOpcode::kPhi, phi->opcode());
1596 CHECK_EQ(RepresentationOf(kMachineTypes[i]),
1597 RepresentationOf(OpParameter<MachineType>(phi)));
// End-to-end: NumberDivide by -1 under int32 truncation, executed over all
// int32 test inputs and compared against the C++-computed expected value.
1602 TEST(RunNumberDivide_minus_1_TruncatingToInt32) {
1603 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1604 Node* num = t.NumberToInt32(t.Parameter(0));
1605 Node* div = t.NumberDivide(num, t.jsgraph.Constant(-1));
1606 Node* trunc = t.NumberToInt32(div);
1609 if (Pipeline::SupportedTarget()) {
1610 t.LowerAllNodesAndLowerChanges();
1613 FOR_INT32_INPUTS(i) {
1615 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// NumberMultiply by a small in-range constant, with a truncating use, may be
// lowered to the integer Int32Mul.
1621 TEST(NumberMultiply_TruncatingToInt32) {
1622 int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000};
1624 for (size_t i = 0; i < arraysize(constants); i++) {
1625 TestingGraph t(Type::Signed32());
1626 Node* k = t.jsgraph.Constant(constants[i]);
1627 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1628 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
1632 CHECK_EQ(IrOpcode::kInt32Mul, mul->opcode());
// End-to-end: NumberMultiply by each constant under int32 truncation; the
// expected value is DoubleToInt32 of the exact double product.
1637 TEST(RunNumberMultiply_TruncatingToInt32) {
1638 int32_t constants[] = {-100, -10, -1, 0, 1, 100, 1000, 3000999};
1640 for (size_t i = 0; i < arraysize(constants); i++) {
1641 double k = static_cast<double>(constants[i]);
1642 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1643 Node* num = t.NumberToInt32(t.Parameter(0));
1644 Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
1645 Node* trunc = t.NumberToInt32(mul);
1648 if (Pipeline::SupportedTarget()) {
1649 t.LowerAllNodesAndLowerChanges();
1652 FOR_INT32_INPUTS(i) {
1653 int32_t x = DoubleToInt32(static_cast<double>(*i) * k);
1654 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// End-to-end: NumberMultiply under uint32 truncation; expected value is
// DoubleToUint32 of the exact double product.
1661 TEST(RunNumberMultiply_TruncatingToUint32) {
1662 uint32_t constants[] = {0, 1, 2, 3, 4, 100, 1000, 1024, 2048, 3000999};
1664 for (size_t i = 0; i < arraysize(constants); i++) {
1665 double k = static_cast<double>(constants[i]);
1666 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1667 Node* num = t.NumberToUint32(t.Parameter(0));
1668 Node* mul = t.NumberMultiply(num, t.jsgraph.Constant(k));
1669 Node* trunc = t.NumberToUint32(mul);
1672 if (Pipeline::SupportedTarget()) {
1673 t.LowerAllNodesAndLowerChanges();
1676 FOR_UINT32_INPUTS(i) {
1677 uint32_t x = DoubleToUint32(static_cast<double>(*i) * k);
1678 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// End-to-end: unsigned division by 2 under uint32 truncation.
1685 TEST(RunNumberDivide_2_TruncatingToUint32) {
1686 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1687 Node* num = t.NumberToUint32(t.Parameter(0));
1688 Node* div = t.NumberDivide(num, t.jsgraph.Constant(2));
1689 Node* trunc = t.NumberToUint32(div);
1692 if (Pipeline::SupportedTarget()) {
1693 t.LowerAllNodesAndLowerChanges();
1696 FOR_UINT32_INPUTS(i) {
1697 uint32_t x = DoubleToUint32(static_cast<double>(*i / 2.0));
1698 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// A constant too large for a safe int32 multiply keeps the operation as a
// Float64Mul even under a truncating use.
1704 TEST(NumberMultiply_ConstantOutOfRange) {
1705 Node* k = t.jsgraph.Constant(1000000023);
1705 TestingGraph t(Type::Signed32());
1706 Node* k = t.jsgraph.Constant(1000000023);
1707 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1708 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), mul);
1712 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
// Without a truncating use, NumberMultiply must stay a Float64Mul even for a
// small constant (the full double result is observable).
1716 TEST(NumberMultiply_NonTruncating) {
1717 TestingGraph t(Type::Signed32());
1718 Node* k = t.jsgraph.Constant(111);
1719 Node* mul = t.graph()->NewNode(t.simplified()->NumberMultiply(), t.p0, k);
1723 CHECK_EQ(IrOpcode::kFloat64Mul, mul->opcode());
// NumberDivide by a nonzero constant with a truncating int32 use lowers to
// Int32Div. (Note: 0 and -1 are excluded here — see NumberDivide_BadConstants.)
1727 TEST(NumberDivide_TruncatingToInt32) {
1728 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1730 for (size_t i = 0; i < arraysize(constants); i++) {
1731 TestingGraph t(Type::Signed32());
1732 Node* k = t.jsgraph.Constant(constants[i]);
1733 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1734 Node* use = t.Use(div, kMachInt32);
1738 CHECK_EQ(IrOpcode::kInt32Div, use->InputAt(0)->opcode());
// End-to-end: signed division by constants under int32 truncation; expected
// value is DoubleToInt32 of the exact double quotient.
1743 TEST(RunNumberDivide_TruncatingToInt32) {
1744 int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
1746 for (size_t i = 0; i < arraysize(constants); i++) {
1747 int32_t k = constants[i];
1748 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1749 Node* num = t.NumberToInt32(t.Parameter(0));
1750 Node* div = t.NumberDivide(num, t.jsgraph.Constant(k));
1751 Node* trunc = t.NumberToInt32(div);
1754 if (Pipeline::SupportedTarget()) {
1755 t.LowerAllNodesAndLowerChanges();
1758 FOR_INT32_INPUTS(i) {
1759 if (*i == INT_MAX) continue; // exclude max int.
1760 int32_t x = DoubleToInt32(static_cast<double>(*i) / k);
1761 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// NumberDivide by a positive constant on unsigned inputs with a truncating
// uint32 use lowers to Uint32Div.
1768 TEST(NumberDivide_TruncatingToUint32) {
1769 double constants[] = {1, 3, 100, 1000, 100998348};
1771 for (size_t i = 0; i < arraysize(constants); i++) {
1772 TestingGraph t(Type::Unsigned32());
1773 Node* k = t.jsgraph.Constant(constants[i]);
1774 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1775 Node* use = t.Use(div, kMachUint32);
1779 CHECK_EQ(IrOpcode::kUint32Div, use->InputAt(0)->opcode());
// End-to-end: unsigned division by constants; expected value is the exact
// C++ unsigned integer quotient.
1784 TEST(RunNumberDivide_TruncatingToUint32) {
1785 uint32_t constants[] = {100, 10, 1, 1, 2, 4, 1000, 1024, 2048};
1787 for (size_t i = 0; i < arraysize(constants); i++) {
1788 uint32_t k = constants[i];
1789 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1790 Node* num = t.NumberToUint32(t.Parameter(0));
1791 Node* div = t.NumberDivide(num, t.jsgraph.Constant(static_cast<double>(k)));
1792 Node* trunc = t.NumberToUint32(div);
1795 if (Pipeline::SupportedTarget()) {
1796 t.LowerAllNodesAndLowerChanges();
1799 FOR_UINT32_INPUTS(i) {
1800 uint32_t x = *i / k;
1801 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Divisors that cannot use the plain integer divide instruction:
// -1 becomes a subtraction from zero; 0 folds the result to constant 0.
1808 TEST(NumberDivide_BadConstants) {
1810 TestingGraph t(Type::Signed32());
1811 Node* k = t.jsgraph.Constant(-1);
1812 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1813 Node* use = t.Use(div, kMachInt32);
// x / -1 lowers to 0 - x (Int32Sub) to avoid the INT_MIN / -1 overflow case.
1817 CHECK_EQ(IrOpcode::kInt32Sub, use->InputAt(0)->opcode());
1821 TestingGraph t(Type::Signed32());
1822 Node* k = t.jsgraph.Constant(0);
1823 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1824 Node* use = t.Use(div, kMachInt32);
// Signed x / 0 under truncation folds to the constant 0.
1828 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1829 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
1833 TestingGraph t(Type::Unsigned32());
1834 Node* k = t.jsgraph.Constant(0);
1835 Node* div = t.graph()->NewNode(t.simplified()->NumberDivide(), t.p0, k);
1836 Node* use = t.Use(div, kMachUint32);
// Unsigned x / 0 under truncation likewise folds to the constant 0.
1840 CHECK_EQ(IrOpcode::kInt32Constant, use->InputAt(0)->opcode());
1841 CHECK_EQ(0, OpParameter<int32_t>(use->InputAt(0)));
// NumberModulus by a nonzero constant with a truncating int32 use lowers to
// Int32Mod (truncation discards the -0 distinction that otherwise forces a
// float modulus).
1846 TEST(NumberModulus_TruncatingToInt32) {
1847 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1849 for (size_t i = 0; i < arraysize(constants); i++) {
1850 TestingGraph t(Type::Signed32());
1851 Node* k = t.jsgraph.Constant(constants[i]);
1852 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1853 Node* use = t.Use(mod, kMachInt32);
1857 CHECK_EQ(IrOpcode::kInt32Mod, use->InputAt(0)->opcode());
// End-to-end: signed modulus under int32 truncation; expected value is
// DoubleToInt32(std::fmod(...)), matching JavaScript's sign-of-dividend rule.
1862 TEST(RunNumberModulus_TruncatingToInt32) {
1863 int32_t constants[] = {-100, -10, -1, 1, 2, 100, 1000, 1024, 2048};
1865 for (size_t i = 0; i < arraysize(constants); i++) {
1866 int32_t k = constants[i];
1867 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1868 Node* num = t.NumberToInt32(t.Parameter(0));
1869 Node* mod = t.NumberModulus(num, t.jsgraph.Constant(k));
1870 Node* trunc = t.NumberToInt32(mod);
1873 if (Pipeline::SupportedTarget()) {
1874 t.LowerAllNodesAndLowerChanges();
1877 FOR_INT32_INPUTS(i) {
1878 if (*i == INT_MAX) continue; // exclude max int.
1879 int32_t x = DoubleToInt32(std::fmod(static_cast<double>(*i), k));
1880 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// NumberModulus on unsigned inputs with a truncating uint32 use lowers to
// Uint32Mod.
1887 TEST(NumberModulus_TruncatingToUint32) {
1888 double constants[] = {1, 3, 100, 1000, 100998348};
1890 for (size_t i = 0; i < arraysize(constants); i++) {
1891 TestingGraph t(Type::Unsigned32());
1892 Node* k = t.jsgraph.Constant(constants[i]);
1893 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1894 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
1895 Node* ret = t.Return(trunc);
1898 CHECK_EQ(IrOpcode::kUint32Mod, ret->InputAt(0)->opcode());
// End-to-end: unsigned modulus by constants; expected value is the exact
// C++ unsigned integer remainder.
1903 TEST(RunNumberModulus_TruncatingToUint32) {
1904 uint32_t constants[] = {1, 2, 100, 1000, 1024, 2048};
1906 for (size_t i = 0; i < arraysize(constants); i++) {
1907 uint32_t k = constants[i];
1908 SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
1909 Node* num = t.NumberToUint32(t.Parameter(0));
1911 t.NumberModulus(num, t.jsgraph.Constant(static_cast<double>(k)));
1912 Node* trunc = t.NumberToUint32(mod);
1915 if (Pipeline::SupportedTarget()) {
1916 t.LowerAllNodesAndLowerChanges();
1919 FOR_UINT32_INPUTS(i) {
1920 uint32_t x = *i % k;
1921 t.CheckNumberCall(static_cast<double>(x), static_cast<double>(*i));
// Without a truncating use, signed modulus must stay Float64Mod because the
// result can be -0, which int32 cannot represent.
1928 TEST(NumberModulus_Int32) {
1929 int32_t constants[] = {-100, -10, 1, 4, 100, 1000};
1931 for (size_t i = 0; i < arraysize(constants); i++) {
1932 TestingGraph t(Type::Signed32());
1933 Node* k = t.jsgraph.Constant(constants[i]);
1934 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1938 CHECK_EQ(IrOpcode::kFloat64Mod, mod->opcode()); // Pesky -0 behavior.
// Unsigned inputs cannot yield -0, so modulus lowers to Uint32Mod for both
// int32 and uint32 uses.
1943 TEST(NumberModulus_Uint32) {
1944 const double kConstants[] = {2, 100, 1000, 1024, 2048};
1945 const MachineType kTypes[] = {kMachInt32, kMachUint32};
1947 for (auto const type : kTypes) {
1948 for (auto const c : kConstants) {
1949 TestingGraph t(Type::Unsigned32());
1950 Node* k = t.jsgraph.Constant(c);
1951 Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
1952 Node* use = t.Use(mod, type);
1956 CHECK_EQ(IrOpcode::kUint32Mod, use->InputAt(0)->opcode());
// Table-driven test: a Phi merging two differently-typed parameters must be
// assigned the expected machine representation for each (arg1, arg2, use)
// combination. (Test body continues beyond this excerpt.)
1962 TEST(PhiRepresentation) {
1963 HandleAndZoneScope scope;
1964 Zone* z = scope.main_zone();
1966 Factory* f = z->isolate()->factory();
1967 Handle<Object> range_min = f->NewNumber(-1e13);
1968 Handle<Object> range_max = f->NewNumber(1e+15);
1969 Type* range = Type::Range(range_min, range_max, z);
1975 MachineTypeUnion expected;
// Each row: arg1 type, arg2 type, machine type of the use, expected phi type.
1978 TestData test_data[] = {
1979 {Type::Signed32(), Type::Unsigned32(), kMachInt32,
1980 kRepWord32 | kTypeNumber},
1981 {range, range, kMachUint32, kRepWord32 | kTypeNumber},
1982 {Type::Signed32(), Type::Signed32(), kMachInt32, kMachInt32},
1983 {Type::Unsigned32(), Type::Unsigned32(), kMachInt32, kMachUint32},
1984 {Type::Number(), Type::Signed32(), kMachInt32, kMachFloat64},
1985 {Type::Signed32(), Type::String(), kMachInt32, kMachAnyTagged}};
1987 for (auto const d : test_data) {
1988 TestingGraph t(d.arg1, d.arg2, Type::Boolean());
// Build a diamond (branch/merge) so the phi has two distinct control inputs.
1990 Node* br = t.graph()->NewNode(t.common()->Branch(), t.p2, t.start);
1991 Node* tb = t.graph()->NewNode(t.common()->IfTrue(), br);
1992 Node* fb = t.graph()->NewNode(t.common()->IfFalse(), br);
1993 Node* m = t.graph()->NewNode(t.common()->Merge(2), tb, fb);
1996 t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), t.p0, t.p1, m);
1998 Bounds phi_bounds = Bounds::Either(Bounds(d.arg1), Bounds(d.arg2), z);
1999 NodeProperties::SetBounds(phi, phi_bounds);
2001 Node* use = t.Use(phi, d.use);
2005 CHECK_EQ(d.expected, OpParameter<MachineType>(phi));