1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/compiler/control-builders.h"
8 #include "src/compiler/generic-node-inl.h"
9 #include "src/compiler/graph-visualizer.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/compiler/pipeline.h"
12 #include "src/compiler/representation-change.h"
13 #include "src/compiler/simplified-lowering.h"
14 #include "src/compiler/simplified-node-factory.h"
15 #include "src/compiler/typer.h"
16 #include "src/compiler/verifier.h"
17 #include "src/execution.h"
18 #include "src/parser.h"
19 #include "src/rewriter.h"
20 #include "src/scopes.h"
21 #include "test/cctest/cctest.h"
22 #include "test/cctest/compiler/codegen-tester.h"
23 #include "test/cctest/compiler/graph-builder-tester.h"
24 #include "test/cctest/compiler/value-helper.h"
26 using namespace v8::internal;
27 using namespace v8::internal::compiler;
// Test harness: extends GraphBuilderTester with the pieces needed to run the
// simplified-lowering phase over a hand-built graph (source positions, a
// JSGraph, and the SimplifiedLowering instance itself).
// NOTE(review): this is a numbered dump with lines elided — access
// specifiers, the typer member referenced by the initializer list, and the
// closing braces of LowerAllNodes()/the class are not visible here.
29 template <typename ReturnType>
30 class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
// Up to five parameter machine types; kMachineLast appears to act as the
// "no parameter" default — TODO confirm against GraphBuilderTester.
32 SimplifiedLoweringTester(MachineType p0 = kMachineLast,
33 MachineType p1 = kMachineLast,
34 MachineType p2 = kMachineLast,
35 MachineType p3 = kMachineLast,
36 MachineType p4 = kMachineLast)
37 : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
39 source_positions(this->graph()),
40 jsgraph(this->graph(), this->common(), &typer),
41 lowering(&jsgraph, &source_positions) {}
44 SourcePositionTable source_positions;
46 SimplifiedLowering lowering;
// Runs the lowering phase over every node of the graph under test.
48 void LowerAllNodes() {
50 lowering.LowerAllNodes();
// Convenience accessors into the isolate owned by the base tester.
53 Factory* factory() { return this->isolate()->factory(); }
54 Heap* heap() { return this->isolate()->heap(); }
58 // TODO(dcarney): find a home for these functions.
// Helpers that build FieldAccess/ElementAccess descriptors for well-known
// heap-object fields, used by the load/store tests below.
// NOTE(review): the trailing "return access; }" lines of each helper are
// elided in this dump.
61 FieldAccess ForJSObjectMap() {
62 FieldAccess access = {kTaggedBase, JSObject::kMapOffset, Handle<Name>(),
63 Type::Any(), kMachineTagged};
68 FieldAccess ForJSObjectProperties() {
69 FieldAccess access = {kTaggedBase, JSObject::kPropertiesOffset,
70 Handle<Name>(), Type::Any(), kMachineTagged};
// Backing store of a JSArrayBuffer is an untagged pointer-sized field.
75 FieldAccess ForArrayBufferBackingStore() {
76 FieldAccess access = {
77 kTaggedBase, JSArrayBuffer::kBackingStoreOffset,
78 Handle<Name>(), Type::UntaggedPtr(),
79 MachineOperatorBuilder::pointer_rep(),
85 ElementAccess ForFixedArrayElement() {
86 ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
// Raw (untagged) element access, offset past the heap-object header.
92 ElementAccess ForBackingStoreElement(MachineType rep) {
93 ElementAccess access = {kUntaggedBase,
94 kNonHeapObjectHeaderSize - kHeapObjectTag,
101 // Create a simple JSObject with a unique map.
// Uses a static counter to give each compiled literal a distinct property
// name ("a_0", "a_1", ...), which forces a fresh map per call.
// NOTE(review): the declaration of `buffer` (line 104) and the closing
// brace are elided in this dump.
102 static Handle<JSObject> TestObject() {
103 static int index = 0;
105 v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
106 return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
// Bodies of two tests (their TEST(...) header lines are elided in this
// dump — presumably TEST(RunLoadMap) and TEST(RunStoreMap); verify against
// the original file).
// First test: lower a LoadField of the map and check the loaded map matches.
111 SimplifiedLoweringTester<Object*> t(kMachineTagged);
112 FieldAccess access = ForJSObjectMap();
113 Node* load = t.LoadField(access, t.Parameter(0));
// Only run the generated code on supported targets.
118 if (Pipeline::SupportedTarget()) {
120 Handle<JSObject> src = TestObject();
121 Handle<Map> src_map(src->map());
122 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
123 CHECK_EQ(*src_map, result);
// Second test: store a map into another object and verify it took effect.
129 SimplifiedLoweringTester<int32_t> t(kMachineTagged, kMachineTagged);
130 FieldAccess access = ForJSObjectMap();
131 t.StoreField(access, t.Parameter(1), t.Parameter(0));
132 t.Return(t.jsgraph.TrueConstant());
136 if (Pipeline::SupportedTarget()) {
138 Handle<JSObject> src = TestObject();
139 Handle<Map> src_map(src->map());
140 Handle<JSObject> dst = TestObject();
// TestObject() gives each object a unique map, so these must differ.
141 CHECK(src->map() != dst->map());
142 t.Call(*src_map, *dst); // TODO(titzer): raw pointers in call
143 CHECK(*src_map == dst->map());
// Lowers a LoadField of JSObject::properties and checks the loaded backing
// store matches. NOTE(review): interior lines (Return/LowerAllNodes and
// closing braces) are elided in this dump.
148 TEST(RunLoadProperties) {
149 SimplifiedLoweringTester<Object*> t(kMachineTagged);
150 FieldAccess access = ForJSObjectProperties();
151 Node* load = t.LoadField(access, t.Parameter(0));
156 if (Pipeline::SupportedTarget()) {
158 Handle<JSObject> src = TestObject();
159 Handle<FixedArray> src_props(src->properties());
160 Object* result = t.Call(*src); // TODO(titzer): raw pointers in call
161 CHECK_EQ(*src_props, result);
// Loads the map from one object and stores it into another in the same
// lowered graph; checks both the returned value and the store's effect.
// NOTE(review): lines are elided in this dump (Return/LowerAllNodes and
// closing braces are not visible).
166 TEST(RunLoadStoreMap) {
167 SimplifiedLoweringTester<Object*> t(kMachineTagged, kMachineTagged);
168 FieldAccess access = ForJSObjectMap();
169 Node* load = t.LoadField(access, t.Parameter(0));
170 t.StoreField(access, t.Parameter(1), load);
175 if (Pipeline::SupportedTarget()) {
177 Handle<JSObject> src = TestObject();
178 Handle<Map> src_map(src->map());
179 Handle<JSObject> dst = TestObject();
180 CHECK(src->map() != dst->map());
181 Object* result = t.Call(*src, *dst); // TODO(titzer): raw pointers in call
182 CHECK(result->IsMap());
183 CHECK_EQ(*src_map, result);
// The store must have copied src's map onto dst.
184 CHECK(*src_map == dst->map());
// Copies element 0 of a FixedArray to element 1 via lowered
// LoadElement/StoreElement. NOTE(review): the lines that populate the
// array (203-204, presumably array->set(0/1, ...)) are elided in this
// dump, as are Return/LowerAllNodes and closing braces.
189 TEST(RunLoadStoreFixedArrayIndex) {
190 SimplifiedLoweringTester<Object*> t(kMachineTagged);
191 ElementAccess access = ForFixedArrayElement();
192 Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
193 t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
198 if (Pipeline::SupportedTarget()) {
200 Handle<FixedArray> array = t.factory()->NewFixedArray(2);
201 Handle<JSObject> src = TestObject();
202 Handle<JSObject> dst = TestObject();
205 Object* result = t.Call(*array);
206 CHECK_EQ(*src, result);
207 CHECK_EQ(*src, array->get(0));
// After the store, element 1 must equal element 0.
208 CHECK_EQ(*src, array->get(1));
// Loads byte [index] out of an ArrayBuffer's raw backing store and stores
// it at [index + 1], then verifies the whole buffer contents.
// NOTE(review): lines are elided in this dump (the load binding at 219-220,
// the stored value argument at 222, the data-initialization loop body at
// 234, and the closing braces are not all visible).
213 TEST(RunLoadStoreArrayBuffer) {
214 SimplifiedLoweringTester<Object*> t(kMachineTagged);
215 const int index = 12;
216 ElementAccess buffer_access = ForBackingStoreElement(kMachineWord8);
// First load the untagged backing-store pointer, then index into it.
217 Node* backing_store =
218 t.LoadField(ForArrayBufferBackingStore(), t.Parameter(0));
220 t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
221 t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
223 t.Return(t.jsgraph.TrueConstant());
227 if (Pipeline::SupportedTarget()) {
229 Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
230 const int array_length = 2 * index;
231 Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
232 uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
// Presumably fills data[i] = i — the loop body (line 234) is elided here.
233 for (int i = 0; i < array_length; i++) {
237 // TODO(titzer): raw pointers in call
238 Object* result = t.Call(*array);
239 CHECK_EQ(t.isolate()->heap()->true_value(), result);
240 for (int i = 0; i < array_length; i++) {
241 uint8_t expected = i;
// Only the byte at index+1 was overwritten (with the byte from index).
242 if (i == (index + 1)) expected = index;
243 CHECK_EQ(data[i], expected);
// Loads a tagged Smi field at several offsets from an untagged (raw
// pointer) base. NOTE(review): lines are elided in this dump — the Return
// of the load, LowerAllNodes, the smis[i] assignment inside the j-loop,
// and closing braces are not visible.
249 TEST(RunLoadFieldFromUntaggedBase) {
250 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
// One sub-test per array slot, using the slot's byte offset as the field
// offset from the untagged base.
252 for (size_t i = 0; i < ARRAY_SIZE(smis); i++) {
253 int offset = static_cast<int>(i * sizeof(Smi*));
254 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
255 Type::Integral32(), kMachineTagged};
257 SimplifiedLoweringTester<Object*> t;
258 Node* load = t.LoadField(access, t.PointerConstant(smis));
262 if (!Pipeline::SupportedTarget()) continue;
264 for (int j = -5; j <= 5; j++) {
265 Smi* expected = Smi::FromInt(j);
266 smis[i] = expected;
267 CHECK_EQ(expected, t.Call());
// Stores a tagged Smi parameter into a field at several offsets from an
// untagged base, and verifies the memory was written. NOTE(review): lines
// are elided in this dump (Return/LowerAllNodes and closing braces).
273 TEST(RunStoreFieldToUntaggedBase) {
274 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
276 for (size_t i = 0; i < ARRAY_SIZE(smis); i++) {
277 int offset = static_cast<int>(i * sizeof(Smi*));
278 FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
279 Type::Integral32(), kMachineTagged};
281 SimplifiedLoweringTester<Object*> t(kMachineTagged);
282 Node* p0 = t.Parameter(0);
283 t.StoreField(access, t.PointerConstant(smis), p0);
287 if (!Pipeline::SupportedTarget()) continue;
289 for (int j = -5; j <= 5; j++) {
290 Smi* expected = Smi::FromInt(j);
// Poison the slot first so a no-op store would be detected.
291 smis[i] = Smi::FromInt(-100);
292 CHECK_EQ(expected, t.Call(expected));
293 CHECK_EQ(expected, smis[i]);
// Loads element [j] at header-size offset [i] from an untagged base,
// sweeping both the header size and the element index over the array.
// NOTE(review): lines are elided in this dump — the ElementAccess
// initializer is truncated mid-brace (line 307), and Return/LowerAllNodes
// and closing braces are not visible.
299 TEST(RunLoadElementFromUntaggedBase) {
300 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
301 Smi::FromInt(4), Smi::FromInt(5)};
303 for (size_t i = 0; i < ARRAY_SIZE(smis); i++) { // for header sizes
304 for (size_t j = 0; (i + j) < ARRAY_SIZE(smis); j++) { // for element index
305 int offset = static_cast<int>(i * sizeof(Smi*));
306 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
309 SimplifiedLoweringTester<Object*> t;
310 Node* load = t.LoadElement(access, t.PointerConstant(smis),
311 t.Int32Constant(static_cast<int>(j)));
315 if (!Pipeline::SupportedTarget()) continue;
317 for (int k = -5; k <= 5; k++) {
318 Smi* expected = Smi::FromInt(k);
319 smis[i + j] = expected;
320 CHECK_EQ(expected, t.Call());
// Mirror of the load test above: stores a Smi parameter into element [j]
// at header-size offset [i] from an untagged base and verifies memory.
// NOTE(review): lines are elided in this dump — the ElementAccess
// initializer is truncated (line 335), and Return/LowerAllNodes and
// closing braces are not visible.
327 TEST(RunStoreElementFromUntaggedBase) {
328 Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
329 Smi::FromInt(4), Smi::FromInt(5)};
331 for (size_t i = 0; i < ARRAY_SIZE(smis); i++) { // for header sizes
332 for (size_t j = 0; (i + j) < ARRAY_SIZE(smis); j++) { // for element index
333 int offset = static_cast<int>(i * sizeof(Smi*));
334 ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
337 SimplifiedLoweringTester<Object*> t(kMachineTagged);
338 Node* p0 = t.Parameter(0);
339 t.StoreElement(access, t.PointerConstant(smis),
340 t.Int32Constant(static_cast<int>(j)), p0);
344 if (!Pipeline::SupportedTarget()) continue;
346 for (int k = -5; k <= 5; k++) {
347 Smi* expected = Smi::FromInt(k);
// Poison the slot first so a missing store would be detected.
348 smis[i + j] = Smi::FromInt(-100);
349 CHECK_EQ(expected, t.Call(expected));
350 CHECK_EQ(expected, smis[i + j]);
353 // TODO(titzer): assert the contents of the array.
359 // A helper class for accessing fields and elements of various types, on both
360 // tagged and untagged base pointers. Contains both tagged and untagged buffers
361 // for testing direct memory access from generated code.
// NOTE(review): this is a numbered dump with lines elided throughout the
// class — member declarations (tagged/rep/num_elements/untagged_array),
// loop-builder calls, several returns, and closing braces are not visible.
362 template <typename E>
363 class AccessTester : public HandleAndZoneScope {
367 E* original_elements;
370 Handle<ByteArray> tagged_array; // TODO(titzer): use FixedArray for tagged.
// Allocates both an untagged malloc'd buffer and a tagged ByteArray of the
// same byte size; contents come from {orig} via Reinitialize() (elided).
372 AccessTester(bool t, MachineType r, E* orig, size_t num)
375 original_elements(orig),
377 untagged_array(static_cast<E*>(malloc(ByteSize()))),
378 tagged_array(main_isolate()->factory()->NewByteArray(
379 static_cast<int>(ByteSize()))) {
383 ~AccessTester() { free(untagged_array); }
385 size_t ByteSize() { return num_elements * sizeof(E); }
387 // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
388 void Reinitialize() {
389 memcpy(untagged_array, original_elements, ByteSize());
390 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
391 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
392 memcpy(raw, original_elements, ByteSize());
395 // Create and run code that copies the element in either {untagged_array}
396 // or {tagged_array} at index {from_index} to index {to_index}.
397 void RunCopyElement(int from_index, int to_index) {
398 // TODO(titzer): test element and field accesses where the base is not
399 // a constant in the code.
400 BoundsCheck(from_index);
401 BoundsCheck(to_index);
402 ElementAccess access = GetElementAccess();
404 SimplifiedLoweringTester<Object*> t;
405 Node* ptr = GetBaseNode(&t);
406 Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
407 t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
408 t.Return(t.jsgraph.TrueConstant());
411 if (Pipeline::SupportedTarget()) {
413 Object* result = t.Call();
414 CHECK_EQ(t.isolate()->heap()->true_value(), result);
418 // Create and run code that copies the field in either {untagged_array}
419 // or {tagged_array} at index {from_index} to index {to_index}.
420 void RunCopyField(int from_index, int to_index) {
421 BoundsCheck(from_index);
422 BoundsCheck(to_index);
423 FieldAccess from_access = GetFieldAccess(from_index);
424 FieldAccess to_access = GetFieldAccess(to_index);
426 SimplifiedLoweringTester<Object*> t;
427 Node* ptr = GetBaseNode(&t);
428 Node* load = t.LoadField(from_access, ptr);
429 t.StoreField(to_access, ptr, load);
430 t.Return(t.jsgraph.TrueConstant());
433 if (Pipeline::SupportedTarget()) {
435 Object* result = t.Call();
436 CHECK_EQ(t.isolate()->heap()->true_value(), result);
440 // Create and run code that copies the elements from {this} to {that}.
// Builds a counted loop with LoopBuilder (construction/End calls partly
// elided here), copying one element per iteration.
441 void RunCopyElements(AccessTester<E>* that) {
442 SimplifiedLoweringTester<Object*> t;
444 Node* one = t.Int32Constant(1);
445 Node* index = t.Int32Constant(0);
446 Node* limit = t.Int32Constant(static_cast<int>(num_elements));
447 t.environment()->Push(index);
448 Node* src = this->GetBaseNode(&t);
449 Node* dst = that->GetBaseNode(&t);
451 LoopBuilder loop(&t);
453 // Loop exit condition
454 index = t.environment()->Top();
455 Node* condition = t.Int32LessThan(index, limit);
456 loop.BreakUnless(condition);
457 // dst[index] = src[index]
458 index = t.environment()->Pop();
459 Node* load = t.LoadElement(this->GetElementAccess(), src, index);
460 t.StoreElement(that->GetElementAccess(), dst, index, load);
462 index = t.Int32Add(index, one);
463 t.environment()->Push(index);
468 index = t.environment()->Pop();
469 t.Return(t.jsgraph.TrueConstant());
472 if (Pipeline::SupportedTarget()) {
474 Object* result = t.Call();
475 CHECK_EQ(t.isolate()->heap()->true_value(), result);
// Reads element {index} from whichever buffer (tagged/untagged) is active;
// the tagged branch (around line 483) is partly elided here.
479 E GetElement(int index) {
482 E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
485 return untagged_array[index];
490 ElementAccess GetElementAccess() {
491 ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
492 tagged ? FixedArrayBase::kHeaderSize : 0,
497 FieldAccess GetFieldAccess(int field) {
498 int offset = field * sizeof(E);
499 FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
500 offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
501 Handle<Name>(), Type::Any(), rep};
// Base pointer node: a HeapConstant for the tagged buffer, a raw
// PointerConstant for the untagged one.
505 template <typename T>
506 Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
507 return tagged ? t->HeapConstant(tagged_array)
508 : t->PointerConstant(untagged_array);
511 void BoundsCheck(int index) {
513 CHECK_LT(index, static_cast<int>(num_elements));
514 CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
// Drives AccessTester over every (taggedness, field/element, index) combo:
// first copy-one-slot tests with per-element verification, then whole-array
// copies between all four tagged/untagged source/destination pairings.
// NOTE(review): lines are elided in this dump — Reinitialize() calls, the
// `E expect =` binding before line 536, and closing braces are not visible.
519 template <typename E>
520 static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
521 int num_elements = static_cast<int>(num);
523 for (int taggedness = 0; taggedness < 2; taggedness++) {
524 AccessTester<E> a(taggedness == 1, rep, original_elements, num);
525 for (int field = 0; field < 2; field++) {
526 for (int i = 0; i < num_elements - 1; i++) {
529 a.RunCopyField(i, i + 1); // Test field read/write.
531 a.RunCopyElement(i, i + 1); // Test element read/write.
533 if (Pipeline::SupportedTarget()) { // verify.
534 for (int j = 0; j < num_elements; j++) {
// Slot i+1 now holds the value copied from slot i.
536 j == (i + 1) ? original_elements[i] : original_elements[j];
537 CHECK_EQ(expect, a.GetElement(j));
// Cross-product of tagged/untagged source and destination buffers.
544 for (int tf = 0; tf < 2; tf++) {
545 for (int tt = 0; tt < 2; tt++) {
546 AccessTester<E> a(tf == 1, rep, original_elements, num);
547 AccessTester<E> b(tt == 1, rep, original_elements, num);
548 a.RunCopyElements(&b);
549 if (Pipeline::SupportedTarget()) { // verify.
550 for (int i = 0; i < num_elements; i++) {
551 CHECK_EQ(a.GetElement(i), b.GetElement(i));
// Instantiations of RunAccessTest for each machine representation.
// NOTE(review): closing braces of each TEST are elided in this dump.
559 TEST(RunAccessTests_uint8) {
560 uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
561 0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
562 RunAccessTest<uint8_t>(kMachineWord8, data, ARRAY_SIZE(data));
566 TEST(RunAccessTests_uint16) {
567 uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
568 RunAccessTest<uint16_t>(kMachineWord16, data, ARRAY_SIZE(data));
572 TEST(RunAccessTests_int32) {
573 int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
574 RunAccessTest<int32_t>(kMachineWord32, data, ARRAY_SIZE(data));
// Builds a 64-bit constant from two 32-bit halves (hex literal suffix glued
// on by the preprocessor).
578 #define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
// 64-bit accesses only make sense on 64-bit targets.
581 TEST(RunAccessTests_int64) {
582 if (kPointerSize != 8) return;
583 int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
584 V8_2PART_INT64(0x20212223, 24252627),
585 V8_2PART_INT64(0x30313233, 34353637),
586 V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
587 V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
588 RunAccessTest<int64_t>(kMachineWord64, data, ARRAY_SIZE(data));
592 TEST(RunAccessTests_float64) {
593 double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
594 RunAccessTest<double>(kMachineFloat64, data, ARRAY_SIZE(data));
598 TEST(RunAccessTests_Smi) {
599 Smi* data[] = {Smi::FromInt(-1), Smi::FromInt(-9),
600 Smi::FromInt(0), Smi::FromInt(666),
601 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
602 RunAccessTest<Smi*>(kMachineTagged, data, ARRAY_SIZE(data));
606 // Fills in most of the nodes of the graph in order to make tests shorter.
// Pre-wired graph (start/end/return/two typed parameters) plus helpers to
// assert what a simplified op lowers to. NOTE(review): this is a numbered
// dump with lines elided — member declarations (typer, jsgraph, p0, p1,
// ret, start, end), the `Node* ret =` binding before line 624, the Lower()
// header before line 648, and several returns/closing braces are missing.
607 class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
617 explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None())
618 : GraphAndBuilders(main_zone()),
620 jsgraph(graph(), common(), &typer) {
621 start = graph()->NewNode(common()->Start(2));
622 graph()->SetStart(start);
624 graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
625 end = graph()->NewNode(common()->End(), ret);
626 graph()->SetEnd(end);
627 p0 = graph()->NewNode(common()->Parameter(0), start);
628 p1 = graph()->NewNode(common()->Parameter(1), start);
629 NodeProperties::SetBounds(p0, Bounds(p0_type));
630 NodeProperties::SetBounds(p1, Bounds(p1_type));
// Builds op(p0, p1), returns it, lowers, then checks the node's opcode
// was rewritten to {expected}.
633 void CheckLoweringBinop(IrOpcode::Value expected, Operator* op) {
634 Node* node = Return(graph()->NewNode(op, p0, p1));
636 CHECK_EQ(expected, node->opcode());
// Same, but the binop feeds a truncation node before the return.
639 void CheckLoweringTruncatedBinop(IrOpcode::Value expected, Operator* op,
641 Node* node = graph()->NewNode(op, p0, p1);
642 Return(graph()->NewNode(trunc, node));
644 CHECK_EQ(expected, node->opcode());
// Lower() — header line elided; runs SimplifiedLowering with no source
// position table.
648 SimplifiedLowering lowering(&jsgraph, NULL);
649 lowering.LowerAllNodes();
652 // Inserts the node as the return value of the graph.
653 Node* Return(Node* node) {
654 ret->ReplaceInput(0, node);
658 // Inserts the node as the effect input to the return of the graph.
659 void Effect(Node* node) { ret->ReplaceInput(1, node); }
// Produces a machine-level node whose output already has representation
// {type}, for testing use-site change insertion.
661 Node* ExampleWithOutput(RepType type) {
662 // TODO(titzer): use parameters with guaranteed representations.
664 return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
665 jsgraph.Int32Constant(1));
666 } else if (type & tUint32) {
667 return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
668 jsgraph.Int32Constant(1));
669 } else if (type & rFloat64) {
670 return graph()->NewNode(machine()->Float64Add(),
671 jsgraph.Float64Constant(1),
672 jsgraph.Float64Constant(1));
673 } else if (type & rBit) {
674 return graph()->NewNode(machine()->Word32Equal(),
675 jsgraph.Int32Constant(1),
676 jsgraph.Int32Constant(1));
677 } else if (type & rWord64) {
678 return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
681 CHECK(type & rTagged);
// Produces a node that consumes {node} with representation {type}.
686 Node* Use(Node* node, RepType type) {
688 return graph()->NewNode(machine()->Int32LessThan(), node,
689 jsgraph.Int32Constant(1));
690 } else if (type & tUint32) {
691 return graph()->NewNode(machine()->Uint32LessThan(), node,
692 jsgraph.Int32Constant(1));
693 } else if (type & rFloat64) {
694 return graph()->NewNode(machine()->Float64Add(), node,
695 jsgraph.Float64Constant(1));
696 } else if (type & rWord64) {
697 return graph()->NewNode(machine()->Int64LessThan(), node,
700 return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
701 jsgraph.TrueConstant());
// Wraps {cond} in a Branch/IfTrue/IfFalse/Merge diamond and routes the
// merge into the return's control input.
705 Node* Branch(Node* cond) {
706 Node* br = graph()->NewNode(common()->Branch(), cond, start);
707 Node* tb = graph()->NewNode(common()->IfTrue(), br);
708 Node* fb = graph()->NewNode(common()->IfFalse(), br);
709 Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
710 ret->ReplaceInput(NodeProperties::FirstControlIndex(ret), m);
714 Node* Int64Constant(int64_t v) {
715 return graph()->NewNode(common()->Int64Constant(v));
718 SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
719 MachineOperatorBuilder* machine() { return &main_machine_; }
720 CommonOperatorBuilder* common() { return &main_common_; }
721 Graph* graph() { return main_graph_; }
// Four tests covering BooleanNot lowering for each combination of input
// representation (rBit vs rTagged) and use representation (rBit vs
// rTagged). Expected lowering: WordEqual(input, zero-or-FalseConstant),
// wrapped in ChangeBitToBool when the use is tagged.
// NOTE(review): t.Lower() calls and closing braces are elided in this dump,
// as is the `Node* b = t.Use(t.p0, rTagged)`-style binding before lines
// 761 and 776.
725 TEST(LowerBooleanNot_bit_bit) {
726 // BooleanNot(x: rBit) used as rBit
727 TestingGraph t(Type::Boolean());
728 Node* b = t.ExampleWithOutput(rBit);
729 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
730 Node* use = t.Branch(inv);
732 Node* cmp = use->InputAt(0);
733 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
734 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
// Bit inputs are compared against integer zero.
735 Node* f = t.jsgraph.Int32Constant(0);
736 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
740 TEST(LowerBooleanNot_bit_tagged) {
741 // BooleanNot(x: rBit) used as rTagged
742 TestingGraph t(Type::Boolean());
743 Node* b = t.ExampleWithOutput(rBit);
744 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
745 Node* use = t.Use(inv, rTagged);
748 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
749 Node* cmp = use->InputAt(0)->InputAt(0);
750 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
751 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
752 Node* f = t.jsgraph.Int32Constant(0);
753 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
757 TEST(LowerBooleanNot_tagged_bit) {
758 // BooleanNot(x: rTagged) used as rBit
759 TestingGraph t(Type::Boolean());
761 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
762 Node* use = t.Branch(inv);
764 Node* cmp = use->InputAt(0);
765 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
766 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
// Tagged inputs are compared against the FalseConstant heap value.
767 Node* f = t.jsgraph.FalseConstant();
768 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
772 TEST(LowerBooleanNot_tagged_tagged) {
773 // BooleanNot(x: rTagged) used as rTagged
774 TestingGraph t(Type::Boolean());
776 Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
777 Node* use = t.Use(inv, rTagged);
780 CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
781 Node* cmp = use->InputAt(0)->InputAt(0);
782 CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
783 CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
784 Node* f = t.jsgraph.FalseConstant();
785 CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
// Type combinations shared by several lowering tests below.
789 static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
790 Type::Number(), Type::Any()};
// Number comparisons on Signed32 inputs lower to signed int32 compares.
// NOTE(review): closing braces of each TEST are elided in this dump.
793 TEST(LowerNumberCmp_to_int32) {
794 TestingGraph t(Type::Signed32(), Type::Signed32());
796 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
797 t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
798 t.simplified()->NumberLessThan());
799 t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
800 t.simplified()->NumberLessThanOrEqual());
// Unsigned32 inputs lower to unsigned compares.
804 TEST(LowerNumberCmp_to_uint32) {
805 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
807 t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
808 t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
809 t.simplified()->NumberLessThan());
810 t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
811 t.simplified()->NumberLessThanOrEqual());
// General Number/Any inputs fall back to float64 compares.
815 TEST(LowerNumberCmp_to_float64) {
816 static Type* types[] = {Type::Number(), Type::Any()};
818 for (size_t i = 0; i < ARRAY_SIZE(types); i++) {
819 TestingGraph t(types[i], types[i]);
821 t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
822 t.simplified()->NumberEqual());
823 t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
824 t.simplified()->NumberLessThan());
825 t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
826 t.simplified()->NumberLessThanOrEqual());
// Add/Subtract lower to int32 ops when the result is explicitly truncated.
831 TEST(LowerNumberAddSub_to_int32) {
832 TestingGraph t(Type::Signed32(), Type::Signed32());
833 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
834 t.simplified()->NumberAdd(),
835 t.simplified()->NumberToInt32());
836 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
837 t.simplified()->NumberSubtract(),
838 t.simplified()->NumberToInt32());
842 TEST(LowerNumberAddSub_to_uint32) {
843 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
844 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
845 t.simplified()->NumberAdd(),
846 t.simplified()->NumberToUint32());
847 t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
848 t.simplified()->NumberSubtract(),
849 t.simplified()->NumberToUint32());
853 TEST(LowerNumberAddSub_to_float64) {
854 for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
855 TestingGraph t(test_types[i], test_types[i]);
857 t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
858 t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
859 t.simplified()->NumberSubtract());
// Division and modulus always lower to float64 ops here.
864 TEST(LowerNumberDivMod_to_float64) {
865 for (size_t i = 0; i < ARRAY_SIZE(test_types); i++) {
866 TestingGraph t(test_types[i], test_types[i]);
868 t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
869 t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
870 t.simplified()->NumberModulus());
// Asserts {node} is a {change}-opcode node whose input is {of}.
// NOTE(review): the function's closing brace and each TEST's t.Lower()
// call and closing brace are elided in this dump.
875 static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
876 CHECK_EQ(change, node->opcode());
877 CHECK_EQ(of, node->InputAt(0));
// NumberToInt32 of an already-Signed32 tagged value disappears entirely
// when used as tagged.
881 TEST(LowerNumberToInt32_to_nop) {
882 // NumberToInt32(x: rTagged | tInt32) used as rTagged
883 TestingGraph t(Type::Signed32());
884 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
885 Node* use = t.Use(trunc, rTagged);
888 CHECK_EQ(t.p0, use->InputAt(0));
892 TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
893 // NumberToInt32(x: rTagged | tInt32) used as rFloat64
894 TestingGraph t(Type::Signed32());
895 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
896 Node* use = t.Use(trunc, rFloat64);
899 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
903 TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
904 // NumberToInt32(x: rTagged | tInt32) used as rWord32
905 TestingGraph t(Type::Signed32());
906 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
907 Node* use = t.Use(trunc, tInt32);
910 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
// Placeholder tests for float64-input truncations, not yet implemented.
914 TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
915 // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rTagged
919 TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
920 // TODO(titzer): NumberToInt32(x: rFloat64 | tInt32) used as rWord32 | tInt32
924 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
925 // TODO(titzer): NumberToInt32(x: rFloat64) used as rWord32 | tUint32
// Unsigned mirror of the NumberToInt32 tests above.
929 TEST(LowerNumberToUint32_to_nop) {
930 // NumberToUint32(x: rTagged | tUint32) used as rTagged
931 TestingGraph t(Type::Unsigned32());
932 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
933 Node* use = t.Use(trunc, rTagged);
936 CHECK_EQ(t.p0, use->InputAt(0));
940 TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
941 // NumberToUint32(x: rTagged | tUint32) used as rWord32
942 TestingGraph t(Type::Unsigned32());
943 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
944 Node* use = t.Use(trunc, rFloat64);
947 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
951 TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
952 // NumberToUint32(x: rTagged | tUint32) used as rWord32
953 TestingGraph t(Type::Unsigned32());
954 Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
955 Node* use = t.Use(trunc, tUint32);
958 CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
962 TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
963 // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rTagged
967 TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
968 // TODO(titzer): NumberToUint32(x: rFloat64 | tUint32) used as rWord32
972 TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
973 // TODO(titzer): NumberToUint32(x: rFloat64) used as rWord32
// ReferenceEqual lowers to the machine's word-equality op.
// NOTE(review): closing braces of each function/TEST are elided in this
// dump, as is the t.Lower() call in CheckChangeInsertion.
977 TEST(LowerReferenceEqual_to_wordeq) {
978 TestingGraph t(Type::Any(), Type::Any());
979 IrOpcode::Value opcode =
980 static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
981 t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
// Disabled: string ops are expected to lower to runtime calls eventually.
985 TEST(LowerStringOps_to_rtcalls) {
986 if (false) { // TODO(titzer): lower StringOps to runtime calls
987 TestingGraph t(Type::String(), Type::String());
988 t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringEqual());
989 t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringLessThan());
990 t.CheckLoweringBinop(IrOpcode::kCall,
991 t.simplified()->StringLessThanOrEqual());
992 t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringAdd());
// Builds a value of representation {from}, uses it as {to}, lowers, and
// checks that the expected representation-change node was inserted.
997 void CheckChangeInsertion(IrOpcode::Value expected, RepType from, RepType to) {
998 TestingGraph t(Type::Any());
999 Node* in = t.ExampleWithOutput(from);
1000 Node* use = t.Use(in, to);
1003 CHECK_EQ(expected, use->InputAt(0)->opcode());
1004 CHECK_EQ(in, use->InputAt(0)->InputAt(0));
// Exercises every basic representation-change pair.
1008 TEST(InsertBasicChanges) {
1010 // TODO(titzer): these changes need the output to have the right type.
1011 CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, rFloat64, tInt32);
1012 CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, rFloat64, tUint32);
1013 CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, rTagged, tInt32);
1014 CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, rTagged, tUint32);
1017 CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, rFloat64, rTagged);
1018 CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, rTagged, rFloat64);
1020 CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, tInt32, rFloat64);
1021 CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, tInt32, rTagged);
1023 CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, tUint32, rFloat64);
1024 CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, tUint32, rTagged);
// Builds binop(p0, p1), returns it, lowers, then checks that both inputs
// got the expected input-change nodes and the result got the expected
// output-change node. NOTE(review): the t->Return/t->Lower lines and all
// closing braces are elided in this dump.
1028 static void CheckChangesAroundBinop(TestingGraph* t, Operator* op,
1029 IrOpcode::Value input_change,
1030 IrOpcode::Value output_change) {
1031 Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
1034 CHECK_EQ(input_change, binop->InputAt(0)->opcode());
1035 CHECK_EQ(input_change, binop->InputAt(1)->opcode());
1036 CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
1037 CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
1038 CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
1039 CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
// Int32 machine binops: tagged->int32 on the way in, int32->tagged out.
1043 TEST(InsertChangesAroundInt32Binops) {
1044 TestingGraph t(Type::Signed32(), Type::Signed32());
1046 Operator* ops[] = {t.machine()->Int32Add(), t.machine()->Int32Sub(),
1047 t.machine()->Int32Mul(), t.machine()->Int32Div(),
1048 t.machine()->Int32Mod(), t.machine()->Word32And(),
1049 t.machine()->Word32Or(), t.machine()->Word32Xor(),
1050 t.machine()->Word32Shl(), t.machine()->Word32Sar()};
1052 for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
1053 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1054 IrOpcode::kChangeInt32ToTagged);
// Comparisons produce a bit, so the output change is bit->bool.
1059 TEST(InsertChangesAroundInt32Cmp) {
1060 TestingGraph t(Type::Signed32(), Type::Signed32());
1062 Operator* ops[] = {t.machine()->Int32LessThan(),
1063 t.machine()->Int32LessThanOrEqual()};
1065 for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
1066 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
1067 IrOpcode::kChangeBitToBool);
1072 TEST(InsertChangesAroundUint32Cmp) {
1073 TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
1075 Operator* ops[] = {t.machine()->Uint32LessThan(),
1076 t.machine()->Uint32LessThanOrEqual()};
1078 for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
1079 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
1080 IrOpcode::kChangeBitToBool);
// Float64 binops: tagged->float64 in, float64->tagged out.
1085 TEST(InsertChangesAroundFloat64Binops) {
1086 TestingGraph t(Type::Number(), Type::Number());
1089 t.machine()->Float64Add(), t.machine()->Float64Sub(),
1090 t.machine()->Float64Mul(), t.machine()->Float64Div(),
1091 t.machine()->Float64Mod(),
1094 for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
1095 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1096 IrOpcode::kChangeFloat64ToTagged);
1101 TEST(InsertChangesAroundFloat64Cmp) {
1102 TestingGraph t(Type::Number(), Type::Number());
1104 Operator* ops[] = {t.machine()->Float64Equal(),
1105 t.machine()->Float64LessThan(),
1106 t.machine()->Float64LessThanOrEqual()};
1108 for (size_t i = 0; i < ARRAY_SIZE(ops); i++) {
1109 CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
1110 IrOpcode::kChangeBitToBool);
1115 void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
1116 Int32Matcher index = Int32Matcher(load_or_store->InputAt(1));
1117 CHECK(index.Is(access.offset - access.tag()));
1121 Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
1122 Int32BinopMatcher index(load_or_store->InputAt(1));
1123 CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode());
1124 CHECK(index.right().Is(access.header_size - access.tag()));
1126 int element_size = 0;
1127 switch (access.representation) {
1128 case kMachineTagged:
1129 element_size = kPointerSize;
1134 case kMachineWord16:
1137 case kMachineWord32:
1140 case kMachineWord64:
1141 case kMachineFloat64:
1149 if (element_size != 1) {
1150 Int32BinopMatcher mul(index.left().node());
1151 CHECK_EQ(IrOpcode::kInt32Mul, mul.node()->opcode());
1152 CHECK(mul.right().Is(element_size));
1153 return mul.left().node();
1155 return index.left().node();
// Machine-level representations exercised by the load/store lowering tests.
static const MachineType machine_reps[] = {kMachineWord8, kMachineWord16,
                                           kMachineWord32, kMachineWord64,
                                           kMachineFloat64, kMachineTagged};

// Representation types corresponding to those above.
// NOTE(review): word8/word16 are typed tUint32 while word32 is tInt32 —
// presumably matching the lowering's widening rules; confirm if changed.
static const RepType rep_types[] = {static_cast<RepType>(rWord32 | tUint32),
                                    static_cast<RepType>(rWord32 | tUint32),
                                    static_cast<RepType>(rWord32 | tInt32),
                                    static_cast<RepType>(rWord64),
                                    static_cast<RepType>(rFloat64 | tNumber),
                                    static_cast<RepType>(rTagged | tAny)};
1174 TEST(LowerLoadField_to_load) {
1175 TestingGraph t(Type::Any(), Type::Signed32());
1177 for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
1178 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1179 Handle<Name>::null(), Type::Any(), machine_reps[i]};
1182 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1183 Node* use = t.Use(load, rep_types[i]);
1186 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1187 CHECK_EQ(t.p0, load->InputAt(0));
1188 CheckFieldAccessArithmetic(access, load);
1190 MachineType rep = OpParameter<MachineType>(load);
1191 CHECK_EQ(machine_reps[i], rep);
1196 TEST(LowerStoreField_to_store) {
1197 TestingGraph t(Type::Any(), Type::Signed32());
1199 for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
1200 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1201 Handle<Name>::null(), Type::Any(), machine_reps[i]};
1204 Node* val = t.ExampleWithOutput(rep_types[i]);
1205 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1206 val, t.start, t.start);
1209 CHECK_EQ(IrOpcode::kStore, store->opcode());
1210 CHECK_EQ(val, store->InputAt(2));
1211 CheckFieldAccessArithmetic(access, store);
1213 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1214 if (rep_types[i] & rTagged) {
1215 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind);
1217 CHECK_EQ(machine_reps[i], rep.rep);
1222 TEST(LowerLoadElement_to_load) {
1223 TestingGraph t(Type::Any(), Type::Signed32());
1225 for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
1226 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1227 Type::Any(), machine_reps[i]};
1229 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1231 Node* use = t.Use(load, rep_types[i]);
1234 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1235 CHECK_EQ(t.p0, load->InputAt(0));
1236 CheckElementAccessArithmetic(access, load);
1238 MachineType rep = OpParameter<MachineType>(load);
1239 CHECK_EQ(machine_reps[i], rep);
1244 TEST(LowerStoreElement_to_store) {
1245 TestingGraph t(Type::Any(), Type::Signed32());
1247 for (size_t i = 0; i < ARRAY_SIZE(machine_reps); i++) {
1248 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1249 Type::Any(), machine_reps[i]};
1251 Node* val = t.ExampleWithOutput(rep_types[i]);
1252 Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1253 t.p1, val, t.start, t.start);
1256 CHECK_EQ(IrOpcode::kStore, store->opcode());
1257 CHECK_EQ(val, store->InputAt(2));
1258 CheckElementAccessArithmetic(access, store);
1260 StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
1261 if (rep_types[i] & rTagged) {
1262 CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind);
1264 CHECK_EQ(machine_reps[i], rep.rep);
1269 TEST(InsertChangeForLoadElementIndex) {
1270 // LoadElement(obj: Tagged, index: tInt32 | rTagged) =>
1271 // Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
1272 TestingGraph t(Type::Any(), Type::Signed32());
1273 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1276 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1280 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1281 CHECK_EQ(t.p0, load->InputAt(0));
1283 Node* index = CheckElementAccessArithmetic(access, load);
1284 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
1288 TEST(InsertChangeForStoreElementIndex) {
1289 // StoreElement(obj: Tagged, index: tInt32 | rTagged, val) =>
1290 // Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
1291 TestingGraph t(Type::Any(), Type::Signed32());
1292 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1296 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
1297 t.jsgraph.TrueConstant(), t.start, t.start);
1300 CHECK_EQ(IrOpcode::kStore, store->opcode());
1301 CHECK_EQ(t.p0, store->InputAt(0));
1303 Node* index = CheckElementAccessArithmetic(access, store);
1304 CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
1308 TEST(InsertChangeForLoadElement) {
1309 // TODO(titzer): test all load/store representation change insertions.
1310 TestingGraph t(Type::Any(), Type::Signed32());
1311 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1314 Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
1318 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1319 CHECK_EQ(t.p0, load->InputAt(0));
1320 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
1324 TEST(InsertChangeForLoadField) {
1325 // TODO(titzer): test all load/store representation change insertions.
1326 TestingGraph t(Type::Any(), Type::Signed32());
1327 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1328 Handle<Name>::null(), Type::Any(), kMachineFloat64};
1331 t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
1334 CHECK_EQ(IrOpcode::kLoad, load->opcode());
1335 CHECK_EQ(t.p0, load->InputAt(0));
1336 CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
1340 TEST(InsertChangeForStoreElement) {
1341 // TODO(titzer): test all load/store representation change insertions.
1342 TestingGraph t(Type::Any(), Type::Signed32());
1343 ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
1347 t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
1348 t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
1352 CHECK_EQ(IrOpcode::kStore, store->opcode());
1353 CHECK_EQ(t.p0, store->InputAt(0));
1354 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
1358 TEST(InsertChangeForStoreField) {
1359 // TODO(titzer): test all load/store representation change insertions.
1360 TestingGraph t(Type::Any(), Type::Signed32());
1361 FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
1362 Handle<Name>::null(), Type::Any(), kMachineFloat64};
1364 Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
1365 t.p1, t.start, t.start);
1369 CHECK_EQ(IrOpcode::kStore, store->opcode());
1370 CHECK_EQ(t.p0, store->InputAt(0));
1371 CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));