1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/simplified-lowering.h"
9 #include "src/base/bits.h"
10 #include "src/code-factory.h"
11 #include "src/compiler/common-operator.h"
12 #include "src/compiler/diamond.h"
13 #include "src/compiler/linkage.h"
14 #include "src/compiler/node-matchers.h"
15 #include "src/compiler/node-properties.h"
16 #include "src/compiler/operator-properties.h"
17 #include "src/compiler/representation-change.h"
18 #include "src/compiler/simplified-lowering.h"
19 #include "src/compiler/simplified-operator.h"
20 #include "src/compiler/source-position.h"
21 #include "src/objects.h"
27 // Macro for outputting trace information from representation inference.
29 if (FLAG_trace_representation) PrintF x
31 // Representation selection and lowering of {Simplified} operators to machine
32 // operators are intertwined. We use a fixpoint calculation to compute both the
33 // output representation and the best possible lowering for {Simplified} nodes.
34 // Representation change insertion ensures that all values are in the correct
35 // machine representation after this phase, as dictated by the machine
36 // operators themselves.
38 // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
39 // backwards from uses to definitions, around cycles in phis, according
40 // to local rules for each operator.
41 // During this phase, the usage information for a node determines the best
42 // possible lowering for each operator so far, and that in turn determines
43 // the output representation.
44 // Therefore, to be correct, this phase must iterate to a fixpoint before
45 // the next phase can begin.
48 // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
49 // operators for some nodes, expanding some nodes to multiple nodes, or
50 // removing some (redundant) nodes.
51 // During this phase, use the {RepresentationChanger} to insert
52 // representation changes between uses that demand a particular
53 // representation and nodes that produce a different representation.
58 class RepresentationSelector {
60 // Information for each node tracked during the fixpoint.
// NOTE(review): the enclosing struct declaration is not visible in this
// listing; these appear to be bit-packed per-node fields (two 15-bit
// MachineTypeUnion fields plus two flag bits — 32 bits total per node).
62 MachineTypeUnion use : 15; // Union of all usages for the node.
63 bool queued : 1; // Bookkeeping for the traversal.
64 bool visited : 1; // Bookkeeping for the traversal.
65 MachineTypeUnion output : 15; // Output type of the node.
// Constructs the selector. Sizes the per-node side-table by the graph's
// current node count and precomputes the "safe additive integer" range.
// (Some initializer-list entries are elided in this listing.)
68 RepresentationSelector(JSGraph* jsgraph, Zone* zone,
69 RepresentationChanger* changer,
70 SourcePositionTable* source_positions)
72 count_(jsgraph->graph()->NodeCount()),
73 info_(zone->NewArray<NodeInfo>(count_)),
79 source_positions_(source_positions) {
// Zero-fill the side-table: an all-zero NodeInfo means "no uses recorded,
// not queued, not visited yet".
80 memset(info_, 0, sizeof(NodeInfo) * count_);
// Every integer in [-2^52, 2^52] is exactly representable as a float64,
// so additions of operands within this range cannot lose precision; used
// by IsSafeIntAdditiveOperand().
82 safe_int_additive_range_ =
83 Type::Range(-std::pow(2.0, 52.0), std::pow(2.0, 52.0), zone);
// Drives the two phases of representation selection: PROPAGATE (a backward
// fixpoint pushing use information from the graph end), then LOWER (a pass
// over the nodes collected during propagation), followed by applying the
// deferred node replacements.
86 void Run(SimplifiedLowering* lowering) {
87 // Run propagation phase to a fixpoint.
88 TRACE(("--{Propagation phase}--\n"));
// Seed the worklist with the end node; uses propagate backwards from it.
90 Enqueue(jsgraph_->graph()->end());
91 // Process nodes from the queue until it is empty.
92 while (!queue_.empty()) {
93 Node* node = queue_.front();
94 NodeInfo* info = GetInfo(node);
// Passing NULL for the lowering: during PROPAGATE, VisitNode only
// records use/output information and performs no graph rewrites.
97 TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
98 VisitNode(node, info->use, NULL);
99 TRACE((" ==> output "));
100 PrintInfo(info->output);
104 // Run lowering and change insertion phase.
105 TRACE(("--{Simplified lowering phase}--\n"));
107 // Process nodes from the collected {nodes_} vector.
108 for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
110 TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
111 // Reuse {VisitNode()} so the representation rules are in one place.
112 if (FLAG_turbo_source_positions) {
// Attribute any nodes created during lowering to the source position
// of the node being lowered.
113 SourcePositionTable::Scope scope(
114 source_positions_, source_positions_->GetSourcePosition(node));
115 VisitNode(node, GetUseInfo(node), lowering);
117 VisitNode(node, GetUseInfo(node), lowering);
121 // Perform the final replacements.
122 for (NodeVector::iterator i = replacements_.begin();
123 i != replacements_.end(); ++i) {
// {replacements_} holds (node, replacement) pairs in consecutive slots;
// the extra pre-increment fetches the partner of the node at {*i}.
125 Node* replacement = *(++i);
126 node->ReplaceUses(replacement);
130 // Enqueue {node} if the {use} contains new information for that node.
131 // Add {node} to {nodes_} if this is the first time it's been visited.
132 void Enqueue(Node* node, MachineTypeUnion use = 0) {
// Only the propagation phase grows the worklist; during LOWER the order
// is already fixed by the {nodes_} vector collected earlier.
133 if (phase_ != PROPAGATE) return;
134 NodeInfo* info = GetInfo(node);
135 if (!info->visited) {
136 // First visit of this node.
137 info->visited = true;
139 nodes_.push_back(node);
141 TRACE((" initial: "));
146 TRACE((" queue?: "));
// Re-enqueue only when {use} carries bits not already recorded. Use sets
// only grow, which guarantees the fixpoint terminates.
148 if ((info->use & use) != use) {
149 // New usage information for the node is available.
155 TRACE((" inqueue: "));
162 bool lower() { return phase_ == LOWER; }
// Convenience overload: enqueue {node} with a use set consisting of the
// single {use} machine type widened to a union.
164 void Enqueue(Node* node, MachineType use) {
165 Enqueue(node, static_cast<MachineTypeUnion>(use));
// Records the selected output representation/type of {node} in the
// per-node side-table.
168 void SetOutput(Node* node, MachineTypeUnion output) {
169 // Every node should have at most one output representation. Note that
170 // phis can have 0, if they have not been used in a representation-inducing
172 DCHECK((output & kRepMask) == 0 ||
173 base::bits::IsPowerOfTwo32(output & kRepMask))
174 GetInfo(node)->output = output;
// Returns true if the upper-bound types of both value inputs of the binary
// operation {node} are subtypes of {type}.
177 bool BothInputsAre(Node* node, Type* type) {
178 DCHECK_EQ(2, node->InputCount());
179 return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
180 NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
// Processes input {index} of {node} as a value that will be truncated to
// 32 bits: during PROPAGATE it pushes {use} backwards (elided here);
// during LOWER it inserts an explicit truncation if needed.
183 void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
184 Node* input = node->InputAt(index);
185 if (phase_ == PROPAGATE) {
186 // In the propagate phase, propagate the usage information backward.
189 // In the change phase, insert a change before the use if necessary.
190 MachineTypeUnion output = GetInfo(input)->output;
// Any of bit/word8/word16/word32 already carries the required low
// 32 bits, so no truncation node is needed in those cases.
191 if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
192 // Output representation doesn't match usage.
193 TRACE((" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
194 node->op()->mnemonic(), index, input->id(),
195 input->op()->mnemonic()));
201 Node* n = changer_->GetTruncatedWord32For(input, output);
202 node->ReplaceInput(index, n);
// Processes input {index} of {node} with the given {use}: during PROPAGATE
// it pushes the use backwards (elided here); during LOWER it inserts a
// representation change in front of the input if its produced
// representation does not satisfy the use.
207 void ProcessInput(Node* node, int index, MachineTypeUnion use) {
208 Node* input = node->InputAt(index);
209 if (phase_ == PROPAGATE) {
210 // In the propagate phase, propagate the usage information backward.
213 // In the change phase, insert a change before the use if necessary.
214 if ((use & kRepMask) == 0) return; // No input requirement on the use.
215 MachineTypeUnion output = GetInfo(input)->output;
// The input's representation bits must intersect the required ones.
216 if ((output & kRepMask & use) == 0) {
217 // Output representation doesn't match usage.
218 TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
219 node->op()->mnemonic(), index, input->id(),
220 input->op()->mnemonic()));
226 Node* n = changer_->GetRepresentationFor(input, output, use);
227 node->ReplaceInput(index, n);
// Visits the effect and control inputs of {node} starting at input
// {index}. All value and context inputs before {index} must already have
// been processed by the caller (checked by the DCHECKs below).
232 void ProcessRemainingInputs(Node* node, int index) {
233 DCHECK_GE(index, NodeProperties::PastValueIndex(node));
234 DCHECK_GE(index, NodeProperties::PastContextIndex(node));
235 for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
236 i < NodeProperties::PastEffectIndex(node); ++i) {
237 Enqueue(node->InputAt(i)); // Effect inputs: just visit
239 for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
240 i < NodeProperties::PastControlIndex(node); ++i) {
241 Enqueue(node->InputAt(i)); // Control inputs: just visit
245 // The default, most general visitation case. For {node}, process all value,
246 // context, effect, and control inputs, assuming that value inputs should have
247 // {kRepTagged} representation and can observe all output values {kTypeAny}.
248 void VisitInputs(Node* node) {
// A single edge iterator is advanced across all input categories in the
// order the operator declares them: value, context, frame-state, effect,
// control.
249 auto i = node->input_edges().begin();
250 for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
251 ProcessInput(node, (*i).index(), kMachAnyTagged); // Value inputs
253 for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
255 ProcessInput(node, (*i).index(), kMachAnyTagged); // Context inputs
257 for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
259 Enqueue((*i).to()); // FrameState inputs: just visit
261 for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
262 Enqueue((*i).to()); // Effect inputs: just visit
264 for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
265 Enqueue((*i).to()); // Control inputs: just visit
// All edges must have been consumed exactly once.
267 DCHECK(i == node->input_edges().end());
268 SetOutput(node, kMachAnyTagged);
271 // Helper for binops of the I x I -> O variety.
// Requires {input_use} on both value inputs and records {output} as the
// node's result representation/type.
272 void VisitBinop(Node* node, MachineTypeUnion input_use,
273 MachineTypeUnion output) {
274 DCHECK_EQ(2, node->InputCount());
275 ProcessInput(node, 0, input_use);
276 ProcessInput(node, 1, input_use);
277 SetOutput(node, output);
280 // Helper for unops of the I -> O variety.
// Requires {input_use} on the single value input and records {output} as
// the node's result representation/type.
281 void VisitUnop(Node* node, MachineTypeUnion input_use,
282 MachineTypeUnion output) {
283 DCHECK_EQ(1, node->InputCount());
284 ProcessInput(node, 0, input_use);
285 SetOutput(node, output);
288 // Helper for leaf nodes.
// Leaves have no inputs; only the output representation/type is recorded.
289 void VisitLeaf(Node* node, MachineTypeUnion output) {
290 DCHECK_EQ(0, node->InputCount());
291 SetOutput(node, output);
294 // Helpers for specific types of binops.
// float64 x float64 -> float64.
295 void VisitFloat64Binop(Node* node) {
296 VisitBinop(node, kMachFloat64, kMachFloat64);
298 void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
// uint32 x uint32 -> uint32.
299 void VisitUint32Binop(Node* node) {
300 VisitBinop(node, kMachUint32, kMachUint32);
302 void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
// uint64 x uint64 -> uint64.
303 void VisitUint64Binop(Node* node) {
304 VisitBinop(node, kMachUint64, kMachUint64);
306 void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
307 void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
308 void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
309 void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
310 void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
312 // Infer representation for phi-like nodes.
// NOTE(review): the return statements of the individual branches are
// elided in this listing; the visible conditions only establish which
// case applies. Branch selection, from the code shown: a tagged use wins
// outright; otherwise integral values choose between float64/word32 based
// on their uses and 32-bit-ness; booleans and numbers follow their type.
313 MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
314 // Phis adapt to the output representation their uses demand.
315 Type* upper = NodeProperties::GetBounds(node).upper;
316 if ((use & kRepMask) == kRepTagged) {
319 } else if (upper->Is(Type::Integral32())) {
320 // Integer within [-2^31, 2^32[ range.
321 if ((use & kRepMask) == kRepFloat64) {
322 // only float64 uses.
324 } else if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
325 // multiple uses, but we are within 32 bits range => pick kRepWord32.
327 } else if ((use & kRepMask) == kRepWord32 ||
328 (use & kTypeMask) == kTypeInt32 ||
329 (use & kTypeMask) == kTypeUint32) {
330 // We only use 32 bits or we use the result consistently.
335 } else if (upper->Is(Type::Boolean())) {
336 // multiple uses => pick kRepBit.
338 } else if (upper->Is(Type::Number())) {
339 // multiple uses => pick kRepFloat64.
345 // Helper for handling selects.
// Picks an output representation from the uses (like a phi), combines it
// with the type inferred from the node's upper bound, and, when lowering,
// updates the Select operator's machine type to match.
// NOTE(review): the if(lower())/else split between the two ProcessInput
// groups below is elided in this listing — the first group looks like the
// LOWER-phase conversion and the second like the PROPAGATE-phase use
// propagation; confirm against the full source.
346 void VisitSelect(Node* node, MachineTypeUnion use,
347 SimplifiedLowering* lowering) {
// Input 0 is the condition and is always consumed as a bit.
348 ProcessInput(node, 0, kRepBit);
349 MachineType output = GetRepresentationForPhi(node, use);
351 Type* upper = NodeProperties::GetBounds(node).upper;
// Combine the representation chosen from uses with the type bits derived
// from the static type bound.
352 MachineType output_type =
353 static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
354 SetOutput(node, output_type);
357 // Update the select operator.
358 SelectParameters p = SelectParametersOf(node->op());
359 MachineType type = static_cast<MachineType>(output_type);
360 if (type != p.type()) {
361 node->set_op(lowering->common()->Select(type, p.hint()));
364 // Convert inputs to the output representation of this select.
365 ProcessInput(node, 1, output_type);
366 ProcessInput(node, 2, output_type);
368 // Propagate {use} of the select to value inputs.
369 MachineType use_type =
370 static_cast<MachineType>((use & kTypeMask) | output);
371 ProcessInput(node, 1, use_type);
372 ProcessInput(node, 2, use_type);
376 // Helper for handling phis.
// Chooses the phi's representation from its uses and static type, updates
// the Phi operator's machine type during lowering, and processes the value
// inputs; control inputs are passed use 0 (no representation requirement).
// NOTE(review): the if(lower())/else phase split around the two input
// loops below is elided in this listing — confirm against the full source.
377 void VisitPhi(Node* node, MachineTypeUnion use,
378 SimplifiedLowering* lowering) {
379 MachineType output = GetRepresentationForPhi(node, use);
381 Type* upper = NodeProperties::GetBounds(node).upper;
// Combine the chosen representation with type bits from the upper bound.
382 MachineType output_type =
383 static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
384 SetOutput(node, output_type);
386 int values = node->op()->ValueInputCount();
389 // Update the phi operator.
390 MachineType type = static_cast<MachineType>(output_type);
391 if (type != OpParameter<MachineType>(node)) {
392 node->set_op(lowering->common()->Phi(type, values));
395 // Convert inputs to the output representation of this phi.
396 for (Edge const edge : node->input_edges()) {
397 // TODO(titzer): it'd be nice to have distinguished edge kinds here.
// {values} is decremented per edge (elided here), so trailing control
// edges receive use 0 — presumably; verify against the full source.
398 ProcessInput(node, edge.index(), values > 0 ? output_type : 0);
402 // Propagate {use} of the phi to value inputs, and 0 to control.
403 MachineType use_type =
404 static_cast<MachineType>((use & kTypeMask) | output);
405 for (Edge const edge : node->input_edges()) {
406 // TODO(titzer): it'd be nice to have distinguished edge kinds here.
407 ProcessInput(node, edge.index(), values > 0 ? use_type : 0);
// Looks up the signed-int32 machine operator corresponding to {node}'s
// current (simplified) opcode.
413 const Operator* Int32Op(Node* node) {
414 return changer_->Int32OperatorFor(node->opcode());
// Looks up the unsigned-int32 machine operator corresponding to {node}'s
// current (simplified) opcode.
417 const Operator* Uint32Op(Node* node) {
418 return changer_->Uint32OperatorFor(node->opcode());
// Looks up the float64 machine operator corresponding to {node}'s current
// (simplified) opcode.
421 const Operator* Float64Op(Node* node) {
422 return changer_->Float64OperatorFor(node->opcode());
// True if {node} may be lowered to a signed int32 binop: both inputs are
// statically Signed32 and no use can observe a non-int32 result.
425 bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
426 return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
// True if {node}'s value lies in a range where integer addition/subtraction
// cannot lose precision in float64 (|x| <= 2^52, or any Integral32 value).
429 bool IsSafeIntAdditiveOperand(Node* node) {
430 Type* type = NodeProperties::GetBounds(node).upper;
431 // TODO(jarin): Unfortunately, bitset types are not subtypes of larger
432 // range types, so we have to explicitly check for Integral32 here
433 // (in addition to the safe integer range). Once we fix subtyping for
434 // ranges, we should simplify this.
435 return type->Is(safe_int_additive_range_) || type->Is(Type::Integral32());
// True if an additive {node} (add/sub) may be lowered to int32 with input
// truncation: both operands are additive-safe and no use observes more
// than an int32.
438 bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
439 return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
440 IsSafeIntAdditiveOperand(node->InputAt(1)) &&
441 !CanObserveNonInt32(use);
// True if {node} may be lowered to an unsigned int32 binop: both inputs
// are statically Unsigned32 and no use can observe a non-uint32 result.
444 bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
445 return BothInputsAre(node, Type::Unsigned32()) && !CanObserveNonUint32(use);
// True if an additive {node} (add/sub) may be lowered to uint32 with input
// truncation: both operands are additive-safe and no use observes more
// than a uint32.
448 bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
449 return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
450 IsSafeIntAdditiveOperand(node->InputAt(1)) &&
451 !CanObserveNonUint32(use);
// A use can observe values outside the int32 range if it consumes the
// result as uint32, as a full number, or as an arbitrary value.
454 bool CanObserveNonInt32(MachineTypeUnion use) {
455 return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
// A use can distinguish -0 from +0 only if it consumes the result as a
// full number or an arbitrary value (integer uses cannot represent -0).
458 bool CanObserveMinusZero(MachineTypeUnion use) {
459 // TODO(turbofan): technically Uint32 cannot observe minus zero either.
460 return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
// A use can observe NaN only if it consumes the result as a full number or
// an arbitrary value; int32/uint32 uses truncate NaN to 0.
463 bool CanObserveNaN(MachineTypeUnion use) {
464 return (use & (kTypeNumber | kTypeAny)) != 0;
// A use can observe values outside the uint32 range if it consumes the
// result as int32, as a full number, or as an arbitrary value.
467 bool CanObserveNonUint32(MachineTypeUnion use) {
468 return (use & (kTypeInt32 | kTypeNumber | kTypeAny)) != 0;
471 // Dispatching routine for visiting the node {node} with the usage {use}.
472 // Depending on the operator, propagate new usage info to the inputs.
473 void VisitNode(Node* node, MachineTypeUnion use,
474 SimplifiedLowering* lowering) {
475 switch (node->opcode()) {
476 //------------------------------------------------------------------
478 //------------------------------------------------------------------
479 case IrOpcode::kStart:
480 case IrOpcode::kDead:
481 return VisitLeaf(node, 0);
482 case IrOpcode::kParameter: {
483 // TODO(titzer): use representation from linkage.
484 Type* upper = NodeProperties::GetBounds(node).upper;
485 ProcessInput(node, 0, 0);
486 SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
489 case IrOpcode::kAlways:
490 return VisitLeaf(node, kRepBit);
491 case IrOpcode::kInt32Constant:
492 return VisitLeaf(node, kRepWord32);
493 case IrOpcode::kInt64Constant:
494 return VisitLeaf(node, kRepWord64);
495 case IrOpcode::kFloat64Constant:
496 return VisitLeaf(node, kRepFloat64);
497 case IrOpcode::kExternalConstant:
498 return VisitLeaf(node, kMachPtr);
499 case IrOpcode::kNumberConstant:
500 return VisitLeaf(node, kRepTagged);
501 case IrOpcode::kHeapConstant:
502 return VisitLeaf(node, kRepTagged);
504 case IrOpcode::kBranch:
505 ProcessInput(node, 0, kRepBit);
506 Enqueue(NodeProperties::GetControlInput(node, 0));
508 case IrOpcode::kSwitch:
509 ProcessInput(node, 0, kRepWord32);
510 Enqueue(NodeProperties::GetControlInput(node, 0));
512 case IrOpcode::kSelect:
513 return VisitSelect(node, use, lowering);
515 return VisitPhi(node, use, lowering);
517 //------------------------------------------------------------------
518 // JavaScript operators.
519 //------------------------------------------------------------------
520 // For now, we assume that all JS operators were too complex to lower
521 // to Simplified and that they will always require tagged value inputs
522 // and produce tagged value outputs.
523 // TODO(turbofan): it might be possible to lower some JSOperators here,
524 // but that responsibility really lies in the typed lowering phase.
525 #define DEFINE_JS_CASE(x) case IrOpcode::k##x:
526 JS_OP_LIST(DEFINE_JS_CASE)
527 #undef DEFINE_JS_CASE
529 return SetOutput(node, kRepTagged);
531 //------------------------------------------------------------------
532 // Simplified operators.
533 //------------------------------------------------------------------
534 case IrOpcode::kAnyToBoolean: {
535 VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
537 // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
538 Operator::Properties properties = node->op()->properties();
539 Callable callable = CodeFactory::ToBoolean(
540 jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
541 CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
542 CallDescriptor* desc = Linkage::GetStubCallDescriptor(
543 jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
545 node->set_op(jsgraph_->common()->Call(desc));
546 node->InsertInput(jsgraph_->zone(), 0,
547 jsgraph_->HeapConstant(callable.code()));
548 node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
552 case IrOpcode::kBooleanNot: {
554 MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
555 if (input & kRepBit) {
556 // BooleanNot(x: kRepBit) => Word32Equal(x, #0)
557 node->set_op(lowering->machine()->Word32Equal());
558 node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
560 // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
561 node->set_op(lowering->machine()->WordEqual());
562 node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
565 // No input representation requirement; adapt during lowering.
566 ProcessInput(node, 0, kTypeBool);
567 SetOutput(node, kRepBit);
571 case IrOpcode::kBooleanToNumber: {
573 MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
574 if (input & kRepBit) {
575 // BooleanToNumber(x: kRepBit) => x
576 DeferReplacement(node, node->InputAt(0));
578 // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
579 node->set_op(lowering->machine()->WordEqual());
580 node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
583 // No input representation requirement; adapt during lowering.
584 ProcessInput(node, 0, kTypeBool);
585 SetOutput(node, kMachInt32);
589 case IrOpcode::kNumberEqual:
590 case IrOpcode::kNumberLessThan:
591 case IrOpcode::kNumberLessThanOrEqual: {
592 // Number comparisons reduce to integer comparisons for integer inputs.
593 if (BothInputsAre(node, Type::Signed32())) {
594 // => signed Int32Cmp
596 if (lower()) node->set_op(Int32Op(node));
597 } else if (BothInputsAre(node, Type::Unsigned32())) {
598 // => unsigned Int32Cmp
599 VisitUint32Cmp(node);
600 if (lower()) node->set_op(Uint32Op(node));
603 VisitFloat64Cmp(node);
604 if (lower()) node->set_op(Float64Op(node));
608 case IrOpcode::kNumberAdd:
609 case IrOpcode::kNumberSubtract: {
610 // Add and subtract reduce to Int32Add/Sub if the inputs
611 // are already integers and all uses are truncating.
612 if (CanLowerToInt32Binop(node, use)) {
613 // => signed Int32Add/Sub
614 VisitInt32Binop(node);
615 if (lower()) node->set_op(Int32Op(node));
616 } else if (CanLowerToInt32AdditiveBinop(node, use)) {
617 // => signed Int32Add/Sub, truncating inputs
618 ProcessTruncateWord32Input(node, 0, kTypeInt32);
619 ProcessTruncateWord32Input(node, 1, kTypeInt32);
620 SetOutput(node, kMachInt32);
621 if (lower()) node->set_op(Int32Op(node));
622 } else if (CanLowerToUint32Binop(node, use)) {
623 // => unsigned Int32Add/Sub
624 VisitUint32Binop(node);
625 if (lower()) node->set_op(Uint32Op(node));
626 } else if (CanLowerToUint32AdditiveBinop(node, use)) {
627 // => signed Int32Add/Sub, truncating inputs
628 ProcessTruncateWord32Input(node, 0, kTypeUint32);
629 ProcessTruncateWord32Input(node, 1, kTypeUint32);
630 SetOutput(node, kMachUint32);
631 if (lower()) node->set_op(Uint32Op(node));
634 VisitFloat64Binop(node);
635 if (lower()) node->set_op(Float64Op(node));
639 case IrOpcode::kNumberMultiply: {
640 NumberMatcher right(node->InputAt(1));
641 if (right.IsInRange(-1048576, 1048576)) { // must fit double mantissa.
642 if (CanLowerToInt32Binop(node, use)) {
643 // => signed Int32Mul
644 VisitInt32Binop(node);
645 if (lower()) node->set_op(Int32Op(node));
650 VisitFloat64Binop(node);
651 if (lower()) node->set_op(Float64Op(node));
654 case IrOpcode::kNumberDivide: {
655 if (CanLowerToInt32Binop(node, use)) {
656 // => signed Int32Div
657 VisitInt32Binop(node);
658 if (lower()) DeferReplacement(node, lowering->Int32Div(node));
661 if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
662 // => unsigned Uint32Div
663 VisitUint32Binop(node);
664 if (lower()) DeferReplacement(node, lowering->Uint32Div(node));
668 VisitFloat64Binop(node);
669 if (lower()) node->set_op(Float64Op(node));
672 case IrOpcode::kNumberModulus: {
673 if (CanLowerToInt32Binop(node, use)) {
674 // => signed Int32Mod
675 VisitInt32Binop(node);
676 if (lower()) DeferReplacement(node, lowering->Int32Mod(node));
679 if (BothInputsAre(node, Type::Unsigned32()) && !CanObserveNaN(use)) {
680 // => unsigned Uint32Mod
681 VisitUint32Binop(node);
682 if (lower()) DeferReplacement(node, lowering->Uint32Mod(node));
686 VisitFloat64Binop(node);
687 if (lower()) node->set_op(Float64Op(node));
690 case IrOpcode::kNumberToInt32: {
691 MachineTypeUnion use_rep = use & kRepMask;
692 Node* input = node->InputAt(0);
693 Type* in_upper = NodeProperties::GetBounds(input).upper;
694 MachineTypeUnion in = GetInfo(input)->output;
695 if (in_upper->Is(Type::Signed32())) {
696 // If the input has type int32, pass through representation.
697 VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
698 if (lower()) DeferReplacement(node, node->InputAt(0));
699 } else if ((in & kTypeMask) == kTypeUint32 ||
700 in_upper->Is(Type::Unsigned32())) {
701 // Just change representation if necessary.
702 VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
703 if (lower()) DeferReplacement(node, node->InputAt(0));
704 } else if ((in & kTypeMask) == kTypeInt32 ||
705 (in & kRepMask) == kRepWord32) {
706 // Just change representation if necessary.
707 VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
708 if (lower()) DeferReplacement(node, node->InputAt(0));
710 // Require the input in float64 format and perform truncation.
711 // TODO(turbofan): avoid a truncation with a smi check.
712 VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
714 node->set_op(lowering->machine()->TruncateFloat64ToInt32());
718 case IrOpcode::kNumberToUint32: {
719 MachineTypeUnion use_rep = use & kRepMask;
720 Node* input = node->InputAt(0);
721 Type* in_upper = NodeProperties::GetBounds(input).upper;
722 MachineTypeUnion in = GetInfo(input)->output;
723 if (in_upper->Is(Type::Unsigned32())) {
724 // If the input has type uint32, pass through representation.
725 VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
726 if (lower()) DeferReplacement(node, node->InputAt(0));
727 } else if ((in & kTypeMask) == kTypeInt32 ||
728 in_upper->Is(Type::Signed32())) {
729 // Just change representation if necessary.
730 VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
731 if (lower()) DeferReplacement(node, node->InputAt(0));
732 } else if ((in & kTypeMask) == kTypeUint32 ||
733 (in & kRepMask) == kRepWord32) {
734 // Just change representation if necessary.
735 VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
736 if (lower()) DeferReplacement(node, node->InputAt(0));
738 // Require the input in float64 format and perform truncation.
739 // TODO(turbofan): avoid a truncation with a smi check.
740 VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
742 node->set_op(lowering->machine()->TruncateFloat64ToInt32());
746 case IrOpcode::kPlainPrimitiveToNumber: {
747 VisitUnop(node, kMachAnyTagged, kTypeNumber | kRepTagged);
749 // PlainPrimitiveToNumber(x) => Call(ToNumberStub, x, no-context)
750 Operator::Properties properties = node->op()->properties();
751 Callable callable = CodeFactory::ToNumber(jsgraph_->isolate());
752 CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
753 CallDescriptor* desc = Linkage::GetStubCallDescriptor(
754 jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
756 node->set_op(jsgraph_->common()->Call(desc));
757 node->InsertInput(jsgraph_->zone(), 0,
758 jsgraph_->HeapConstant(callable.code()));
759 node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
763 case IrOpcode::kReferenceEqual: {
764 VisitBinop(node, kMachAnyTagged, kRepBit);
765 if (lower()) node->set_op(lowering->machine()->WordEqual());
768 case IrOpcode::kStringEqual: {
769 VisitBinop(node, kMachAnyTagged, kRepBit);
770 if (lower()) lowering->DoStringEqual(node);
773 case IrOpcode::kStringLessThan: {
774 VisitBinop(node, kMachAnyTagged, kRepBit);
775 if (lower()) lowering->DoStringLessThan(node);
778 case IrOpcode::kStringLessThanOrEqual: {
779 VisitBinop(node, kMachAnyTagged, kRepBit);
780 if (lower()) lowering->DoStringLessThanOrEqual(node);
783 case IrOpcode::kStringAdd: {
784 VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
785 if (lower()) lowering->DoStringAdd(node);
788 case IrOpcode::kLoadField: {
789 FieldAccess access = FieldAccessOf(node->op());
790 ProcessInput(node, 0, changer_->TypeForBasePointer(access));
791 ProcessRemainingInputs(node, 1);
792 SetOutput(node, access.machine_type);
793 if (lower()) lowering->DoLoadField(node);
796 case IrOpcode::kStoreField: {
797 FieldAccess access = FieldAccessOf(node->op());
798 ProcessInput(node, 0, changer_->TypeForBasePointer(access));
799 ProcessInput(node, 1, access.machine_type);
800 ProcessRemainingInputs(node, 2);
802 if (lower()) lowering->DoStoreField(node);
805 case IrOpcode::kLoadBuffer: {
806 BufferAccess access = BufferAccessOf(node->op());
807 ProcessInput(node, 0, kMachPtr); // buffer
808 ProcessInput(node, 1, kMachInt32); // offset
809 ProcessInput(node, 2, kMachInt32); // length
810 ProcessRemainingInputs(node, 3);
811 // Tagged overrides everything if we have to do a typed array bounds
812 // check, because we may need to return undefined then.
813 MachineType output_type;
814 if (use & kRepTagged) {
815 output_type = kMachAnyTagged;
816 } else if (use & kRepFloat64) {
817 if (access.machine_type() & kRepFloat32) {
818 output_type = access.machine_type();
820 output_type = kMachFloat64;
822 } else if (use & kRepFloat32) {
823 output_type = kMachFloat32;
825 output_type = access.machine_type();
827 SetOutput(node, output_type);
828 if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
831 case IrOpcode::kStoreBuffer: {
832 BufferAccess access = BufferAccessOf(node->op());
833 ProcessInput(node, 0, kMachPtr); // buffer
834 ProcessInput(node, 1, kMachInt32); // offset
835 ProcessInput(node, 2, kMachInt32); // length
836 ProcessInput(node, 3, access.machine_type()); // value
837 ProcessRemainingInputs(node, 4);
839 if (lower()) lowering->DoStoreBuffer(node);
842 case IrOpcode::kLoadElement: {
843 ElementAccess access = ElementAccessOf(node->op());
844 ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
845 ProcessInput(node, 1, kMachInt32); // index
846 ProcessRemainingInputs(node, 2);
847 SetOutput(node, access.machine_type);
848 if (lower()) lowering->DoLoadElement(node);
851 case IrOpcode::kStoreElement: {
852 ElementAccess access = ElementAccessOf(node->op());
853 ProcessInput(node, 0, changer_->TypeForBasePointer(access)); // base
854 ProcessInput(node, 1, kMachInt32); // index
855 ProcessInput(node, 2, access.machine_type); // value
856 ProcessRemainingInputs(node, 3);
858 if (lower()) lowering->DoStoreElement(node);
861 case IrOpcode::kObjectIsSmi: {
862 ProcessInput(node, 0, kMachAnyTagged);
863 SetOutput(node, kRepBit | kTypeBool);
865 Node* is_tagged = jsgraph_->graph()->NewNode(
866 jsgraph_->machine()->WordAnd(), node->InputAt(0),
867 jsgraph_->IntPtrConstant(kSmiTagMask));
868 Node* is_smi = jsgraph_->graph()->NewNode(
869 jsgraph_->machine()->WordEqual(), is_tagged,
870 jsgraph_->IntPtrConstant(kSmiTag));
871 DeferReplacement(node, is_smi);
875 case IrOpcode::kObjectIsNonNegativeSmi: {
876 ProcessInput(node, 0, kMachAnyTagged);
877 SetOutput(node, kRepBit | kTypeBool);
879 Node* is_tagged = jsgraph_->graph()->NewNode(
880 jsgraph_->machine()->WordAnd(), node->InputAt(0),
881 jsgraph_->IntPtrConstant(kSmiTagMask));
882 Node* is_smi = jsgraph_->graph()->NewNode(
883 jsgraph_->machine()->WordEqual(), is_tagged,
884 jsgraph_->IntPtrConstant(kSmiTag));
885 Node* is_non_neg = jsgraph_->graph()->NewNode(
886 jsgraph_->machine()->IntLessThanOrEqual(),
887 jsgraph_->IntPtrConstant(0), node->InputAt(0));
888 Node* is_non_neg_smi = jsgraph_->graph()->NewNode(
889 jsgraph_->machine()->Word32And(), is_smi, is_non_neg);
890 DeferReplacement(node, is_non_neg_smi);
895 //------------------------------------------------------------------
896 // Machine-level operators.
897 //------------------------------------------------------------------
898 case IrOpcode::kLoad: {
899 // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
900 MachineTypeUnion tBase = kRepTagged | kMachPtr;
901 LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
902 ProcessInput(node, 0, tBase); // pointer or object
903 ProcessInput(node, 1, kMachInt32); // index
904 ProcessRemainingInputs(node, 2);
905 SetOutput(node, rep);
908 case IrOpcode::kStore: {
909 // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
910 MachineTypeUnion tBase = kRepTagged | kMachPtr;
911 StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
912 ProcessInput(node, 0, tBase); // pointer or object
913 ProcessInput(node, 1, kMachInt32); // index
914 ProcessInput(node, 2, rep.machine_type());
915 ProcessRemainingInputs(node, 3);
919 case IrOpcode::kWord32Shr:
920 // We output unsigned int32 for shift right because JavaScript.
921 return VisitBinop(node, kMachUint32, kMachUint32);
922 case IrOpcode::kWord32And:
923 case IrOpcode::kWord32Or:
924 case IrOpcode::kWord32Xor:
925 case IrOpcode::kWord32Shl:
926 case IrOpcode::kWord32Sar:
927 // We use signed int32 as the output type for these word32 operations,
928 // though the machine bits are the same for either signed or unsigned,
929 // because JavaScript considers the result from these operations signed.
930 return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
931 case IrOpcode::kWord32Equal:
932 return VisitBinop(node, kRepWord32, kRepBit);
934 case IrOpcode::kInt32Add:
935 case IrOpcode::kInt32Sub:
936 case IrOpcode::kInt32Mul:
937 case IrOpcode::kInt32MulHigh:
938 case IrOpcode::kInt32Div:
939 case IrOpcode::kInt32Mod:
940 return VisitInt32Binop(node);
941 case IrOpcode::kUint32Div:
942 case IrOpcode::kUint32Mod:
943 case IrOpcode::kUint32MulHigh:
944 return VisitUint32Binop(node);
945 case IrOpcode::kInt32LessThan:
946 case IrOpcode::kInt32LessThanOrEqual:
947 return VisitInt32Cmp(node);
949 case IrOpcode::kUint32LessThan:
950 case IrOpcode::kUint32LessThanOrEqual:
951 return VisitUint32Cmp(node);
953 case IrOpcode::kInt64Add:
954 case IrOpcode::kInt64Sub:
955 case IrOpcode::kInt64Mul:
956 case IrOpcode::kInt64Div:
957 case IrOpcode::kInt64Mod:
958 return VisitInt64Binop(node);
959 case IrOpcode::kInt64LessThan:
960 case IrOpcode::kInt64LessThanOrEqual:
961 return VisitInt64Cmp(node);
963 case IrOpcode::kUint64LessThan:
964 return VisitUint64Cmp(node);
966 case IrOpcode::kUint64Div:
967 case IrOpcode::kUint64Mod:
968 return VisitUint64Binop(node);
970 case IrOpcode::kWord64And:
971 case IrOpcode::kWord64Or:
972 case IrOpcode::kWord64Xor:
973 case IrOpcode::kWord64Shl:
974 case IrOpcode::kWord64Shr:
975 case IrOpcode::kWord64Sar:
976 return VisitBinop(node, kRepWord64, kRepWord64);
977 case IrOpcode::kWord64Equal:
978 return VisitBinop(node, kRepWord64, kRepBit);
980 case IrOpcode::kChangeInt32ToInt64:
981 return VisitUnop(node, kTypeInt32 | kRepWord32,
982 kTypeInt32 | kRepWord64);
983 case IrOpcode::kChangeUint32ToUint64:
984 return VisitUnop(node, kTypeUint32 | kRepWord32,
985 kTypeUint32 | kRepWord64);
986 case IrOpcode::kTruncateFloat64ToFloat32:
987 return VisitUnop(node, kTypeNumber | kRepFloat64,
988 kTypeNumber | kRepFloat32);
989 case IrOpcode::kTruncateInt64ToInt32:
990 // TODO(titzer): Is kTypeInt32 correct here?
991 return VisitUnop(node, kTypeInt32 | kRepWord64,
992 kTypeInt32 | kRepWord32);
994 case IrOpcode::kChangeFloat32ToFloat64:
995 return VisitUnop(node, kTypeNumber | kRepFloat32,
996 kTypeNumber | kRepFloat64);
997 case IrOpcode::kChangeInt32ToFloat64:
998 return VisitUnop(node, kTypeInt32 | kRepWord32,
999 kTypeInt32 | kRepFloat64);
1000 case IrOpcode::kChangeUint32ToFloat64:
1001 return VisitUnop(node, kTypeUint32 | kRepWord32,
1002 kTypeUint32 | kRepFloat64);
1003 case IrOpcode::kChangeFloat64ToInt32:
1004 return VisitUnop(node, kTypeInt32 | kRepFloat64,
1005 kTypeInt32 | kRepWord32);
1006 case IrOpcode::kChangeFloat64ToUint32:
1007 return VisitUnop(node, kTypeUint32 | kRepFloat64,
1008 kTypeUint32 | kRepWord32);
1010 case IrOpcode::kFloat64Add:
1011 case IrOpcode::kFloat64Sub:
1012 case IrOpcode::kFloat64Mul:
1013 case IrOpcode::kFloat64Div:
1014 case IrOpcode::kFloat64Mod:
1015 return VisitFloat64Binop(node);
1016 case IrOpcode::kFloat64Sqrt:
1017 case IrOpcode::kFloat64Floor:
1018 case IrOpcode::kFloat64Ceil:
1019 case IrOpcode::kFloat64RoundTruncate:
1020 case IrOpcode::kFloat64RoundTiesAway:
1021 return VisitUnop(node, kMachFloat64, kMachFloat64);
1022 case IrOpcode::kFloat64Equal:
1023 case IrOpcode::kFloat64LessThan:
1024 case IrOpcode::kFloat64LessThanOrEqual:
1025 return VisitFloat64Cmp(node);
1026 case IrOpcode::kLoadStackPointer:
1027 return VisitLeaf(node, kMachPtr);
1028 case IrOpcode::kStateValues:
1029 for (int i = 0; i < node->InputCount(); i++) {
1030 ProcessInput(node, i, kTypeAny);
1032 SetOutput(node, kMachAnyTagged);
// Arranges for {replacement} to take the place of {node}. If {replacement}
// is a pre-existing node (id < count_) with identical output info, uses are
// redirected eagerly; otherwise the pair is queued in {replacements_} and the
// substitution is performed after lowering finishes.
1040 void DeferReplacement(Node* node, Node* replacement) {
1041 if (FLAG_trace_representation) {
1042 TRACE(("defer replacement #%d:%s with #%d:%s\n", node->id(),
1043 node->op()->mnemonic(), replacement->id(),
1044 replacement->op()->mnemonic()));
// Eager path: replacement existed before lowering began and carries the
// same output representation/type, so redirecting uses cannot confuse
// later representation-change insertion.
1046 if (replacement->id() < count_ &&
1047 GetInfo(replacement)->output == GetInfo(node)->output) {
1048 // Replace with a previously existing node eagerly only if the type is the
1050 node->ReplaceUses(replacement);
1052 // Otherwise, we are replacing a node with a representation change.
1053 // Such a substitution must be done after all lowering is done, because
1054 // changing the type could confuse the representation change
1055 // insertion for uses of the node.
1056 replacements_.push_back(node);
1057 replacements_.push_back(replacement);
1059 // TODO(titzer) node->RemoveAllInputs(); // Node is now dead.
// Traces a node's id/mnemonic followed by its recorded use information
// (output only under --trace_representation, via TRACE/PrintInfo).
1062 void PrintUseInfo(Node* node) {
1063 TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
1064 PrintInfo(GetUseInfo(node));
// Prints a MachineTypeUnion to stdout, guarded by --trace_representation.
1068 void PrintInfo(MachineTypeUnion info) {
1069 if (FLAG_trace_representation) {
1070 OFStream os(stdout);
// Cast so the MachineType stream operator formats the union's bits.
1071 os << static_cast<MachineType>(info);
1077 int count_; // number of nodes in the graph
1078 NodeInfo* info_; // node id -> usage information
1079 NodeVector nodes_; // collected nodes
1080 NodeVector replacements_; // replacements to be done after lowering
1081 Phase phase_; // current phase of algorithm
1082 RepresentationChanger* changer_; // for inserting representation changes
1083 ZoneQueue<Node*> queue_; // queue for traversing the graph
1084 // TODO(danno): RepresentationSelector shouldn't know anything about the
1085 // source positions table, but must for now since there currently is no other
1086 // way to pass down source position information to nodes created during
1087 // lowering. Once this phase becomes a vanilla reducer, it should get source
1088 // position information via the SourcePositionWrapper like all other reducers.
1089 SourcePositionTable* source_positions_;
// NOTE(review): name suggests the int32 range within which additive ops
// cannot overflow — confirm against the code that initializes/uses it
// (not visible in this chunk).
1090 Type* safe_int_additive_range_;
// Returns the usage/output info slot for {node}; node ids index the
// info_ array directly, so they must lie in [0, count_).
1092 NodeInfo* GetInfo(Node* node) {
1093 DCHECK(node->id() >= 0);
1094 DCHECK(node->id() < count_);
1095 return &info_[node->id()];
// Convenience accessor for the accumulated use bits of {node}.
1098 MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
// Builds (node & kSmiTagMask): the result is nonzero iff the low tag bit is
// set. Relies on the tag living in the lowest bit (asserted below).
1102 Node* SimplifiedLowering::IsTagged(Node* node) {
1103 // TODO(titzer): factor this out to a TaggingScheme abstraction.
1104 STATIC_ASSERT(kSmiTagMask == 1); // Only works if tag is the low bit.
1105 return graph()->NewNode(machine()->WordAnd(), node,
1106 jsgraph()->Int32Constant(kSmiTagMask));
// Entry point of the phase: sets up a RepresentationChanger and a
// RepresentationSelector and runs representation selection/lowering over
// the whole graph (the Run call follows below).
1110 void SimplifiedLowering::LowerAllNodes() {
1111 SimplifiedOperatorBuilder simplified(graph()->zone());
1112 RepresentationChanger changer(jsgraph(), &simplified, jsgraph()->isolate());
1113 RepresentationSelector selector(jsgraph(), zone_, &changer,
// Recovers the integer value of a Smi: arithmetic shift right by the total
// tag width (kSmiTagSize + kSmiShiftSize).
1119 Node* SimplifiedLowering::Untag(Node* node) {
1120 // TODO(titzer): factor this out to a TaggingScheme abstraction.
1121 Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
1122 return graph()->NewNode(machine()->WordSar(), node, shift_amount);
// Tags an integer as a Smi: shift left by the total tag width (inverse of
// Untag above).
1126 Node* SimplifiedLowering::SmiTag(Node* node) {
1127 // TODO(titzer): factor this out to a TaggingScheme abstraction.
1128 Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
1129 return graph()->NewNode(machine()->WordShl(), node, shift_amount);
// Field offsets are relative to the tagged (biased) object pointer, so raw
// machine addressing must subtract kHeapObjectTag.
1133 Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
1134 return jsgraph()->Int32Constant(offset - kHeapObjectTag);
// Decides whether a lowered store needs a GC write barrier: only a tagged
// value written into a heap object (tagged base) can create a pointer the
// garbage collector must track.
1138 static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
1139 MachineType representation,
1141 // TODO(turbofan): skip write barriers for Smis, etc.
1142 if (base_is_tagged == kTaggedBase &&
1143 RepresentationOf(representation) == kRepTagged) {
1144 // Write barriers are only for writes into heap objects (i.e. tagged base).
1145 return kFullWriteBarrier;
1147 return kNoWriteBarrier;
// Lowers LoadField to a machine Load: the field's untagged byte offset
// (offset - tag) is inserted as the index input (input 1).
1151 void SimplifiedLowering::DoLoadField(Node* node) {
1152 const FieldAccess& access = FieldAccessOf(node->op());
1153 node->set_op(machine()->Load(access.machine_type));
1154 Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
1155 node->InsertInput(graph()->zone(), 1, offset);
// Lowers StoreField to a machine Store, selecting the write-barrier kind
// from the access descriptor and inserting the untagged byte offset as the
// index input (input 1).
1159 void SimplifiedLowering::DoStoreField(Node* node) {
1160 const FieldAccess& access = FieldAccessOf(node->op());
1161 WriteBarrierKind kind = ComputeWriteBarrierKind(
1162 access.base_is_tagged, access.machine_type, access.type);
1164 machine()->Store(StoreRepresentation(access.machine_type, kind)));
1165 Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
1166 node->InsertInput(graph()->zone(), 1, offset);
// Turns an element index into a byte offset usable by machine loads/stores:
// scale by the element size (shift), add the untagged header size, and on
// 64-bit targets zero-extend to a full word.
1170 Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
// Scale only when the element size is > 1 byte (log2 shift of 0 needs no
// code).
1173 const int element_size_shift = ElementSizeLog2Of(access.machine_type);
1174 if (element_size_shift) {
1175 index = graph()->NewNode(machine()->Word32Shl(), index,
1176 jsgraph()->Int32Constant(element_size_shift));
1178 const int fixed_offset = access.header_size - access.tag();
1180 index = graph()->NewNode(machine()->Int32Add(), index,
1181 jsgraph()->Int32Constant(fixed_offset));
1183 if (machine()->Is64()) {
1184 // TODO(turbofan): This is probably only correct for typed arrays, and only
1185 // if the typed arrays are at most 2GiB in size, which happens to match
1186 // exactly our current situation.
1187 index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
// Lowers LoadBuffer. If the requested output type differs from the buffer's
// element type, builds an explicit bounds-check diamond: in-bounds loads go
// through a representation change, out-of-bounds yields a type-appropriate
// default (undefined / NaN / 0) and {node} itself becomes the merging Phi.
// Otherwise the node is simply turned into a CheckedLoad.
1193 void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
1194 RepresentationChanger* changer) {
1195 DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
1196 DCHECK_NE(kMachNone, RepresentationOf(output_type));
1197 MachineType const type = BufferAccessOf(node->op()).machine_type();
1198 if (output_type != type) {
1199 Node* const buffer = node->InputAt(0);
1200 Node* const offset = node->InputAt(1);
1201 Node* const length = node->InputAt(2);
1202 Node* const effect = node->InputAt(3);
1203 Node* const control = node->InputAt(4);
// On 64-bit targets the 32-bit offset is zero-extended before addressing.
1206 ? graph()->NewNode(machine()->ChangeUint32ToUint64(), offset)
// Bounds check: offset < length (unsigned), expected to succeed.
1209 Node* check = graph()->NewNode(machine()->Uint32LessThan(), offset, length);
1211 graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
// True branch: perform the raw load, then convert the loaded value to the
// requested output representation.
1213 Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
1215 graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
1216 Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
// False branch: no load is performed; pick a default value matching the
// output representation.
1218 Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
1219 Node* efalse = effect;
1221 if (output_type & kRepTagged) {
1222 vfalse = jsgraph()->UndefinedConstant();
1223 } else if (output_type & kRepFloat64) {
1225 jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
1226 } else if (output_type & kRepFloat32) {
1228 jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
1230 vfalse = jsgraph()->Int32Constant(0);
1233 Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
1234 Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
1236 // Replace effect uses of {node} with the {ephi}.
1237 NodeProperties::ReplaceWithValue(node, node, ephi);
1239 // Turn the {node} into a Phi.
1240 node->set_op(common()->Phi(output_type, 2));
1241 node->ReplaceInput(0, vtrue);
1242 node->ReplaceInput(1, vfalse);
1243 node->ReplaceInput(2, merge);
1244 node->TrimInputCount(3);
// Matching types: the machine-level CheckedLoad handles the bounds check.
1246 node->set_op(machine()->CheckedLoad(type));
// Lowers StoreBuffer directly to a machine CheckedStore of the buffer's
// element type (the bounds check is part of the machine operator).
1251 void SimplifiedLowering::DoStoreBuffer(Node* node) {
1252 DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
1253 MachineType const type = BufferAccessOf(node->op()).machine_type();
1254 node->set_op(machine()->CheckedStore(type));
// Lowers LoadElement to a machine Load, rewriting the element index
// (input 1) into a byte offset via ComputeIndex.
1258 void SimplifiedLowering::DoLoadElement(Node* node) {
1259 const ElementAccess& access = ElementAccessOf(node->op());
1260 node->set_op(machine()->Load(access.machine_type));
1261 node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
// Lowers StoreElement to a machine Store with the appropriate write-barrier
// kind, rewriting the element index (input 1) into a byte offset.
1265 void SimplifiedLowering::DoStoreElement(Node* node) {
1266 const ElementAccess& access = ElementAccessOf(node->op());
1267 node->set_op(machine()->Store(StoreRepresentation(
1268 access.machine_type,
1269 ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
1271 node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
// Lowers StringAdd to a call of the StringAdd code stub: the node becomes a
// Call with the stub code object prepended and context/effect/control
// inputs appended.
1275 void SimplifiedLowering::DoStringAdd(Node* node) {
1276 Operator::Properties properties = node->op()->properties();
1277 Callable callable = CodeFactory::StringAdd(
1278 jsgraph()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
1279 CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
1280 CallDescriptor* desc = Linkage::GetStubCallDescriptor(
1281 jsgraph()->isolate(), zone(), callable.descriptor(), 0, flags,
1283 node->set_op(common()->Call(desc));
1284 node->InsertInput(graph()->zone(), 0,
1285 jsgraph()->HeapConstant(callable.code()));
// NOTE(review): undefined presumably serves as the (unused) context input,
// followed by start as effect and control — confirm against the stub call
// descriptor layout.
1286 node->AppendInput(graph()->zone(), jsgraph()->UndefinedConstant());
1287 node->AppendInput(graph()->zone(), graph()->start());
1288 node->AppendInput(graph()->zone(), graph()->start());
// Builds a runtime call comparing the node's two string inputs:
// Runtime::kStringCompare when an ordering is required (less-than style
// comparisons), Runtime::kStringEquals otherwise. Returns the call node.
1292 Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
1293 CEntryStub stub(jsgraph()->isolate(), 1);
1294 Runtime::FunctionId f =
1295 requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
1296 ExternalReference ref(f, jsgraph()->isolate());
1297 Operator::Properties props = node->op()->properties();
1298 // TODO(mstarzinger): We should call StringCompareStub here instead, once an
1299 // interface descriptor is available for it.
1300 CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
// Call layout: CEntry code object, the two value arguments, the runtime
// function's external reference, the argument count (2), and undefined
// (presumably the context — confirm against GetRuntimeCallDescriptor).
1301 return graph()->NewNode(common()->Call(desc),
1302 jsgraph()->HeapConstant(stub.GetCode()),
1303 NodeProperties::GetValueInput(node, 0),
1304 NodeProperties::GetValueInput(node, 1),
1305 jsgraph()->ExternalConstant(ref),
1306 jsgraph()->Int32Constant(2),
1307 jsgraph()->UndefinedConstant());
// Lowers a checked signed 32-bit division:
//  - rhs == -1 (constant): emit 0 - lhs, sidestepping the kMinInt / -1 case;
//  - rhs == 0 (constant): handled separately (elided branch);
//  - trap-safe machine div or constant rhs: plain Int32Div;
//  - otherwise, wrap Int32Div in diamonds guarding rhs == 0 (produce 0) and
//    rhs == -1 (produce 0 - lhs), so the hardware divide never traps.
1311 Node* SimplifiedLowering::Int32Div(Node* const node) {
1312 Int32BinopMatcher m(node);
1313 Node* const zero = jsgraph()->Int32Constant(0);
1314 Node* const lhs = m.left().node();
1315 Node* const rhs = m.right().node();
1317 if (m.right().Is(-1)) {
1318 return graph()->NewNode(machine()->Int32Sub(), zero, lhs);
1319 } else if (m.right().Is(0)) {
1321 } else if (machine()->Int32DivIsSafe() || m.right().HasValue()) {
1322 return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
// Outer diamond: rhs == 0 is expected to be rare (hint kFalse).
1325 Diamond if_zero(graph(), common(),
1326 graph()->NewNode(machine()->Word32Equal(), rhs, zero),
1327 BranchHint::kFalse);
// Inner diamond (nested in the non-zero arm): rhs == -1, also rare.
1329 Diamond if_minus_one(graph(), common(),
1330 graph()->NewNode(machine()->Word32Equal(), rhs,
1331 jsgraph()->Int32Constant(-1)),
1332 BranchHint::kFalse);
1333 if_minus_one.Nest(if_zero, false);
1334 Node* sub = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
1336 graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_minus_one.if_false);
// Select: 0 when rhs == 0; (0 - lhs) when rhs == -1; lhs / rhs otherwise.
1338 return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, sub, div));
// Lowers a checked signed 32-bit modulus. Constant rhs of -1 or 0 is folded
// (elided branch); other constant rhs uses a plain Int32Mod. The general
// case branches on the sign of rhs and, for positive rhs, detects a
// power-of-two divisor at runtime via (rhs & (rhs - 1)) == 0 so the modulus
// can be computed with a mask instead of a hardware divide.
1342 Node* SimplifiedLowering::Int32Mod(Node* const node) {
1343 Int32BinopMatcher m(node);
1344 Node* const zero = jsgraph()->Int32Constant(0);
1345 Node* const minus_one = jsgraph()->Int32Constant(-1);
1346 Node* const lhs = m.left().node();
1347 Node* const rhs = m.right().node();
1349 if (m.right().Is(-1) || m.right().Is(0)) {
1351 } else if (m.right().HasValue()) {
1352 return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
1355 // General case for signed integer modulus, with optimization for (unknown)
1356 // power of 2 right hand side.
1360 // if rhs & msk != 0 then
1373 // Note: We do not use the Diamond helper class here, because it really hurts
1374 // readability with nested diamonds.
1375 const Operator* const merge_op = common()->Merge(2);
1376 const Operator* const phi_op = common()->Phi(kMachInt32, 2);
// Branch 0: is rhs positive (0 < rhs)? Expected to be the common case.
1378 Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
1379 Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
1382 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
// msk = rhs - 1; if rhs is a power of two, (rhs & msk) == 0.
1385 Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
// Branch 1: nonzero (rhs & msk) means rhs is NOT a power of two -> divide.
1387 Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
1388 Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
1390 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
1391 Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
1393 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
// Power-of-two rhs: branch 2 on the sign of lhs, since masking alone is
// only correct for non-negative lhs.
1396 Node* check2 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
1397 Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
// Negative lhs: compute -((-lhs) & msk) to preserve the sign of lhs.
1400 Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
1401 Node* true2 = graph()->NewNode(
1402 machine()->Int32Sub(), zero,
1403 graph()->NewNode(machine()->Word32And(),
1404 graph()->NewNode(machine()->Int32Sub(), zero, lhs),
// Non-negative lhs: lhs & msk suffices.
1407 Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
1408 Node* false2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
1410 if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
1411 false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
1414 if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
1415 true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
// rhs <= 0: only rhs < -1 needs a real divide; rhs in {-1, 0} yields 0,
// avoiding both the divide-by-zero and the kMinInt / -1 trap.
1418 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
1421 Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
1422 Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
1425 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
1426 Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
1428 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
1429 Node* false1 = zero;
1431 if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
1432 false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
1435 Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
1436 return graph()->NewNode(phi_op, true0, false0, merge0);
// Lowers a checked unsigned 32-bit division: constant rhs of 0 is folded
// (elided branch); trap-safe machine div or constant rhs emits a plain
// Uint32Div; otherwise a diamond guards rhs == 0 and yields 0 on that path
// so the hardware divide never traps.
1440 Node* SimplifiedLowering::Uint32Div(Node* const node) {
1441 Uint32BinopMatcher m(node);
1442 Node* const zero = jsgraph()->Uint32Constant(0);
1443 Node* const lhs = m.left().node();
1444 Node* const rhs = m.right().node();
1446 if (m.right().Is(0)) {
1448 } else if (machine()->Uint32DivIsSafe() || m.right().HasValue()) {
1449 return graph()->NewNode(machine()->Uint32Div(), lhs, rhs, graph()->start());
1452 Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
1453 Diamond d(graph(), common(), check, BranchHint::kFalse);
1454 Node* div = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, d.if_false);
1455 return d.Phi(kMachUint32, zero, div);
// Lowers a checked unsigned 32-bit modulus. Constant rhs of 0 is folded
// (elided branch); other constant rhs uses a plain Uint32Mod. The general
// case branches on rhs != 0 and, for nonzero rhs, detects a power-of-two
// divisor at runtime via (rhs & (rhs - 1)) == 0 so the modulus can be
// computed with a mask instead of a hardware divide; rhs == 0 yields 0.
1459 Node* SimplifiedLowering::Uint32Mod(Node* const node) {
1460 Uint32BinopMatcher m(node);
1461 Node* const minus_one = jsgraph()->Int32Constant(-1);
1462 Node* const zero = jsgraph()->Uint32Constant(0);
1463 Node* const lhs = m.left().node();
1464 Node* const rhs = m.right().node();
1466 if (m.right().Is(0)) {
1468 } else if (m.right().HasValue()) {
1469 return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
1472 // General case for unsigned integer modulus, with optimization for (unknown)
1473 // power of 2 right hand side.
1477 // if rhs & msk != 0 then
1484 // Note: We do not use the Diamond helper class here, because it really hurts
1485 // readability with nested diamonds.
1486 const Operator* const merge_op = common()->Merge(2);
1487 const Operator* const phi_op = common()->Phi(kMachInt32, 2);
// Branch 0 conditions directly on rhs (nonzero == true), expected true.
1489 Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
1492 Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
// msk = rhs - 1; (rhs & msk) == 0 iff rhs is a power of two.
1495 Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
1497 Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
1498 Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
// Not a power of two: fall back to the hardware Uint32Mod.
1500 Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
1501 Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
// Power of two: lhs & msk computes the modulus without dividing.
1503 Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
1504 Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
1506 if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
1507 true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
// rhs == 0: result is 0, avoiding the divide trap.
1510 Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
1511 Node* false0 = zero;
1513 Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
1514 return graph()->NewNode(phi_op, true0, false0, merge0);
// Lowers StringEqual: the node becomes WordEqual comparing the runtime
// StringEquals result (no ordering needed) against Smi EQUAL (0).
1518 void SimplifiedLowering::DoStringEqual(Node* node) {
1519 node->set_op(machine()->WordEqual());
1520 node->ReplaceInput(0, StringComparison(node, false));
1521 node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
// Lowers StringLessThan: the node becomes IntLessThan comparing the runtime
// StringCompare result (ordering required) against Smi EQUAL (0).
1525 void SimplifiedLowering::DoStringLessThan(Node* node) {
1526 node->set_op(machine()->IntLessThan());
1527 node->ReplaceInput(0, StringComparison(node, true));
1528 node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
// Lowers StringLessThanOrEqual: the node becomes IntLessThanOrEqual
// comparing the runtime StringCompare result against Smi EQUAL (0).
1532 void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
1533 node->set_op(machine()->IntLessThanOrEqual());
1534 node->ReplaceInput(0, StringComparison(node, true));
1535 node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
1538 } // namespace compiler
1539 } // namespace internal