// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector.h"

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/compiler/pipeline.h"

namespace v8 {
namespace internal {
namespace compiler {

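// The selector keeps two per-node bit vectors, indexed by node id: defined_
// marks nodes for which code has already been emitted, used_ marks nodes
// whose value is actually needed.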
InstructionSelector::InstructionSelector(InstructionSequence* sequence,
                                         SourcePositionTable* source_positions,
                                         Features features)
    : zone_(sequence->isolate()),
      sequence_(sequence),
      source_positions_(source_positions),
      features_(features),
      current_block_(NULL),
      instructions_(zone()),
      defined_(graph()->NodeCount(), false, zone()),
      used_(graph()->NodeCount(), false, zone()) {}


void InstructionSelector::SelectInstructions() {
  // Mark the inputs of all phis in loop headers as used.
  BasicBlockVector* blocks = schedule()->rpo_order();
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    if (!block->IsLoopHeader()) continue;
    DCHECK_NE(0, block->PredecessorCount());
    DCHECK_NE(1, block->PredecessorCount());
    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
         ++j) {
      Node* phi = *j;
      if (phi->opcode() != IrOpcode::kPhi) continue;

      // Mark all inputs as used.
      Node::Inputs inputs = phi->inputs();
      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
        MarkAsUsed(*k);
      }
    }
  }

  // Visit each basic block in post order.
  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
    VisitBlock(*i);
  }

  // Schedule the selected instructions.
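  // Each block's instructions were buffered in reverse order (see
  // VisitBlock), so code_start_ lies beyond code_end_ in instructions_ and
  // the loop below walks backwards to emit them in forward order.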
  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
    BasicBlock* block = *i;
    size_t end = block->code_end_;
    size_t start = block->code_start_;
    sequence()->StartBlock(block);
    while (start-- > end) {
      sequence()->AddInstruction(instructions_[start], block);
    }
    sequence()->EndBlock(block);
  }
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                       InstructionOperand* output,
                                       InstructionOperand* a,
                                       InstructionOperand* b,
                                       InstructionOperand* c, size_t temp_count,
                                       InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
    size_t temp_count, InstructionOperand** temps) {
  size_t output_count = output == NULL ? 0 : 1;
  InstructionOperand* inputs[] = {a, b, c, d};
  size_t input_count = arraysize(inputs);
  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
              temps);
}


Instruction* InstructionSelector::Emit(
    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
    size_t input_count, InstructionOperand** inputs, size_t temp_count,
    InstructionOperand** temps) {
  Instruction* instr =
      Instruction::New(instruction_zone(), opcode, output_count, outputs,
                       input_count, inputs, temp_count, temps);
  return Emit(instr);
}


Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}


bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
         block->deferred_ == current_block_->deferred_;
}


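// A node can be covered by its user (i.e. folded into the user's instruction)
// only if the user is its sole owner and both nodes live in the same basic
// block.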
bool InstructionSelector::CanCover(Node* user, Node* node) const {
  return node->OwnedBy(user) &&
         schedule()->block(node) == schedule()->block(user);
}


bool InstructionSelector::IsDefined(Node* node) const {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  return defined_[id];
}


void InstructionSelector::MarkAsDefined(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(defined_.size()));
  defined_[id] = true;
}


bool InstructionSelector::IsUsed(Node* node) const {
  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  return used_[id];
}


void InstructionSelector::MarkAsUsed(Node* node) {
  DCHECK_NOT_NULL(node);
  NodeId id = node->id();
  DCHECK(id >= 0);
  DCHECK(id < static_cast<NodeId>(used_.size()));
  used_[id] = true;
}


bool InstructionSelector::IsDouble(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsDouble(node->id());
}


void InstructionSelector::MarkAsDouble(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsReference(node));
  sequence()->MarkAsDouble(node->id());
}


bool InstructionSelector::IsReference(const Node* node) const {
  DCHECK_NOT_NULL(node);
  return sequence()->IsReference(node->id());
}


void InstructionSelector::MarkAsReference(Node* node) {
  DCHECK_NOT_NULL(node);
  DCHECK(!IsDouble(node));
  sequence()->MarkAsReference(node->id());
}


void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
  DCHECK_NOT_NULL(node);
  switch (RepresentationOf(rep)) {
    case kRepFloat32:
    case kRepFloat64:
      MarkAsDouble(node);
      break;
    case kRepTagged:
      MarkAsReference(node);
      break;
    default:
      break;
  }
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
                       FrameStateDescriptor* frame_desc)
    : descriptor(d),
      frame_state_descriptor(frame_desc),
      output_nodes(zone),
      outputs(zone),
      instruction_args(zone),
      pushed_nodes(zone) {
  output_nodes.reserve(d->ReturnCount());
  outputs.reserve(d->ReturnCount());
  pushed_nodes.reserve(input_count());
  instruction_args.reserve(input_count() + frame_state_value_count());
}


// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate) {
  OperandGenerator g(this);
  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
            buffer->input_count() + buffer->frame_state_count());

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes.push_back(call);
    } else {
      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
      call->CollectProjections(&buffer->output_nodes);
    }

    // Filter out the outputs that aren't live because no projection uses them.
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      if (buffer->output_nodes[i] != NULL) {
        Node* output = buffer->output_nodes[i];
        MachineType type =
            buffer->descriptor->GetReturnType(static_cast<int>(i));
        LinkageLocation location =
            buffer->descriptor->GetReturnLocation(static_cast<int>(i));
        MarkAsRepresentation(type, output);
        buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
                        buffer->descriptor->GetInputType(0)));
      break;
  }
  DCHECK_EQ(1, buffer->instruction_args.size());

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 1               : deoptimization id.
  // arg 2 - arg (n + 1) : value inputs to the frame state.
  if (buffer->frame_state_descriptor != NULL) {
    InstructionSequence::StateId state_id =
        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));

    Node* frame_state =
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
    AddFrameStateInputs(frame_state, &buffer->instruction_args,
                        buffer->frame_state_descriptor);
  }
  DCHECK(1 + buffer->frame_state_value_count() ==
         buffer->instruction_args.size());

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  InputIter iter(call->inputs().begin());
  int pushed_count = 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK(index == static_cast<size_t>(iter.index()));
    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand* op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
                      buffer->descriptor->GetInputType(index));
    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
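      // A fixed-slot policy means this argument must be pushed to the stack;
      // the slot is encoded as a negative fixed_slot_index, which we map to a
      // zero-based position in pushed_nodes.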
      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        buffer->pushed_nodes.resize(stack_index + 1, NULL);
      }
      DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
      buffer->pushed_nodes[stack_index] = *iter;
      pushed_count++;
    } else {
      buffer->instruction_args.push_back(op);
    }
  }
  CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
  DCHECK(static_cast<size_t>(input_count) ==
         (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
          buffer->frame_state_value_count()));
}


void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK_EQ(NULL, current_block_);
  current_block_ = block;
  int current_block_end = static_cast<int>(instructions_.size());

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    // Skip nodes that are unused or already defined.
    if (!IsUsed(node) || IsDefined(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  // TODO(bmeurer): We should not mutate the schedule.
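  // Note that code_end_ <= code_start_ here: this block's instructions were
  // buffered in reverse, so SelectInstructions() later walks them backwards
  // to recover the forward order.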
  block->code_end_ = current_block_end;
  block->code_start_ = static_cast<int>(instructions_.size());

  current_block_ = NULL;
}


static inline void CheckNoPhis(const BasicBlock* block) {
#ifdef DEBUG
  // Branch targets should not have phis.
  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
    const Node* node = *i;
    CHECK_NE(IrOpcode::kPhi, node->opcode());
  }
#endif
}


void InstructionSelector::VisitControl(BasicBlock* block) {
  Node* input = block->control_input_;
  switch (block->control_) {
    case BasicBlockData::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlockData::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      // SSA deconstruction requires targets of branches not to have phis.
      // Edge split form guarantees this property, but is more strict.
      CheckNoPhis(tbranch);
      CheckNoPhis(fbranch);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlockData::kReturn: {
      // If the control input is itself a Return, use its value input.
      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlockData::kThrow:
      return VisitThrow(input);
    case BasicBlockData::kNone: {
      // TODO(titzer): exit block doesn't have control.
      DCHECK(input == NULL);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void InstructionSelector::VisitNode(Node* node) {
  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    DCHECK(!source_position.IsInvalid());
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
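  // Dispatch on the opcode. Representation marking (double/reference) happens
  // here alongside selection so that later phases, notably the register
  // allocator, know how to treat each value.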
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kFinish:
      return MarkAsReference(node), VisitFinish(node);
    case IrOpcode::kParameter: {
      MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
      MarkAsRepresentation(type, node);
      return VisitParameter(node);
    }
    case IrOpcode::kPhi: {
      MachineType type = OpParameter<MachineType>(node);
      MarkAsRepresentation(type, node);
      return VisitPhi(node);
    }
    case IrOpcode::kProjection:
      return VisitProjection(node);
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
    case IrOpcode::kNumberConstant:
      // TODO(turbofan): only mark non-smis as references.
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kCall:
      return VisitCall(node, NULL, NULL);
    case IrOpcode::kFrameState:
    case IrOpcode::kStateValues:
      return;
    case IrOpcode::kLoad: {
      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
      MarkAsRepresentation(rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Ror:
      return VisitWord32Ror(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Ror:
      return VisitWord64Ror(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32AddWithOverflow:
      return VisitInt32AddWithOverflow(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32SubWithOverflow:
      return VisitInt32SubWithOverflow(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32UDiv:
      return VisitInt32UDiv(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32UMod:
      return VisitInt32UMod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64UDiv:
      return VisitInt64UDiv(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64UMod:
      return VisitInt64UMod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kChangeInt32ToFloat64:
      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
    case IrOpcode::kChangeUint32ToFloat64:
      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
    case IrOpcode::kChangeFloat64ToInt32:
      return VisitChangeFloat64ToInt32(node);
    case IrOpcode::kChangeFloat64ToUint32:
      return VisitChangeFloat64ToUint32(node);
    case IrOpcode::kChangeInt32ToInt64:
      return VisitChangeInt32ToInt64(node);
    case IrOpcode::kChangeUint32ToUint64:
      return VisitChangeUint32ToUint64(node);
    case IrOpcode::kTruncateFloat64ToInt32:
      return VisitTruncateFloat64ToInt32(node);
    case IrOpcode::kTruncateInt64ToInt32:
      return VisitTruncateInt64ToInt32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Sqrt:
      return MarkAsDouble(node), VisitFloat64Sqrt(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    default:
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
  }
}


#if V8_TURBOFAN_BACKEND

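// A comparison against zero can be selected as a plain test of the other
// operand; VisitWord32Equal and VisitWord64Equal below both use this.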
void InstructionSelector::VisitWord32Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int32BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord32Test(m.left().node(), &cont);
  }
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThan(Node* node) {
  FlagsContinuation cont(kUnsignedLessThan, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
  VisitWord32Compare(node, &cont);
}


void InstructionSelector::VisitWord64Equal(Node* node) {
  FlagsContinuation cont(kEqual, node);
  Int64BinopMatcher m(node);
  if (m.right().Is(0)) {
    return VisitWord64Test(m.left().node(), &cont);
  }
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32AddWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32AddWithOverflow(node, &cont);
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  if (Node* ovf = node->FindProjection(1)) {
    FlagsContinuation cont(kOverflow, ovf);
    return VisitInt32SubWithOverflow(node, &cont);
  }
  FlagsContinuation cont;
  VisitInt32SubWithOverflow(node, &cont);
}


void InstructionSelector::VisitInt64LessThan(Node* node) {
  FlagsContinuation cont(kSignedLessThan, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kSignedLessThanOrEqual, node);
  VisitWord64Compare(node, &cont);
}


void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
  OperandGenerator g(this);
  Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Equal(Node* node) {
  FlagsContinuation cont(kUnorderedEqual, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThan(Node* node) {
  FlagsContinuation cont(kUnorderedLessThan, node);
  VisitFloat64Compare(node, &cont);
}


void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
  VisitFloat64Compare(node, &cont);
}

#endif  // V8_TURBOFAN_BACKEND

// 32-bit targets do not implement the following instructions.
#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }


void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND


// 32-bit targets and unsupported architectures need dummy implementations of
// selected 64-bit ops.
#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND

void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}

#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND


void InstructionSelector::VisitFinish(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
}


void InstructionSelector::VisitParameter(Node* node) {
  OperandGenerator g(this);
  int index = OpParameter<int>(node);
  Emit(kArchNop,
       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
                          linkage()->GetParameterType(index)));
}


void InstructionSelector::VisitPhi(Node* node) {
  // TODO(bmeurer): Emit a PhiInstruction here.
  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
    MarkAsUsed(*i);
  }
}


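// For <Operation>WithOverflow nodes, projection 0 is the arithmetic result
// (defined in the same register as the operation itself), while projection 1
// is the overflow flag, which is handled through the flags continuation of
// the operation itself (see VisitInt32AddWithOverflow above).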
void InstructionSelector::VisitProjection(Node* node) {
  OperandGenerator g(this);
  Node* value = node->InputAt(0);
  switch (value->opcode()) {
    case IrOpcode::kInt32AddWithOverflow:
    case IrOpcode::kInt32SubWithOverflow:
      if (OpParameter<size_t>(node) == 0) {
        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
      } else {
        DCHECK(OpParameter<size_t>(node) == 1u);
        MarkAsUsed(value);
      }
      break;
    default:
      break;
  }
}


void InstructionSelector::VisitConstant(Node* node) {
  // We must emit a NOP here because every live range needs a defining
  // instruction in the register allocator.
  OperandGenerator g(this);
  Emit(kArchNop, g.DefineAsConstant(node));
}

void InstructionSelector::VisitGoto(BasicBlock* target) {
  if (IsNextInAssemblyOrder(target)) {
    // Fall through to the next block.
    Emit(kArchNop, NULL)->MarkAsControl();
  } else {
    // Jump to the target block.
    OperandGenerator g(this);
    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
  }
}


void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
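  // e.g. a branch on Word32Equal(x, #0) becomes a negated branch on x itself;
  // the loop below peels off arbitrarily long chains of such comparisons.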
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<size_t>(value) == 1u) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32AddWithOverflow(node, &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitInt32SubWithOverflow(node, &cont);
              default:
                break;
            }
          }
        }
        break;
      default:
        break;
    }
  }

  // The branch could not be combined with a compare; emit a compare against 0.
  VisitWord32Test(value, &cont);
}


void InstructionSelector::VisitReturn(Node* value) {
  OperandGenerator g(this);
  if (value != NULL) {
    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
                                       linkage()->GetReturnType()));
  } else {
    Emit(kArchRet, NULL);
  }
}


void InstructionSelector::VisitThrow(Node* value) {
  UNIMPLEMENTED();  // TODO(titzer)
}


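// A FrameState node has five inputs: parameters, locals and stack (each a
// StateValues node), the context, and optionally an outer FrameState.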
FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
    Node* state) {
  DCHECK(state->opcode() == IrOpcode::kFrameState);
  DCHECK_EQ(5, state->InputCount());
  FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
  int parameters = OpParameter<int>(state->InputAt(0));
  int locals = OpParameter<int>(state->InputAt(1));
  int stack = OpParameter<int>(state->InputAt(2));

  FrameStateDescriptor* outer_state = NULL;
  Node* outer_node = state->InputAt(4);
  if (outer_node->opcode() == IrOpcode::kFrameState) {
    outer_state = GetFrameStateDescriptor(outer_node);
  }

  return new (instruction_zone())
      FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
}


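// Constant inputs can be passed to the frame state directly as immediates;
// any other value gets a unique operand so it remains available to the
// deoptimizer.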
static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
  switch (input->opcode()) {
    case IrOpcode::kInt32Constant:
    case IrOpcode::kNumberConstant:
    case IrOpcode::kFloat64Constant:
    case IrOpcode::kHeapConstant:
      return g->UseImmediate(input);
    default:
      return g->UseUnique(input);
  }
}


void InstructionSelector::AddFrameStateInputs(
    Node* state, InstructionOperandVector* inputs,
    FrameStateDescriptor* descriptor) {
  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());

  if (descriptor->outer_state() != NULL) {
    AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
  }

  Node* parameters = state->InputAt(0);
  Node* locals = state->InputAt(1);
  Node* stack = state->InputAt(2);
  Node* context = state->InputAt(3);

  DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
  DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
  DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());

  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());

  OperandGenerator g(this);
  for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
  }
  if (descriptor->HasContext()) {
    inputs->push_back(UseOrImmediate(&g, context));
  }
  for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
  }
  for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
  }
}


#if !V8_TURBOFAN_BACKEND

#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR


void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  UNIMPLEMENTED();
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {}

#endif  // !V8_TURBOFAN_BACKEND

}  // namespace compiler
}  // namespace internal
}  // namespace v8