deps: update v8 to 4.3.61.21
[platform/upstream/nodejs.git] / deps / v8 / src / compiler / instruction-selector.cc
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/instruction-selector.h"
6
7 #include <limits>
8
9 #include "src/compiler/instruction-selector-impl.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/node-properties.h"
12 #include "src/compiler/pipeline.h"
13 #include "src/compiler/schedule.h"
14 #include "src/compiler/state-values-utils.h"
15
16 namespace v8 {
17 namespace internal {
18 namespace compiler {
19
// Constructs an instruction selector for one schedule. All per-node side
// tables (defined_/used_ flags and the virtual-register map) are sized up
// front from {node_count}, so node ids can be used as direct indices.
InstructionSelector::InstructionSelector(Zone* zone, size_t node_count,
                                         Linkage* linkage,
                                         InstructionSequence* sequence,
                                         Schedule* schedule,
                                         SourcePositionTable* source_positions,
                                         Features features)
    : zone_(zone),
      linkage_(linkage),
      sequence_(sequence),
      source_positions_(source_positions),
      features_(features),
      schedule_(schedule),
      current_block_(NULL),
      instructions_(zone),
      defined_(node_count, false, zone),
      used_(node_count, false, zone),
      virtual_registers_(node_count,
                         InstructionOperand::kInvalidVirtualRegister, zone) {
  // Rough guess: about one instruction per node avoids early re-allocations.
  instructions_.reserve(node_count);
}
40
41
42 void InstructionSelector::SelectInstructions() {
43   // Mark the inputs of all phis in loop headers as used.
44   BasicBlockVector* blocks = schedule()->rpo_order();
45   for (auto const block : *blocks) {
46     if (!block->IsLoopHeader()) continue;
47     DCHECK_LE(2u, block->PredecessorCount());
48     for (Node* const phi : *block) {
49       if (phi->opcode() != IrOpcode::kPhi) continue;
50
51       // Mark all inputs as used.
52       for (Node* const input : phi->inputs()) {
53         MarkAsUsed(input);
54       }
55     }
56   }
57
58   // Visit each basic block in post order.
59   for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
60     VisitBlock(*i);
61   }
62
63   // Schedule the selected instructions.
64   for (auto const block : *blocks) {
65     InstructionBlock* instruction_block =
66         sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
67     size_t end = instruction_block->code_end();
68     size_t start = instruction_block->code_start();
69     DCHECK_LE(end, start);
70     sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
71     while (start-- > end) {
72       sequence()->AddInstruction(instructions_[start]);
73     }
74     sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
75   }
76 }
77
78
79 Instruction* InstructionSelector::Emit(InstructionCode opcode,
80                                        InstructionOperand output,
81                                        size_t temp_count,
82                                        InstructionOperand* temps) {
83   size_t output_count = output.IsInvalid() ? 0 : 1;
84   return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
85 }
86
87
88 Instruction* InstructionSelector::Emit(InstructionCode opcode,
89                                        InstructionOperand output,
90                                        InstructionOperand a, size_t temp_count,
91                                        InstructionOperand* temps) {
92   size_t output_count = output.IsInvalid() ? 0 : 1;
93   return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
94 }
95
96
97 Instruction* InstructionSelector::Emit(InstructionCode opcode,
98                                        InstructionOperand output,
99                                        InstructionOperand a,
100                                        InstructionOperand b, size_t temp_count,
101                                        InstructionOperand* temps) {
102   size_t output_count = output.IsInvalid() ? 0 : 1;
103   InstructionOperand inputs[] = {a, b};
104   size_t input_count = arraysize(inputs);
105   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
106               temps);
107 }
108
109
110 Instruction* InstructionSelector::Emit(InstructionCode opcode,
111                                        InstructionOperand output,
112                                        InstructionOperand a,
113                                        InstructionOperand b,
114                                        InstructionOperand c, size_t temp_count,
115                                        InstructionOperand* temps) {
116   size_t output_count = output.IsInvalid() ? 0 : 1;
117   InstructionOperand inputs[] = {a, b, c};
118   size_t input_count = arraysize(inputs);
119   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
120               temps);
121 }
122
123
124 Instruction* InstructionSelector::Emit(
125     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
126     InstructionOperand b, InstructionOperand c, InstructionOperand d,
127     size_t temp_count, InstructionOperand* temps) {
128   size_t output_count = output.IsInvalid() ? 0 : 1;
129   InstructionOperand inputs[] = {a, b, c, d};
130   size_t input_count = arraysize(inputs);
131   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
132               temps);
133 }
134
135
136 Instruction* InstructionSelector::Emit(
137     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
138     InstructionOperand b, InstructionOperand c, InstructionOperand d,
139     InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
140   size_t output_count = output.IsInvalid() ? 0 : 1;
141   InstructionOperand inputs[] = {a, b, c, d, e};
142   size_t input_count = arraysize(inputs);
143   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
144               temps);
145 }
146
147
148 Instruction* InstructionSelector::Emit(
149     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
150     InstructionOperand b, InstructionOperand c, InstructionOperand d,
151     InstructionOperand e, InstructionOperand f, size_t temp_count,
152     InstructionOperand* temps) {
153   size_t output_count = output.IsInvalid() ? 0 : 1;
154   InstructionOperand inputs[] = {a, b, c, d, e, f};
155   size_t input_count = arraysize(inputs);
156   return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
157               temps);
158 }
159
160
161 Instruction* InstructionSelector::Emit(
162     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
163     size_t input_count, InstructionOperand* inputs, size_t temp_count,
164     InstructionOperand* temps) {
165   Instruction* instr =
166       Instruction::New(instruction_zone(), opcode, output_count, outputs,
167                        input_count, inputs, temp_count, temps);
168   return Emit(instr);
169 }
170
171
// Appends {instr} to the instruction stream. Note that within a block the
// stream is recorded back-to-front (see VisitBlock) and re-reversed when the
// final sequence is assembled in SelectInstructions().
Instruction* InstructionSelector::Emit(Instruction* instr) {
  instructions_.push_back(instr);
  return instr;
}
176
177
178 bool InstructionSelector::CanCover(Node* user, Node* node) const {
179   return node->OwnedBy(user) &&
180          schedule()->block(node) == schedule()->block(user);
181 }
182
183
184 int InstructionSelector::GetVirtualRegister(const Node* node) {
185   DCHECK_NOT_NULL(node);
186   size_t const id = node->id();
187   DCHECK_LT(id, virtual_registers_.size());
188   int virtual_register = virtual_registers_[id];
189   if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
190     virtual_register = sequence()->NextVirtualRegister();
191     virtual_registers_[id] = virtual_register;
192   }
193   return virtual_register;
194 }
195
196
197 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
198     const {
199   std::map<NodeId, int> virtual_registers;
200   for (size_t n = 0; n < virtual_registers_.size(); ++n) {
201     if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
202       NodeId const id = static_cast<NodeId>(n);
203       virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
204     }
205   }
206   return virtual_registers;
207 }
208
209
210 bool InstructionSelector::IsDefined(Node* node) const {
211   DCHECK_NOT_NULL(node);
212   size_t const id = node->id();
213   DCHECK_LT(id, defined_.size());
214   return defined_[id];
215 }
216
217
218 void InstructionSelector::MarkAsDefined(Node* node) {
219   DCHECK_NOT_NULL(node);
220   size_t const id = node->id();
221   DCHECK_LT(id, defined_.size());
222   defined_[id] = true;
223 }
224
225
226 bool InstructionSelector::IsUsed(Node* node) const {
227   DCHECK_NOT_NULL(node);
228   if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
229   size_t const id = node->id();
230   DCHECK_LT(id, used_.size());
231   return used_[id];
232 }
233
234
235 void InstructionSelector::MarkAsUsed(Node* node) {
236   DCHECK_NOT_NULL(node);
237   size_t const id = node->id();
238   DCHECK_LT(id, used_.size());
239   used_[id] = true;
240 }
241
242
243 bool InstructionSelector::IsDouble(const Node* node) const {
244   DCHECK_NOT_NULL(node);
245   int const virtual_register = virtual_registers_[node->id()];
246   if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
247     return false;
248   }
249   return sequence()->IsDouble(virtual_register);
250 }
251
252
253 void InstructionSelector::MarkAsDouble(Node* node) {
254   DCHECK_NOT_NULL(node);
255   DCHECK(!IsReference(node));
256   sequence()->MarkAsDouble(GetVirtualRegister(node));
257 }
258
259
260 bool InstructionSelector::IsReference(const Node* node) const {
261   DCHECK_NOT_NULL(node);
262   int const virtual_register = virtual_registers_[node->id()];
263   if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
264     return false;
265   }
266   return sequence()->IsReference(virtual_register);
267 }
268
269
270 void InstructionSelector::MarkAsReference(Node* node) {
271   DCHECK_NOT_NULL(node);
272   DCHECK(!IsDouble(node));
273   sequence()->MarkAsReference(GetVirtualRegister(node));
274 }
275
276
277 void InstructionSelector::MarkAsRepresentation(MachineType rep,
278                                                const InstructionOperand& op) {
279   UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
280   switch (RepresentationOf(rep)) {
281     case kRepFloat32:
282     case kRepFloat64:
283       sequence()->MarkAsDouble(unalloc.virtual_register());
284       break;
285     case kRepTagged:
286       sequence()->MarkAsReference(unalloc.virtual_register());
287       break;
288     default:
289       break;
290   }
291 }
292
293
294 void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
295   DCHECK_NOT_NULL(node);
296   switch (RepresentationOf(rep)) {
297     case kRepFloat32:
298     case kRepFloat64:
299       MarkAsDouble(node);
300       break;
301     case kRepTagged:
302       MarkAsReference(node);
303       break;
304     default:
305       break;
306   }
307 }
308
309
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
//
// Scratch structure used while lowering a call: collects the call's output
// nodes/operands, the instruction arguments, and the nodes that must be
// pushed onto the stack before the call (filled in by
// InitializeCallBuffer below).
CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
                       FrameStateDescriptor* frame_desc)
    : descriptor(d),
      frame_state_descriptor(frame_desc),
      output_nodes(zone),
      outputs(zone),
      instruction_args(zone),
      pushed_nodes(zone) {
  // Reserve the expected final sizes up front to avoid re-allocation while
  // the buffer is being filled.
  output_nodes.reserve(d->ReturnCount());
  outputs.reserve(d->ReturnCount());
  pushed_nodes.reserve(input_count());
  instruction_args.reserve(input_count() + frame_state_value_count());
}
325
326
// TODO(bmeurer): Get rid of the CallBuffer business and make
// InstructionSelector::VisitCall platform independent instead.
//
// Fills {buffer} for the call node {call}: collects output operands (one per
// live return value), the callee operand, optional frame-state arguments,
// and splits the remaining value inputs into register/stack-slot arguments
// versus nodes that need explicit pushes. The *_immediate flags control
// whether a constant callee may be encoded as an immediate.
void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                               bool call_code_immediate,
                                               bool call_address_immediate) {
  OperandGenerator g(this);
  DCHECK_EQ(call->op()->ValueOutputCount(),
            static_cast<int>(buffer->descriptor->ReturnCount()));
  DCHECK_EQ(
      call->op()->ValueInputCount(),
      static_cast<int>(buffer->input_count() + buffer->frame_state_count()));

  if (buffer->descriptor->ReturnCount() > 0) {
    // Collect the projections that represent multiple outputs from this call.
    if (buffer->descriptor->ReturnCount() == 1) {
      buffer->output_nodes.push_back(call);
    } else {
      // Multi-output calls are consumed through kProjection nodes; index the
      // projections by their projection index. Unused returns stay nullptr.
      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
      for (auto use : call->uses()) {
        if (use->opcode() != IrOpcode::kProjection) continue;
        size_t const index = ProjectionIndexOf(use->op());
        DCHECK_LT(index, buffer->output_nodes.size());
        DCHECK(!buffer->output_nodes[index]);
        buffer->output_nodes[index] = use;
      }
    }

    // Filter out the outputs that aren't live because no projection uses them.
    size_t outputs_needed_by_framestate =
        buffer->frame_state_descriptor == NULL
            ? 0
            : buffer->frame_state_descriptor->state_combine()
                  .ConsumedOutputCount();
    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
      bool output_is_live =
          buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
      if (output_is_live) {
        MachineType type =
            buffer->descriptor->GetReturnType(static_cast<int>(i));
        LinkageLocation location =
            buffer->descriptor->GetReturnLocation(static_cast<int>(i));

        // Outputs consumed only by the frame state get a temp at the return
        // location; outputs with a real use define the projection node there.
        Node* output = buffer->output_nodes[i];
        InstructionOperand op =
            output == NULL ? g.TempLocation(location, type)
                           : g.DefineAsLocation(output, location, type);
        MarkAsRepresentation(type, op);

        buffer->outputs.push_back(op);
      }
    }
  }

  // The first argument is always the callee code.
  Node* callee = call->InputAt(0);
  switch (buffer->descriptor->kind()) {
    case CallDescriptor::kCallCodeObject:
      buffer->instruction_args.push_back(
          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallAddress:
      buffer->instruction_args.push_back(
          (call_address_immediate &&
           (callee->opcode() == IrOpcode::kInt32Constant ||
            callee->opcode() == IrOpcode::kInt64Constant))
              ? g.UseImmediate(callee)
              : g.UseRegister(callee));
      break;
    case CallDescriptor::kCallJSFunction:
      buffer->instruction_args.push_back(
          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
                        buffer->descriptor->GetInputType(0)));
      break;
  }
  DCHECK_EQ(1u, buffer->instruction_args.size());

  // If the call needs a frame state, we insert the state information as
  // follows (n is the number of value inputs to the frame state):
  // arg 1               : deoptimization id.
  // arg 2 - arg (n + 1) : value inputs to the frame state.
  if (buffer->frame_state_descriptor != NULL) {
    InstructionSequence::StateId state_id =
        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));

    Node* frame_state =
        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
    AddFrameStateInputs(frame_state, &buffer->instruction_args,
                        buffer->frame_state_descriptor);
  }
  DCHECK(1 + buffer->frame_state_value_count() ==
         buffer->instruction_args.size());

  size_t input_count = static_cast<size_t>(buffer->input_count());

  // Split the arguments into pushed_nodes and instruction_args. Pushed
  // arguments require an explicit push instruction before the call and do
  // not appear as arguments to the call. Everything else ends up
  // as an InstructionOperand argument to the call.
  auto iter(call->inputs().begin());
  int pushed_count = 0;
  for (size_t index = 0; index < input_count; ++iter, ++index) {
    DCHECK(iter != call->inputs().end());
    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
    if (index == 0) continue;  // The first argument (callee) is already done.
    InstructionOperand op =
        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
                      buffer->descriptor->GetInputType(index));
    if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
      // Fixed stack slots are encoded as negative indices; translate to a
      // zero-based position in pushed_nodes.
      int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
        buffer->pushed_nodes.resize(stack_index + 1, NULL);
      }
      DCHECK(!buffer->pushed_nodes[stack_index]);
      buffer->pushed_nodes[stack_index] = *iter;
      pushed_count++;
    } else {
      buffer->instruction_args.push_back(op);
    }
  }
  // Every pushed slot must have been filled exactly once (no gaps).
  CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
  DCHECK(static_cast<size_t>(input_count) ==
         (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
          buffer->frame_state_value_count()));
}
454
455
// Selects instructions for all nodes of {block}. Instructions are generated
// "top down" but recorded back-to-front, so the per-block slice of
// instructions_ ends up reversed; SelectInstructions() walks it backwards
// when assembling the final sequence (hence code_start >= code_end).
void InstructionSelector::VisitBlock(BasicBlock* block) {
  DCHECK(!current_block_);
  current_block_ = block;
  int current_block_end = static_cast<int>(instructions_.size());

  // Generate code for the block control "top down", but schedule the code
  // "bottom up".
  VisitControl(block);
  std::reverse(instructions_.begin() + current_block_end, instructions_.end());

  // Visit code in reverse control flow order, because architecture-specific
  // matching may cover more than one node at a time.
  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
       ++i) {
    Node* node = *i;
    // Skip nodes that are unused or already defined.
    if (!IsUsed(node) || IsDefined(node)) continue;
    // Generate code for this node "top down", but schedule the code "bottom
    // up".
    size_t current_node_end = instructions_.size();
    VisitNode(node);
    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
  }

  // We're done with the block.
  InstructionBlock* instruction_block =
      sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
  instruction_block->set_code_start(static_cast<int>(instructions_.size()));
  instruction_block->set_code_end(current_block_end);

  current_block_ = NULL;
}
488
489
// Emits the control-flow instruction(s) that terminate {block}, dispatching
// on the block's control kind (goto/call/branch/switch/return/...).
void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
  // SSA deconstruction requires targets of branches not to have phis.
  // Edge split form guarantees this property, but is more strict.
  if (block->SuccessorCount() > 1) {
    for (BasicBlock* const successor : block->successors()) {
      for (Node* const node : *successor) {
        CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
      }
    }
  }
#endif

  Node* input = block->control_input();
  switch (block->control()) {
    case BasicBlock::kGoto:
      return VisitGoto(block->SuccessorAt(0));
    case BasicBlock::kCall: {
      DCHECK_EQ(IrOpcode::kCall, input->opcode());
      BasicBlock* success = block->SuccessorAt(0);
      BasicBlock* exception = block->SuccessorAt(1);
      // Emit the call (wired to the exception handler block), then fall
      // through to the success block.
      return VisitCall(input, exception), VisitGoto(success);
    }
    case BasicBlock::kBranch: {
      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
      BasicBlock* tbranch = block->SuccessorAt(0);
      BasicBlock* fbranch = block->SuccessorAt(1);
      if (tbranch == fbranch) return VisitGoto(tbranch);
      // Treat special Branch(Always, IfTrue, IfFalse) as Goto(IfTrue).
      Node* const condition = input->InputAt(0);
      if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
      return VisitBranch(input, tbranch, fbranch);
    }
    case BasicBlock::kSwitch: {
      DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
      SwitchInfo sw;
      // Last successor must be Default.
      sw.default_branch = block->successors().back();
      DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
      // All other successors must be cases.
      sw.case_count = block->SuccessorCount() - 1;
      DCHECK_LE(1u, sw.case_count);
      sw.case_branches = &block->successors().front();
      // Determine case values and their min/max.
      sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
      sw.min_value = std::numeric_limits<int32_t>::max();
      sw.max_value = std::numeric_limits<int32_t>::min();
      for (size_t index = 0; index < sw.case_count; ++index) {
        BasicBlock* branch = sw.case_branches[index];
        int32_t value = OpParameter<int32_t>(branch->front()->op());
        sw.case_values[index] = value;
        if (sw.min_value > value) sw.min_value = value;
        if (sw.max_value < value) sw.max_value = value;
      }
      DCHECK_LE(sw.min_value, sw.max_value);
      // Note that {value_range} can be 0 if {min_value} is -2^31 and
      // {max_value}
      // is 2^31-1, so don't assume that it's non-zero below.
      sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
                       bit_cast<uint32_t>(sw.min_value);
      return VisitSwitch(input, sw);
    }
    case BasicBlock::kReturn: {
      // If the result itself is a return, return its input.
      Node* value = (input != nullptr && input->opcode() == IrOpcode::kReturn)
                        ? input->InputAt(0)
                        : input;
      return VisitReturn(value);
    }
    case BasicBlock::kDeoptimize: {
      // If the result itself is a return, return its input.
      Node* value =
          (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
              ? input->InputAt(0)
              : input;
      return VisitDeoptimize(value);
    }
    case BasicBlock::kThrow:
      DCHECK_EQ(IrOpcode::kThrow, input->opcode());
      return VisitThrow(input->InputAt(0));
    case BasicBlock::kNone: {
      // TODO(titzer): exit block doesn't have control.
      DCHECK_NULL(input);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
580
581
// Dispatches on {node}'s opcode to the matching Visit* routine. Where the
// result representation is known here (doubles, tagged references), the node
// is tagged before visiting, since the register allocator needs that
// information; the comma expressions below do exactly that tagging.
void InstructionSelector::VisitNode(Node* node) {
  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
  SourcePosition source_position = source_positions_->GetSourcePosition(node);
  if (!source_position.IsUnknown()) {
    DCHECK(!source_position.IsInvalid());
    // Calls always get a position marker; other nodes only when
    // --turbo-source-positions is enabled.
    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
    }
  }
  switch (node->opcode()) {
    case IrOpcode::kStart:
    case IrOpcode::kLoop:
    case IrOpcode::kEnd:
    case IrOpcode::kBranch:
    case IrOpcode::kIfTrue:
    case IrOpcode::kIfFalse:
    case IrOpcode::kIfSuccess:
    case IrOpcode::kIfException:
    case IrOpcode::kSwitch:
    case IrOpcode::kIfValue:
    case IrOpcode::kIfDefault:
    case IrOpcode::kEffectPhi:
    case IrOpcode::kMerge:
      // No code needed for these graph artifacts.
      return;
    case IrOpcode::kFinish:
      return MarkAsReference(node), VisitFinish(node);
    case IrOpcode::kParameter: {
      MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
      MarkAsRepresentation(type, node);
      return VisitParameter(node);
    }
    case IrOpcode::kOsrValue:
      return MarkAsReference(node), VisitOsrValue(node);
    case IrOpcode::kPhi: {
      MachineType type = OpParameter<MachineType>(node);
      MarkAsRepresentation(type, node);
      return VisitPhi(node);
    }
    case IrOpcode::kProjection:
      return VisitProjection(node);
    case IrOpcode::kInt32Constant:
    case IrOpcode::kInt64Constant:
    case IrOpcode::kExternalConstant:
      return VisitConstant(node);
    case IrOpcode::kFloat32Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kFloat64Constant:
      return MarkAsDouble(node), VisitConstant(node);
    case IrOpcode::kHeapConstant:
      return MarkAsReference(node), VisitConstant(node);
    case IrOpcode::kNumberConstant: {
      // Smi-representable numbers are untagged values; others are heap
      // numbers and thus references.
      double value = OpParameter<double>(node);
      if (!IsSmiDouble(value)) MarkAsReference(node);
      return VisitConstant(node);
    }
    case IrOpcode::kCall:
      return VisitCall(node, nullptr);
    case IrOpcode::kFrameState:
    case IrOpcode::kStateValues:
      return;
    case IrOpcode::kLoad: {
      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
      MarkAsRepresentation(rep, node);
      return VisitLoad(node);
    }
    case IrOpcode::kStore:
      return VisitStore(node);
    case IrOpcode::kWord32And:
      return VisitWord32And(node);
    case IrOpcode::kWord32Or:
      return VisitWord32Or(node);
    case IrOpcode::kWord32Xor:
      return VisitWord32Xor(node);
    case IrOpcode::kWord32Shl:
      return VisitWord32Shl(node);
    case IrOpcode::kWord32Shr:
      return VisitWord32Shr(node);
    case IrOpcode::kWord32Sar:
      return VisitWord32Sar(node);
    case IrOpcode::kWord32Ror:
      return VisitWord32Ror(node);
    case IrOpcode::kWord32Equal:
      return VisitWord32Equal(node);
    case IrOpcode::kWord32Clz:
      return VisitWord32Clz(node);
    case IrOpcode::kWord64And:
      return VisitWord64And(node);
    case IrOpcode::kWord64Or:
      return VisitWord64Or(node);
    case IrOpcode::kWord64Xor:
      return VisitWord64Xor(node);
    case IrOpcode::kWord64Shl:
      return VisitWord64Shl(node);
    case IrOpcode::kWord64Shr:
      return VisitWord64Shr(node);
    case IrOpcode::kWord64Sar:
      return VisitWord64Sar(node);
    case IrOpcode::kWord64Ror:
      return VisitWord64Ror(node);
    case IrOpcode::kWord64Equal:
      return VisitWord64Equal(node);
    case IrOpcode::kInt32Add:
      return VisitInt32Add(node);
    case IrOpcode::kInt32AddWithOverflow:
      return VisitInt32AddWithOverflow(node);
    case IrOpcode::kInt32Sub:
      return VisitInt32Sub(node);
    case IrOpcode::kInt32SubWithOverflow:
      return VisitInt32SubWithOverflow(node);
    case IrOpcode::kInt32Mul:
      return VisitInt32Mul(node);
    case IrOpcode::kInt32MulHigh:
      return VisitInt32MulHigh(node);
    case IrOpcode::kInt32Div:
      return VisitInt32Div(node);
    case IrOpcode::kInt32Mod:
      return VisitInt32Mod(node);
    case IrOpcode::kInt32LessThan:
      return VisitInt32LessThan(node);
    case IrOpcode::kInt32LessThanOrEqual:
      return VisitInt32LessThanOrEqual(node);
    case IrOpcode::kUint32Div:
      return VisitUint32Div(node);
    case IrOpcode::kUint32LessThan:
      return VisitUint32LessThan(node);
    case IrOpcode::kUint32LessThanOrEqual:
      return VisitUint32LessThanOrEqual(node);
    case IrOpcode::kUint32Mod:
      return VisitUint32Mod(node);
    case IrOpcode::kUint32MulHigh:
      return VisitUint32MulHigh(node);
    case IrOpcode::kInt64Add:
      return VisitInt64Add(node);
    case IrOpcode::kInt64Sub:
      return VisitInt64Sub(node);
    case IrOpcode::kInt64Mul:
      return VisitInt64Mul(node);
    case IrOpcode::kInt64Div:
      return VisitInt64Div(node);
    case IrOpcode::kInt64Mod:
      return VisitInt64Mod(node);
    case IrOpcode::kInt64LessThan:
      return VisitInt64LessThan(node);
    case IrOpcode::kInt64LessThanOrEqual:
      return VisitInt64LessThanOrEqual(node);
    case IrOpcode::kUint64Div:
      return VisitUint64Div(node);
    case IrOpcode::kUint64LessThan:
      return VisitUint64LessThan(node);
    case IrOpcode::kUint64Mod:
      return VisitUint64Mod(node);
    case IrOpcode::kChangeFloat32ToFloat64:
      return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
    case IrOpcode::kChangeInt32ToFloat64:
      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
    case IrOpcode::kChangeUint32ToFloat64:
      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
    case IrOpcode::kChangeFloat64ToInt32:
      return VisitChangeFloat64ToInt32(node);
    case IrOpcode::kChangeFloat64ToUint32:
      return VisitChangeFloat64ToUint32(node);
    case IrOpcode::kChangeInt32ToInt64:
      return VisitChangeInt32ToInt64(node);
    case IrOpcode::kChangeUint32ToUint64:
      return VisitChangeUint32ToUint64(node);
    case IrOpcode::kTruncateFloat64ToFloat32:
      return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
    case IrOpcode::kTruncateFloat64ToInt32:
      return VisitTruncateFloat64ToInt32(node);
    case IrOpcode::kTruncateInt64ToInt32:
      return VisitTruncateInt64ToInt32(node);
    case IrOpcode::kFloat64Add:
      return MarkAsDouble(node), VisitFloat64Add(node);
    case IrOpcode::kFloat64Sub:
      return MarkAsDouble(node), VisitFloat64Sub(node);
    case IrOpcode::kFloat64Mul:
      return MarkAsDouble(node), VisitFloat64Mul(node);
    case IrOpcode::kFloat64Div:
      return MarkAsDouble(node), VisitFloat64Div(node);
    case IrOpcode::kFloat64Mod:
      return MarkAsDouble(node), VisitFloat64Mod(node);
    case IrOpcode::kFloat64Min:
      return MarkAsDouble(node), VisitFloat64Min(node);
    case IrOpcode::kFloat64Max:
      return MarkAsDouble(node), VisitFloat64Max(node);
    case IrOpcode::kFloat64Sqrt:
      return MarkAsDouble(node), VisitFloat64Sqrt(node);
    case IrOpcode::kFloat64Equal:
      return VisitFloat64Equal(node);
    case IrOpcode::kFloat64LessThan:
      return VisitFloat64LessThan(node);
    case IrOpcode::kFloat64LessThanOrEqual:
      return VisitFloat64LessThanOrEqual(node);
    case IrOpcode::kFloat64RoundDown:
      return MarkAsDouble(node), VisitFloat64RoundDown(node);
    case IrOpcode::kFloat64RoundTruncate:
      return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
    case IrOpcode::kFloat64RoundTiesAway:
      return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
    case IrOpcode::kFloat64ExtractLowWord32:
      return VisitFloat64ExtractLowWord32(node);
    case IrOpcode::kFloat64ExtractHighWord32:
      return VisitFloat64ExtractHighWord32(node);
    case IrOpcode::kFloat64InsertLowWord32:
      return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
    case IrOpcode::kFloat64InsertHighWord32:
      return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
    case IrOpcode::kLoadStackPointer:
      return VisitLoadStackPointer(node);
    case IrOpcode::kCheckedLoad: {
      MachineType rep = OpParameter<MachineType>(node);
      MarkAsRepresentation(rep, node);
      return VisitCheckedLoad(node);
    }
    case IrOpcode::kCheckedStore:
      return VisitCheckedStore(node);
    default:
      // Any opcode reaching here was not lowered by earlier phases.
      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
               node->opcode(), node->op()->mnemonic(), node->id());
      break;
  }
}
805
806
807 #if V8_TURBOFAN_BACKEND
808
809 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
810   OperandGenerator g(this);
811   Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
812        g.UseRegister(node->InputAt(0)));
813 }
814
815
816 void InstructionSelector::VisitLoadStackPointer(Node* node) {
817   OperandGenerator g(this);
818   Emit(kArchStackPointer, g.DefineAsRegister(node));
819 }
820
821
822 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
823                                           InstructionOperand& index_operand) {
824   OperandGenerator g(this);
825   size_t input_count = 2 + sw.value_range;
826   auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
827   inputs[0] = index_operand;
828   InstructionOperand default_operand = g.Label(sw.default_branch);
829   std::fill(&inputs[1], &inputs[input_count], default_operand);
830   for (size_t index = 0; index < sw.case_count; ++index) {
831     size_t value = sw.case_values[index] - sw.min_value;
832     BasicBlock* branch = sw.case_branches[index];
833     DCHECK_LE(0u, value);
834     DCHECK_LT(value + 2, input_count);
835     inputs[value + 2] = g.Label(branch);
836   }
837   Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
838 }
839
840
841 void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
842                                            InstructionOperand& value_operand) {
843   OperandGenerator g(this);
844   size_t input_count = 2 + sw.case_count * 2;
845   auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
846   inputs[0] = value_operand;
847   inputs[1] = g.Label(sw.default_branch);
848   for (size_t index = 0; index < sw.case_count; ++index) {
849     int32_t value = sw.case_values[index];
850     BasicBlock* branch = sw.case_branches[index];
851     inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
852     inputs[index * 2 + 2 + 1] = g.Label(branch);
853   }
854   Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
855 }
856
857
858 #endif  // V8_TURBOFAN_BACKEND
859
860 // 32 bit targets do not implement the following instructions.
861 #if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
862
863 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
864
865
866 void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
867
868
869 void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
870
871
872 void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
873
874
875 void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
876
877
878 void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
879
880
881 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
882
883
884 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
885
886
887 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
888
889
890 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
891
892
893 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
894
895
896 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
897
898
899 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
900
901
902 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
903   UNIMPLEMENTED();
904 }
905
906
907 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
908
909
910 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
911
912
913 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
914
915
916 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
917
918
919 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
920   UNIMPLEMENTED();
921 }
922
923
924 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
925   UNIMPLEMENTED();
926 }
927
928
929 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
930   UNIMPLEMENTED();
931 }
932
933 #endif  // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
934
935
936 void InstructionSelector::VisitFinish(Node* node) {
937   OperandGenerator g(this);
938   Node* value = node->InputAt(0);
939   Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
940 }
941
942
943 void InstructionSelector::VisitParameter(Node* node) {
944   OperandGenerator g(this);
945   int index = OpParameter<int>(node);
946   Emit(kArchNop,
947        g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
948                           linkage()->GetParameterType(index)));
949 }
950
951
952 void InstructionSelector::VisitOsrValue(Node* node) {
953   OperandGenerator g(this);
954   int index = OpParameter<int>(node);
955   Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
956                                     kMachAnyTagged));
957 }
958
959
960 void InstructionSelector::VisitPhi(Node* node) {
961   const int input_count = node->op()->ValueInputCount();
962   PhiInstruction* phi = new (instruction_zone())
963       PhiInstruction(instruction_zone(), GetVirtualRegister(node),
964                      static_cast<size_t>(input_count));
965   sequence()
966       ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
967       ->AddPhi(phi);
968   for (int i = 0; i < input_count; ++i) {
969     Node* const input = node->InputAt(i);
970     MarkAsUsed(input);
971     phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
972   }
973 }
974
975
976 void InstructionSelector::VisitProjection(Node* node) {
977   OperandGenerator g(this);
978   Node* value = node->InputAt(0);
979   switch (value->opcode()) {
980     case IrOpcode::kInt32AddWithOverflow:
981     case IrOpcode::kInt32SubWithOverflow:
982       if (ProjectionIndexOf(node->op()) == 0u) {
983         Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
984       } else {
985         DCHECK(ProjectionIndexOf(node->op()) == 1u);
986         MarkAsUsed(value);
987       }
988       break;
989     default:
990       break;
991   }
992 }
993
994
995 void InstructionSelector::VisitConstant(Node* node) {
996   // We must emit a NOP here because every live range needs a defining
997   // instruction in the register allocator.
998   OperandGenerator g(this);
999   Emit(kArchNop, g.DefineAsConstant(node));
1000 }
1001
1002
1003 void InstructionSelector::VisitGoto(BasicBlock* target) {
1004   // jump to the next block.
1005   OperandGenerator g(this);
1006   Emit(kArchJmp, g.NoOutput(), g.Label(target));
1007 }
1008
1009
1010 void InstructionSelector::VisitReturn(Node* value) {
1011   OperandGenerator g(this);
1012   if (value != NULL) {
1013     Emit(kArchRet, g.NoOutput(),
1014          g.UseLocation(value, linkage()->GetReturnLocation(),
1015                        linkage()->GetReturnType()));
1016   } else {
1017     Emit(kArchRet, g.NoOutput());
1018   }
1019 }
1020
1021
1022 void InstructionSelector::VisitDeoptimize(Node* value) {
1023   DCHECK(FLAG_turbo_deoptimization);
1024
1025   OperandGenerator g(this);
1026
1027   FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
1028   size_t arg_count = desc->GetTotalSize() + 1;  // Include deopt id.
1029
1030   InstructionOperandVector args(instruction_zone());
1031   args.reserve(arg_count);
1032
1033   InstructionSequence::StateId state_id =
1034       sequence()->AddFrameStateDescriptor(desc);
1035   args.push_back(g.TempImmediate(state_id.ToInt()));
1036
1037   AddFrameStateInputs(value, &args, desc);
1038
1039   DCHECK_EQ(args.size(), arg_count);
1040
1041   Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
1042 }
1043
1044
1045 void InstructionSelector::VisitThrow(Node* value) {
1046   OperandGenerator g(this);
1047   Emit(kArchNop, g.NoOutput());  // TODO(titzer)
1048 }
1049
1050
1051 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
1052     Node* state) {
1053   DCHECK(state->opcode() == IrOpcode::kFrameState);
1054   DCHECK_EQ(5, state->InputCount());
1055   DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(0)->opcode());
1056   DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(1)->opcode());
1057   DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(2)->opcode());
1058   FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
1059
1060   int parameters =
1061       static_cast<int>(StateValuesAccess(state->InputAt(0)).size());
1062   int locals = static_cast<int>(StateValuesAccess(state->InputAt(1)).size());
1063   int stack = static_cast<int>(StateValuesAccess(state->InputAt(2)).size());
1064
1065   FrameStateDescriptor* outer_state = NULL;
1066   Node* outer_node = state->InputAt(4);
1067   if (outer_node->opcode() == IrOpcode::kFrameState) {
1068     outer_state = GetFrameStateDescriptor(outer_node);
1069   }
1070
1071   return new (instruction_zone()) FrameStateDescriptor(
1072       instruction_zone(), state_info, parameters, locals, stack, outer_state);
1073 }
1074
1075
1076 static InstructionOperand SlotOrImmediate(OperandGenerator* g, Node* input) {
1077   switch (input->opcode()) {
1078     case IrOpcode::kInt32Constant:
1079     case IrOpcode::kNumberConstant:
1080     case IrOpcode::kFloat64Constant:
1081     case IrOpcode::kHeapConstant:
1082       return g->UseImmediate(input);
1083     default:
1084       return g->UseUniqueSlot(input);
1085   }
1086 }
1087
1088
1089 void InstructionSelector::AddFrameStateInputs(
1090     Node* state, InstructionOperandVector* inputs,
1091     FrameStateDescriptor* descriptor) {
1092   DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
1093
1094   if (descriptor->outer_state() != NULL) {
1095     AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
1096   }
1097
1098   Node* parameters = state->InputAt(0);
1099   Node* locals = state->InputAt(1);
1100   Node* stack = state->InputAt(2);
1101   Node* context = state->InputAt(3);
1102
1103   DCHECK_EQ(IrOpcode::kTypedStateValues, parameters->op()->opcode());
1104   DCHECK_EQ(IrOpcode::kTypedStateValues, locals->op()->opcode());
1105   DCHECK_EQ(IrOpcode::kTypedStateValues, stack->op()->opcode());
1106
1107   DCHECK_EQ(descriptor->parameters_count(),
1108             StateValuesAccess(parameters).size());
1109   DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
1110   DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
1111
1112   ZoneVector<MachineType> types(instruction_zone());
1113   types.reserve(descriptor->GetSize());
1114
1115   OperandGenerator g(this);
1116   size_t value_index = 0;
1117   for (StateValuesAccess::TypedNode input_node :
1118        StateValuesAccess(parameters)) {
1119     inputs->push_back(SlotOrImmediate(&g, input_node.node));
1120     descriptor->SetType(value_index++, input_node.type);
1121   }
1122   if (descriptor->HasContext()) {
1123     inputs->push_back(SlotOrImmediate(&g, context));
1124     descriptor->SetType(value_index++, kMachAnyTagged);
1125   }
1126   for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
1127     inputs->push_back(SlotOrImmediate(&g, input_node.node));
1128     descriptor->SetType(value_index++, input_node.type);
1129   }
1130   for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
1131     inputs->push_back(SlotOrImmediate(&g, input_node.node));
1132     descriptor->SetType(value_index++, input_node.type);
1133   }
1134   DCHECK(value_index == descriptor->GetSize());
1135 }
1136
1137
1138 #if !V8_TURBOFAN_BACKEND
1139
// Fallback stubs for ports without a TurboFan backend: every machine-level
// visitor simply aborts.
#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR


// Call lowering is backend-specific; unreachable without a backend.
void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
  UNIMPLEMENTED();
}


// Branch lowering is backend-specific; unreachable without a backend.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  UNIMPLEMENTED();
}


// Switch lowering is backend-specific; unreachable without a backend.
void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
  UNIMPLEMENTED();
}


// static
// With no backend, no optional machine operators are supported.
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
  return MachineOperatorBuilder::Flag::kNoFlags;
}
1167
1168 #endif  // !V8_TURBOFAN_BACKEND
1169
1170 }  // namespace compiler
1171 }  // namespace internal
1172 }  // namespace v8