1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/instruction-selector.h"
9 #include "src/compiler/instruction-selector-impl.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/node-properties.h"
12 #include "src/compiler/pipeline.h"
// Constructs an instruction selector for a graph with |node_count| nodes.
// The per-node defined_/used_ bitsets and the node-id -> virtual-register map
// are sized up front; instruction storage is reserved to limit reallocation.
// NOTE(review): several constructor parameters and initializer-list entries
// are elided in this excerpt.
18 InstructionSelector::InstructionSelector(Zone* zone, size_t node_count,
20 InstructionSequence* sequence,
22 SourcePositionTable* source_positions,
27 source_positions_(source_positions),
32 defined_(node_count, false, zone),
33 used_(node_count, false, zone),
34 virtual_registers_(node_count,
35 InstructionOperand::kInvalidVirtualRegister, zone) {
36 instructions_.reserve(node_count);
// Main driver: marks loop-header phi inputs as used, selects instructions for
// every block in reverse RPO, then copies the selected instructions into the
// final InstructionSequence block by block.
40 void InstructionSelector::SelectInstructions() {
41 // Mark the inputs of all phis in loop headers as used.
42 BasicBlockVector* blocks = schedule()->rpo_order();
43 for (auto const block : *blocks) {
44 if (!block->IsLoopHeader()) continue;
45 DCHECK_LE(2u, block->PredecessorCount());
46 for (Node* const phi : *block) {
47 if (phi->opcode() != IrOpcode::kPhi) continue;
49 // Mark all inputs as used.
50 for (Node* const input : phi->inputs()) {
56 // Visit each basic block in post order.
57 for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
61 // Schedule the selected instructions.
62 for (auto const block : *blocks) {
63 InstructionBlock* instruction_block =
64 sequence()->InstructionBlockAt(block->GetRpoNumber());
65 size_t end = instruction_block->code_end();
66 size_t start = instruction_block->code_start();
// code_start >= code_end: instructions_ stores each block's code reversed,
// so the countdown loop below emits them into the sequence in forward order.
67 DCHECK_LE(end, start);
68 sequence()->StartBlock(block->GetRpoNumber());
69 while (start-- > end) {
70 sequence()->AddInstruction(instructions_[start]);
72 sequence()->EndBlock(block->GetRpoNumber());
// Convenience Emit overload: no inputs. An invalid |output| operand means the
// instruction produces no outputs (output_count becomes 0). Forwards to the
// general Emit below.
77 Instruction* InstructionSelector::Emit(InstructionCode opcode,
78 InstructionOperand output,
80 InstructionOperand* temps) {
81 size_t output_count = output.IsInvalid() ? 0 : 1;
82 return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
// Convenience Emit overload: one input operand |a|.
86 Instruction* InstructionSelector::Emit(InstructionCode opcode,
87 InstructionOperand output,
88 InstructionOperand a, size_t temp_count,
89 InstructionOperand* temps) {
90 size_t output_count = output.IsInvalid() ? 0 : 1;
91 return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
// Convenience Emit overload: two input operands, packed into a stack array.
95 Instruction* InstructionSelector::Emit(InstructionCode opcode,
96 InstructionOperand output,
98 InstructionOperand b, size_t temp_count,
99 InstructionOperand* temps) {
100 size_t output_count = output.IsInvalid() ? 0 : 1;
101 InstructionOperand inputs[] = {a, b};
102 size_t input_count = arraysize(inputs);
103 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
// Convenience Emit overload: three input operands.
108 Instruction* InstructionSelector::Emit(InstructionCode opcode,
109 InstructionOperand output,
110 InstructionOperand a,
111 InstructionOperand b,
112 InstructionOperand c, size_t temp_count,
113 InstructionOperand* temps) {
114 size_t output_count = output.IsInvalid() ? 0 : 1;
115 InstructionOperand inputs[] = {a, b, c};
116 size_t input_count = arraysize(inputs);
117 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
// Convenience Emit overload: four input operands.
122 Instruction* InstructionSelector::Emit(
123 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
124 InstructionOperand b, InstructionOperand c, InstructionOperand d,
125 size_t temp_count, InstructionOperand* temps) {
126 size_t output_count = output.IsInvalid() ? 0 : 1;
127 InstructionOperand inputs[] = {a, b, c, d};
128 size_t input_count = arraysize(inputs);
129 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
// Convenience Emit overload: five input operands.
134 Instruction* InstructionSelector::Emit(
135 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
136 InstructionOperand b, InstructionOperand c, InstructionOperand d,
137 InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
138 size_t output_count = output.IsInvalid() ? 0 : 1;
139 InstructionOperand inputs[] = {a, b, c, d, e};
140 size_t input_count = arraysize(inputs);
141 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
// Convenience Emit overload: six input operands.
146 Instruction* InstructionSelector::Emit(
147 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
148 InstructionOperand b, InstructionOperand c, InstructionOperand d,
149 InstructionOperand e, InstructionOperand f, size_t temp_count,
150 InstructionOperand* temps) {
151 size_t output_count = output.IsInvalid() ? 0 : 1;
152 InstructionOperand inputs[] = {a, b, c, d, e, f};
153 size_t input_count = arraysize(inputs);
154 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
// General Emit: allocates the Instruction in the instruction zone from raw
// operand arrays. All convenience overloads above funnel into this one.
159 Instruction* InstructionSelector::Emit(
160 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
161 size_t input_count, InstructionOperand* inputs, size_t temp_count,
162 InstructionOperand* temps) {
164 Instruction::New(instruction_zone(), opcode, output_count, outputs,
165 input_count, inputs, temp_count, temps);
// Appends an already-built instruction to the selection buffer.
170 Instruction* InstructionSelector::Emit(Instruction* instr) {
171 instructions_.push_back(instr);
// Returns true if |node| can be covered (i.e. matched into) the instruction
// selected for |user|: |user| must be the sole owner of |node| and both must
// be scheduled in the same basic block.
176 bool InstructionSelector::CanCover(Node* user, Node* node) const {
177 return node->OwnedBy(user) &&
178 schedule()->block(node) == schedule()->block(user);
// Returns the virtual register for |node|, lazily allocating one from the
// sequence on first request and caching it in virtual_registers_.
182 int InstructionSelector::GetVirtualRegister(const Node* node) {
183 DCHECK_NOT_NULL(node);
184 size_t const id = node->id();
185 DCHECK_LT(id, virtual_registers_.size());
186 int virtual_register = virtual_registers_[id];
187 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
188 virtual_register = sequence()->NextVirtualRegister();
189 virtual_registers_[id] = virtual_register;
191 return virtual_register;
// Test-only accessor: snapshots the node-id -> virtual-register mapping,
// skipping nodes that never received a register.
195 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
197 std::map<NodeId, int> virtual_registers;
198 for (size_t n = 0; n < virtual_registers_.size(); ++n) {
199 if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
200 NodeId const id = static_cast<NodeId>(n);
201 virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
204 return virtual_registers;
// Whether code has already been generated for |node| (see MarkAsDefined).
// NOTE(review): the return statement is elided in this excerpt; presumably it
// reads defined_[id].
208 bool InstructionSelector::IsDefined(Node* node) const {
209 DCHECK_NOT_NULL(node);
210 size_t const id = node->id();
211 DCHECK_LT(id, defined_.size());
// Records that code has been generated for |node|, so VisitBlock skips it.
216 void InstructionSelector::MarkAsDefined(Node* node) {
217 DCHECK_NOT_NULL(node);
218 size_t const id = node->id();
219 DCHECK_LT(id, defined_.size());
// Whether |node| must be code-generated. Nodes without the kEliminatable
// property are unconditionally used; otherwise the used_ bitset decides.
224 bool InstructionSelector::IsUsed(Node* node) const {
225 DCHECK_NOT_NULL(node);
226 if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
227 size_t const id = node->id();
228 DCHECK_LT(id, used_.size());
// Flags |node| as used so it will not be eliminated during selection.
233 void InstructionSelector::MarkAsUsed(Node* node) {
234 DCHECK_NOT_NULL(node);
235 size_t const id = node->id();
236 DCHECK_LT(id, used_.size());
// Whether |node|'s virtual register carries a double value. A node with no
// virtual register yet cannot have been marked (elided branch presumably
// returns false).
241 bool InstructionSelector::IsDouble(const Node* node) const {
242 DCHECK_NOT_NULL(node);
243 int const virtual_register = virtual_registers_[node->id()];
244 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
247 return sequence()->IsDouble(virtual_register);
// Marks |node|'s virtual register as double-valued. Double and reference are
// mutually exclusive, enforced by the DCHECK.
251 void InstructionSelector::MarkAsDouble(Node* node) {
252 DCHECK_NOT_NULL(node);
253 DCHECK(!IsReference(node));
254 sequence()->MarkAsDouble(GetVirtualRegister(node));
// Whether |node|'s virtual register holds a tagged reference (GC pointer).
// Mirrors IsDouble above.
258 bool InstructionSelector::IsReference(const Node* node) const {
259 DCHECK_NOT_NULL(node);
260 int const virtual_register = virtual_registers_[node->id()];
261 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
264 return sequence()->IsReference(virtual_register);
// Marks |node|'s virtual register as holding a tagged reference; must not
// already be marked as double.
268 void InstructionSelector::MarkAsReference(Node* node) {
269 DCHECK_NOT_NULL(node);
270 DCHECK(!IsDouble(node));
271 sequence()->MarkAsReference(GetVirtualRegister(node));
// Tags the virtual register behind an (unallocated) operand according to the
// machine representation of |rep|: doubles and tagged references get marked;
// other representations (elided cases) need no tagging.
275 void InstructionSelector::MarkAsRepresentation(MachineType rep,
276 const InstructionOperand& op) {
277 UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
278 switch (RepresentationOf(rep)) {
281 sequence()->MarkAsDouble(unalloc.virtual_register());
284 sequence()->MarkAsReference(unalloc.virtual_register());
// Node-based variant of the above: dispatches on the representation of |rep|
// and applies the matching MarkAs* helper to |node|.
292 void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
293 DCHECK_NOT_NULL(node);
294 switch (RepresentationOf(rep)) {
300 MarkAsReference(node);
308 // TODO(bmeurer): Get rid of the CallBuffer business and make
309 // InstructionSelector::VisitCall platform independent instead.
// Pre-sizes the output/argument vectors from the call descriptor so that
// InitializeCallBuffer can fill them without reallocation.
310 CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
311 FrameStateDescriptor* frame_desc)
313 frame_state_descriptor(frame_desc),
316 instruction_args(zone),
318 output_nodes.reserve(d->ReturnCount());
319 outputs.reserve(d->ReturnCount());
320 pushed_nodes.reserve(input_count());
321 instruction_args.reserve(input_count() + frame_state_value_count());
325 // TODO(bmeurer): Get rid of the CallBuffer business and make
326 // InstructionSelector::VisitCall platform independent instead.
// Fills |buffer| for |call|: collects live outputs (direct value or
// projections), emits the callee operand (immediate or register depending on
// descriptor kind and the *_immediate flags), appends frame-state inputs when
// deoptimization info is required, and splits the remaining arguments into
// stack-pushed nodes vs. direct instruction operands.
327 void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
328 bool call_code_immediate,
329 bool call_address_immediate) {
330 OperandGenerator g(this);
331 DCHECK_EQ(call->op()->ValueOutputCount(),
332 static_cast<int>(buffer->descriptor->ReturnCount()));
334 call->op()->ValueInputCount(),
335 static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
337 if (buffer->descriptor->ReturnCount() > 0) {
338 // Collect the projections that represent multiple outputs from this call.
339 if (buffer->descriptor->ReturnCount() == 1) {
340 buffer->output_nodes.push_back(call);
342 buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
343 for (auto use : call->uses()) {
344 if (use->opcode() != IrOpcode::kProjection) continue;
345 size_t const index = ProjectionIndexOf(use->op());
346 DCHECK_LT(index, buffer->output_nodes.size());
347 DCHECK(!buffer->output_nodes[index]);
348 buffer->output_nodes[index] = use;
352 // Filter out the outputs that aren't live because no projection uses them.
353 size_t outputs_needed_by_framestate =
354 buffer->frame_state_descriptor == NULL
356 : buffer->frame_state_descriptor->state_combine()
357 .ConsumedOutputCount();
358 for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
359 bool output_is_live =
360 buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
361 if (output_is_live) {
363 buffer->descriptor->GetReturnType(static_cast<int>(i));
364 LinkageLocation location =
365 buffer->descriptor->GetReturnLocation(static_cast<int>(i));
// Unused-but-framestate-needed outputs get a temp at the return location;
// used outputs are defined at their linkage location.
367 Node* output = buffer->output_nodes[i];
368 InstructionOperand op =
369 output == NULL ? g.TempLocation(location, type)
370 : g.DefineAsLocation(output, location, type);
371 MarkAsRepresentation(type, op);
373 buffer->outputs.push_back(op);
378 // The first argument is always the callee code.
379 Node* callee = call->InputAt(0);
380 switch (buffer->descriptor->kind()) {
381 case CallDescriptor::kCallCodeObject:
382 buffer->instruction_args.push_back(
383 (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
384 ? g.UseImmediate(callee)
385 : g.UseRegister(callee));
387 case CallDescriptor::kCallAddress:
388 buffer->instruction_args.push_back(
389 (call_address_immediate &&
390 (callee->opcode() == IrOpcode::kInt32Constant ||
391 callee->opcode() == IrOpcode::kInt64Constant))
392 ? g.UseImmediate(callee)
393 : g.UseRegister(callee));
395 case CallDescriptor::kCallJSFunction:
396 buffer->instruction_args.push_back(
397 g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
398 buffer->descriptor->GetInputType(0)));
401 DCHECK_EQ(1, static_cast<int>(buffer->instruction_args.size()));
403 // If the call needs a frame state, we insert the state information as
404 // follows (n is the number of value inputs to the frame state):
405 // arg 1 : deoptimization id.
406 // arg 2 - arg (n + 1) : value inputs to the frame state.
407 if (buffer->frame_state_descriptor != NULL) {
408 InstructionSequence::StateId state_id =
409 sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
410 buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
413 call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
414 AddFrameStateInputs(frame_state, &buffer->instruction_args,
415 buffer->frame_state_descriptor);
417 DCHECK(1 + buffer->frame_state_value_count() ==
418 buffer->instruction_args.size());
420 size_t input_count = static_cast<size_t>(buffer->input_count());
422 // Split the arguments into pushed_nodes and instruction_args. Pushed
423 // arguments require an explicit push instruction before the call and do
424 // not appear as arguments to the call. Everything else ends up
425 // as an InstructionOperand argument to the call.
426 auto iter(call->inputs().begin());
427 int pushed_count = 0;
428 for (size_t index = 0; index < input_count; ++iter, ++index) {
429 DCHECK(iter != call->inputs().end());
430 DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
431 if (index == 0) continue; // The first argument (callee) is already done.
432 InstructionOperand op =
433 g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
434 buffer->descriptor->GetInputType(index));
435 if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
// Fixed-slot arguments become stack pushes; slot index is encoded as a
// negative fixed_slot_index, translated here to a 0-based stack index.
436 int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
437 if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
438 buffer->pushed_nodes.resize(stack_index + 1, NULL);
440 DCHECK(!buffer->pushed_nodes[stack_index]);
441 buffer->pushed_nodes[stack_index] = *iter;
444 buffer->instruction_args.push_back(op);
447 CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
448 DCHECK(static_cast<size_t>(input_count) ==
449 (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
450 buffer->frame_state_value_count()));
// Selects instructions for one basic block. Nodes are visited in reverse
// control-flow order so instruction matching can cover multiple nodes; the
// per-node instruction runs are reversed so the block's instructions end up
// stored back-to-front in instructions_ (unwound later in SelectInstructions).
454 void InstructionSelector::VisitBlock(BasicBlock* block) {
455 DCHECK(!current_block_);
456 current_block_ = block;
457 int current_block_end = static_cast<int>(instructions_.size());
459 // Generate code for the block control "top down", but schedule the code
462 std::reverse(instructions_.begin() + current_block_end, instructions_.end());
464 // Visit code in reverse control flow order, because architecture-specific
465 // matching may cover more than one node at a time.
466 for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
469 // Skip nodes that are unused or already defined.
470 if (!IsUsed(node) || IsDefined(node)) continue;
471 // Generate code for this node "top down", but schedule the code "bottom
473 size_t current_node_end = instructions_.size();
475 std::reverse(instructions_.begin() + current_node_end, instructions_.end());
478 // We're done with the block.
479 InstructionBlock* instruction_block =
480 sequence()->InstructionBlockAt(block->GetRpoNumber());
// Because storage is reversed, code_start is the (larger) end index here.
481 instruction_block->set_code_start(static_cast<int>(instructions_.size()));
482 instruction_block->set_code_end(current_block_end);
484 current_block_ = NULL;
// Debug helper: asserts that |block| contains no phi nodes. Used on branch
// targets, which SSA deconstruction requires to be phi-free.
490 V8_INLINE void CheckNoPhis(const BasicBlock* block) {
492 // Branch targets should not have phis.
493 for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
494 const Node* node = *i;
495 CHECK_NE(IrOpcode::kPhi, node->opcode());
// Dispatches on the block's control kind and forwards to the matching
// Visit* routine (goto, branch, switch, return, throw). Branch/switch
// targets are asserted phi-free, as SSA deconstruction requires.
503 void InstructionSelector::VisitControl(BasicBlock* block) {
504 Node* input = block->control_input();
505 switch (block->control()) {
506 case BasicBlock::kGoto:
507 return VisitGoto(block->SuccessorAt(0));
508 case BasicBlock::kBranch: {
509 DCHECK_EQ(IrOpcode::kBranch, input->opcode());
510 BasicBlock* tbranch = block->SuccessorAt(0);
511 BasicBlock* fbranch = block->SuccessorAt(1);
512 // SSA deconstruction requires targets of branches not to have phis.
513 // Edge split form guarantees this property, but is more strict.
514 CheckNoPhis(tbranch);
515 CheckNoPhis(fbranch);
// Degenerate branches reduce to plain gotos.
516 if (tbranch == fbranch) return VisitGoto(tbranch);
517 // Treat special Branch(Always, IfTrue, IfFalse) as Goto(IfTrue).
518 Node* const condition = input->InputAt(0);
519 if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
520 return VisitBranch(input, tbranch, fbranch);
522 case BasicBlock::kSwitch: {
523 DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
524 // Last successor must be Default.
525 BasicBlock* default_branch = block->successors().back();
526 DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
527 // SSA deconstruction requires targets of branches not to have phis.
528 // Edge split form guarantees this property, but is more strict.
529 CheckNoPhis(default_branch);
530 // All other successors must be cases.
531 size_t case_count = block->SuccessorCount() - 1;
532 DCHECK_LE(1u, case_count);
533 BasicBlock** case_branches = &block->successors().front();
534 // Determine case values and their min/max.
535 int32_t* case_values = zone()->NewArray<int32_t>(case_count);
536 int32_t min_value = std::numeric_limits<int32_t>::max();
537 int32_t max_value = std::numeric_limits<int32_t>::min();
538 for (size_t index = 0; index < case_count; ++index) {
539 BasicBlock* branch = case_branches[index];
// Each case block starts with an IfValue node carrying the case value.
540 int32_t value = OpParameter<int32_t>(branch->front()->op());
541 case_values[index] = value;
542 if (min_value > value) min_value = value;
543 if (max_value < value) max_value = value;
544 // SSA deconstruction requires targets of branches not to have phis.
545 // Edge split form guarantees this property, but is more strict.
548 DCHECK_LE(min_value, max_value);
549 return VisitSwitch(input, default_branch, case_branches, case_values,
550 case_count, min_value, max_value);
552 case BasicBlock::kReturn: {
553 // If the result itself is a return, return its input.
554 Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
557 return VisitReturn(value);
559 case BasicBlock::kThrow:
560 DCHECK_EQ(IrOpcode::kThrow, input->opcode());
561 return VisitThrow(input->InputAt(0));
562 case BasicBlock::kNone: {
563 // TODO(titzer): exit block doesn't have control.
// Returns the machine type produced by |node|, derived from its opcode (and,
// where applicable, its operator parameter or linkage info). Unknown opcodes
// are fatal. NOTE(review): several return statements between the grouped case
// labels are elided in this excerpt.
574 MachineType InstructionSelector::GetMachineType(Node* node) {
575 DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
576 switch (node->opcode()) {
577 case IrOpcode::kStart:
578 case IrOpcode::kLoop:
580 case IrOpcode::kBranch:
581 case IrOpcode::kIfTrue:
582 case IrOpcode::kIfFalse:
583 case IrOpcode::kSwitch:
584 case IrOpcode::kIfValue:
585 case IrOpcode::kIfDefault:
586 case IrOpcode::kEffectPhi:
587 case IrOpcode::kEffectSet:
588 case IrOpcode::kMerge:
589 // No code needed for these graph artifacts.
591 case IrOpcode::kFinish:
592 return kMachAnyTagged;
593 case IrOpcode::kParameter:
594 return linkage()->GetParameterType(OpParameter<int>(node));
595 case IrOpcode::kOsrValue:
596 return kMachAnyTagged;
598 return OpParameter<MachineType>(node);
599 case IrOpcode::kProjection:
600 // TODO(jarin) Really project from outputs.
601 return kMachAnyTagged;
602 case IrOpcode::kInt32Constant:
604 case IrOpcode::kInt64Constant:
606 case IrOpcode::kExternalConstant:
608 case IrOpcode::kFloat64Constant:
610 case IrOpcode::kHeapConstant:
611 case IrOpcode::kNumberConstant:
612 return kMachAnyTagged;
613 case IrOpcode::kCall:
614 return kMachAnyTagged;
615 case IrOpcode::kFrameState:
616 case IrOpcode::kStateValues:
618 case IrOpcode::kLoad:
619 return OpParameter<LoadRepresentation>(node);
620 case IrOpcode::kStore:
622 case IrOpcode::kCheckedLoad:
623 return OpParameter<MachineType>(node);
624 case IrOpcode::kCheckedStore:
626 case IrOpcode::kWord32And:
627 case IrOpcode::kWord32Or:
628 case IrOpcode::kWord32Xor:
629 case IrOpcode::kWord32Shl:
630 case IrOpcode::kWord32Shr:
631 case IrOpcode::kWord32Sar:
632 case IrOpcode::kWord32Ror:
634 case IrOpcode::kWord32Equal:
636 case IrOpcode::kWord64And:
637 case IrOpcode::kWord64Or:
638 case IrOpcode::kWord64Xor:
639 case IrOpcode::kWord64Shl:
640 case IrOpcode::kWord64Shr:
641 case IrOpcode::kWord64Sar:
642 case IrOpcode::kWord64Ror:
644 case IrOpcode::kWord64Equal:
646 case IrOpcode::kInt32Add:
647 case IrOpcode::kInt32AddWithOverflow:
648 case IrOpcode::kInt32Sub:
649 case IrOpcode::kInt32SubWithOverflow:
650 case IrOpcode::kInt32Mul:
651 case IrOpcode::kInt32Div:
652 case IrOpcode::kInt32Mod:
654 case IrOpcode::kInt32LessThan:
655 case IrOpcode::kInt32LessThanOrEqual:
656 case IrOpcode::kUint32LessThan:
657 case IrOpcode::kUint32LessThanOrEqual:
659 case IrOpcode::kInt64Add:
660 case IrOpcode::kInt64Sub:
661 case IrOpcode::kInt64Mul:
662 case IrOpcode::kInt64Div:
663 case IrOpcode::kInt64Mod:
665 case IrOpcode::kInt64LessThan:
666 case IrOpcode::kInt64LessThanOrEqual:
668 case IrOpcode::kChangeFloat32ToFloat64:
669 case IrOpcode::kChangeInt32ToFloat64:
670 case IrOpcode::kChangeUint32ToFloat64:
672 case IrOpcode::kChangeFloat64ToInt32:
674 case IrOpcode::kChangeFloat64ToUint32:
676 case IrOpcode::kChangeInt32ToInt64:
678 case IrOpcode::kChangeUint32ToUint64:
680 case IrOpcode::kTruncateFloat64ToFloat32:
682 case IrOpcode::kTruncateFloat64ToInt32:
683 case IrOpcode::kTruncateInt64ToInt32:
685 case IrOpcode::kFloat64Add:
686 case IrOpcode::kFloat64Sub:
687 case IrOpcode::kFloat64Mul:
688 case IrOpcode::kFloat64Div:
689 case IrOpcode::kFloat64Mod:
690 case IrOpcode::kFloat64Sqrt:
691 case IrOpcode::kFloat64Floor:
692 case IrOpcode::kFloat64Ceil:
693 case IrOpcode::kFloat64RoundTruncate:
694 case IrOpcode::kFloat64RoundTiesAway:
696 case IrOpcode::kFloat64Equal:
697 case IrOpcode::kFloat64LessThan:
698 case IrOpcode::kFloat64LessThanOrEqual:
701 V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
702 node->opcode(), node->op()->mnemonic(), node->id());
// Central per-node dispatch: optionally emits a source-position marker, tags
// the node's virtual register with its representation (double / reference /
// per-type), then forwards to the opcode-specific Visit* routine. Unknown
// opcodes are fatal.
708 void InstructionSelector::VisitNode(Node* node) {
709 DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
710 SourcePosition source_position = source_positions_->GetSourcePosition(node);
711 if (!source_position.IsUnknown()) {
712 DCHECK(!source_position.IsInvalid());
// Calls always record positions (needed for deopt/stack traces); other
// nodes only under the --turbo-source-positions flag.
713 if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
714 Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
717 switch (node->opcode()) {
718 case IrOpcode::kStart:
719 case IrOpcode::kLoop:
721 case IrOpcode::kBranch:
722 case IrOpcode::kIfTrue:
723 case IrOpcode::kIfFalse:
724 case IrOpcode::kSwitch:
725 case IrOpcode::kIfValue:
726 case IrOpcode::kIfDefault:
727 case IrOpcode::kEffectPhi:
728 case IrOpcode::kMerge:
729 // No code needed for these graph artifacts.
731 case IrOpcode::kFinish:
// The "MarkAs*(node), Visit*(node)" comma pattern tags the register
// class before selecting code for the node.
732 return MarkAsReference(node), VisitFinish(node);
733 case IrOpcode::kParameter: {
734 MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
735 MarkAsRepresentation(type, node);
736 return VisitParameter(node);
738 case IrOpcode::kOsrValue:
739 return MarkAsReference(node), VisitOsrValue(node);
740 case IrOpcode::kPhi: {
741 MachineType type = OpParameter<MachineType>(node);
742 MarkAsRepresentation(type, node);
743 return VisitPhi(node);
745 case IrOpcode::kProjection:
746 return VisitProjection(node);
747 case IrOpcode::kInt32Constant:
748 case IrOpcode::kInt64Constant:
749 case IrOpcode::kExternalConstant:
750 return VisitConstant(node);
751 case IrOpcode::kFloat32Constant:
752 return MarkAsDouble(node), VisitConstant(node);
753 case IrOpcode::kFloat64Constant:
754 return MarkAsDouble(node), VisitConstant(node);
755 case IrOpcode::kHeapConstant:
756 return MarkAsReference(node), VisitConstant(node);
757 case IrOpcode::kNumberConstant: {
// Smi-representable numbers stay untagged registers; everything else is
// a heap number reference.
758 double value = OpParameter<double>(node);
759 if (!IsSmiDouble(value)) MarkAsReference(node);
760 return VisitConstant(node);
762 case IrOpcode::kCall:
763 return VisitCall(node);
764 case IrOpcode::kFrameState:
765 case IrOpcode::kStateValues:
767 case IrOpcode::kLoad: {
768 LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
769 MarkAsRepresentation(rep, node);
770 return VisitLoad(node);
772 case IrOpcode::kStore:
773 return VisitStore(node);
774 case IrOpcode::kWord32And:
775 return VisitWord32And(node);
776 case IrOpcode::kWord32Or:
777 return VisitWord32Or(node);
778 case IrOpcode::kWord32Xor:
779 return VisitWord32Xor(node);
780 case IrOpcode::kWord32Shl:
781 return VisitWord32Shl(node);
782 case IrOpcode::kWord32Shr:
783 return VisitWord32Shr(node);
784 case IrOpcode::kWord32Sar:
785 return VisitWord32Sar(node);
786 case IrOpcode::kWord32Ror:
787 return VisitWord32Ror(node);
788 case IrOpcode::kWord32Equal:
789 return VisitWord32Equal(node);
790 case IrOpcode::kWord64And:
791 return VisitWord64And(node);
792 case IrOpcode::kWord64Or:
793 return VisitWord64Or(node);
794 case IrOpcode::kWord64Xor:
795 return VisitWord64Xor(node);
796 case IrOpcode::kWord64Shl:
797 return VisitWord64Shl(node);
798 case IrOpcode::kWord64Shr:
799 return VisitWord64Shr(node);
800 case IrOpcode::kWord64Sar:
801 return VisitWord64Sar(node);
802 case IrOpcode::kWord64Ror:
803 return VisitWord64Ror(node);
804 case IrOpcode::kWord64Equal:
805 return VisitWord64Equal(node);
806 case IrOpcode::kInt32Add:
807 return VisitInt32Add(node);
808 case IrOpcode::kInt32AddWithOverflow:
809 return VisitInt32AddWithOverflow(node);
810 case IrOpcode::kInt32Sub:
811 return VisitInt32Sub(node);
812 case IrOpcode::kInt32SubWithOverflow:
813 return VisitInt32SubWithOverflow(node);
814 case IrOpcode::kInt32Mul:
815 return VisitInt32Mul(node);
816 case IrOpcode::kInt32MulHigh:
817 return VisitInt32MulHigh(node);
818 case IrOpcode::kInt32Div:
819 return VisitInt32Div(node);
820 case IrOpcode::kInt32Mod:
821 return VisitInt32Mod(node);
822 case IrOpcode::kInt32LessThan:
823 return VisitInt32LessThan(node);
824 case IrOpcode::kInt32LessThanOrEqual:
825 return VisitInt32LessThanOrEqual(node);
826 case IrOpcode::kUint32Div:
827 return VisitUint32Div(node);
828 case IrOpcode::kUint32LessThan:
829 return VisitUint32LessThan(node);
830 case IrOpcode::kUint32LessThanOrEqual:
831 return VisitUint32LessThanOrEqual(node);
832 case IrOpcode::kUint32Mod:
833 return VisitUint32Mod(node);
834 case IrOpcode::kUint32MulHigh:
835 return VisitUint32MulHigh(node);
836 case IrOpcode::kInt64Add:
837 return VisitInt64Add(node);
838 case IrOpcode::kInt64Sub:
839 return VisitInt64Sub(node);
840 case IrOpcode::kInt64Mul:
841 return VisitInt64Mul(node);
842 case IrOpcode::kInt64Div:
843 return VisitInt64Div(node);
844 case IrOpcode::kInt64Mod:
845 return VisitInt64Mod(node);
846 case IrOpcode::kInt64LessThan:
847 return VisitInt64LessThan(node);
848 case IrOpcode::kInt64LessThanOrEqual:
849 return VisitInt64LessThanOrEqual(node);
850 case IrOpcode::kUint64Div:
851 return VisitUint64Div(node);
852 case IrOpcode::kUint64LessThan:
853 return VisitUint64LessThan(node);
854 case IrOpcode::kUint64Mod:
855 return VisitUint64Mod(node);
856 case IrOpcode::kChangeFloat32ToFloat64:
857 return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
858 case IrOpcode::kChangeInt32ToFloat64:
859 return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
860 case IrOpcode::kChangeUint32ToFloat64:
861 return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
862 case IrOpcode::kChangeFloat64ToInt32:
863 return VisitChangeFloat64ToInt32(node);
864 case IrOpcode::kChangeFloat64ToUint32:
865 return VisitChangeFloat64ToUint32(node);
866 case IrOpcode::kChangeInt32ToInt64:
867 return VisitChangeInt32ToInt64(node);
868 case IrOpcode::kChangeUint32ToUint64:
869 return VisitChangeUint32ToUint64(node);
870 case IrOpcode::kTruncateFloat64ToFloat32:
871 return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
872 case IrOpcode::kTruncateFloat64ToInt32:
873 return VisitTruncateFloat64ToInt32(node);
874 case IrOpcode::kTruncateInt64ToInt32:
875 return VisitTruncateInt64ToInt32(node);
876 case IrOpcode::kFloat64Add:
877 return MarkAsDouble(node), VisitFloat64Add(node);
878 case IrOpcode::kFloat64Sub:
879 return MarkAsDouble(node), VisitFloat64Sub(node);
880 case IrOpcode::kFloat64Mul:
881 return MarkAsDouble(node), VisitFloat64Mul(node);
882 case IrOpcode::kFloat64Div:
883 return MarkAsDouble(node), VisitFloat64Div(node);
884 case IrOpcode::kFloat64Mod:
885 return MarkAsDouble(node), VisitFloat64Mod(node);
886 case IrOpcode::kFloat64Sqrt:
887 return MarkAsDouble(node), VisitFloat64Sqrt(node);
888 case IrOpcode::kFloat64Equal:
889 return VisitFloat64Equal(node);
890 case IrOpcode::kFloat64LessThan:
891 return VisitFloat64LessThan(node);
892 case IrOpcode::kFloat64LessThanOrEqual:
893 return VisitFloat64LessThanOrEqual(node);
894 case IrOpcode::kFloat64Floor:
895 return MarkAsDouble(node), VisitFloat64Floor(node);
896 case IrOpcode::kFloat64Ceil:
897 return MarkAsDouble(node), VisitFloat64Ceil(node);
898 case IrOpcode::kFloat64RoundTruncate:
899 return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
900 case IrOpcode::kFloat64RoundTiesAway:
901 return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
902 case IrOpcode::kLoadStackPointer:
903 return VisitLoadStackPointer(node);
904 case IrOpcode::kCheckedLoad: {
905 MachineType rep = OpParameter<MachineType>(node);
906 MarkAsRepresentation(rep, node);
907 return VisitCheckedLoad(node);
909 case IrOpcode::kCheckedStore:
910 return VisitCheckedStore(node);
912 V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
913 node->opcode(), node->op()->mnemonic(), node->id());
919 #if V8_TURBOFAN_BACKEND
// Platform-independent selection: truncation is emitted as the generic
// kArchTruncateDoubleToI instruction on a fresh output register.
921 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
922 OperandGenerator g(this);
923 Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
924 g.UseRegister(node->InputAt(0)));
// Reads the stack pointer via the generic kArchStackPointer instruction.
928 void InstructionSelector::VisitLoadStackPointer(Node* node) {
929 OperandGenerator g(this);
930 Emit(kArchStackPointer, g.DefineAsRegister(node));
933 #endif // V8_TURBOFAN_BACKEND
935 // 32 bit targets do not implement the following instructions.
// Each 64-bit word/arithmetic visitor below is a deliberate UNIMPLEMENTED()
// stub on 32-bit targets (excluding x64); real implementations live in the
// per-architecture instruction-selector files.
936 #if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
938 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
941 void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
944 void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
947 void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
950 void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
953 void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
956 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
959 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
962 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
965 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
968 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
971 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
974 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
977 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
982 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
985 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
988 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
991 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
994 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
999 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
1004 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
1008 #endif  // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
// A Finish node is a no-op at the machine level: emit a nop whose output is
// constrained to alias the input value's register.
1011 void InstructionSelector::VisitFinish(Node* node) {
1012 OperandGenerator g(this);
1013 Node* value = node->InputAt(0);
1014 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
// Defines the parameter at the location assigned by the linkage (register or
// stack slot) via a nop — no code is needed, only the operand constraint.
1018 void InstructionSelector::VisitParameter(Node* node) {
1019 OperandGenerator g(this);
1020 int index = OpParameter<int>(node);
1022 g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
1023 linkage()->GetParameterType(index)));
// Defines an OSR entry value at its linkage-assigned OSR location.
1027 void InstructionSelector::VisitOsrValue(Node* node) {
1028 OperandGenerator g(this);
1029 int index = OpParameter<int>(node);
1030 Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
1035 void InstructionSelector::VisitPhi(Node* node) {
1036 const int input_count = node->op()->ValueInputCount();
1037 PhiInstruction* phi = new (instruction_zone())
1038 PhiInstruction(instruction_zone(), GetVirtualRegister(node),
1039 static_cast<size_t>(input_count));
1040 sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
1041 for (int i = 0; i < input_count; ++i) {
1042 Node* const input = node->InputAt(i);
1044 phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
1049 void InstructionSelector::VisitProjection(Node* node) {
1050 OperandGenerator g(this);
1051 Node* value = node->InputAt(0);
1052 switch (value->opcode()) {
1053 case IrOpcode::kInt32AddWithOverflow:
1054 case IrOpcode::kInt32SubWithOverflow:
1055 if (ProjectionIndexOf(node->op()) == 0u) {
1056 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
1058 DCHECK(ProjectionIndexOf(node->op()) == 1u);
1068 void InstructionSelector::VisitConstant(Node* node) {
1069 // We must emit a NOP here because every live range needs a defining
1070 // instruction in the register allocator.
1071 OperandGenerator g(this);
1072 Emit(kArchNop, g.DefineAsConstant(node));
1076 void InstructionSelector::VisitGoto(BasicBlock* target) {
1077 // jump to the next block.
1078 OperandGenerator g(this);
1079 Emit(kArchJmp, g.NoOutput(), g.Label(target))->MarkAsControl();
1083 void InstructionSelector::VisitReturn(Node* value) {
1084 OperandGenerator g(this);
1085 if (value != NULL) {
1086 Emit(kArchRet, g.NoOutput(),
1087 g.UseLocation(value, linkage()->GetReturnLocation(),
1088 linkage()->GetReturnType()));
1090 Emit(kArchRet, g.NoOutput());
1095 void InstructionSelector::VisitThrow(Node* value) {
1096 OperandGenerator g(this);
1097 Emit(kArchNop, g.NoOutput()); // TODO(titzer)
1101 void InstructionSelector::FillTypeVectorFromStateValues(
1102 ZoneVector<MachineType>* types, Node* state_values) {
1103 DCHECK(state_values->opcode() == IrOpcode::kStateValues);
1104 int count = state_values->InputCount();
1105 types->reserve(static_cast<size_t>(count));
1106 for (int i = 0; i < count; i++) {
1107 types->push_back(GetMachineType(state_values->InputAt(i)));
1112 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
1114 DCHECK(state->opcode() == IrOpcode::kFrameState);
1115 DCHECK_EQ(5, state->InputCount());
1116 DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(0)->opcode());
1117 DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(1)->opcode());
1118 DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(2)->opcode());
1119 FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
1121 int parameters = state->InputAt(0)->InputCount();
1122 int locals = state->InputAt(1)->InputCount();
1123 int stack = state->InputAt(2)->InputCount();
1125 FrameStateDescriptor* outer_state = NULL;
1126 Node* outer_node = state->InputAt(4);
1127 if (outer_node->opcode() == IrOpcode::kFrameState) {
1128 outer_state = GetFrameStateDescriptor(outer_node);
1131 return new (instruction_zone()) FrameStateDescriptor(
1132 instruction_zone(), state_info, parameters, locals, stack, outer_state);
1136 static InstructionOperand UseOrImmediate(OperandGenerator* g, Node* input) {
1137 switch (input->opcode()) {
1138 case IrOpcode::kInt32Constant:
1139 case IrOpcode::kNumberConstant:
1140 case IrOpcode::kFloat64Constant:
1141 case IrOpcode::kHeapConstant:
1142 return g->UseImmediate(input);
1144 return g->UseUnique(input);
1149 void InstructionSelector::AddFrameStateInputs(
1150 Node* state, InstructionOperandVector* inputs,
1151 FrameStateDescriptor* descriptor) {
1152 DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
1154 if (descriptor->outer_state() != NULL) {
1155 AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
1158 Node* parameters = state->InputAt(0);
1159 Node* locals = state->InputAt(1);
1160 Node* stack = state->InputAt(2);
1161 Node* context = state->InputAt(3);
1163 DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
1164 DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
1165 DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
1167 DCHECK_EQ(static_cast<int>(descriptor->parameters_count()),
1168 parameters->InputCount());
1169 DCHECK_EQ(static_cast<int>(descriptor->locals_count()), locals->InputCount());
1170 DCHECK_EQ(static_cast<int>(descriptor->stack_count()), stack->InputCount());
1172 ZoneVector<MachineType> types(instruction_zone());
1173 types.reserve(descriptor->GetSize());
1175 OperandGenerator g(this);
1176 size_t value_index = 0;
1177 for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
1178 Node* input_node = parameters->InputAt(i);
1179 inputs->push_back(UseOrImmediate(&g, input_node));
1180 descriptor->SetType(value_index++, GetMachineType(input_node));
1182 if (descriptor->HasContext()) {
1183 inputs->push_back(UseOrImmediate(&g, context));
1184 descriptor->SetType(value_index++, kMachAnyTagged);
1186 for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
1187 Node* input_node = locals->InputAt(i);
1188 inputs->push_back(UseOrImmediate(&g, input_node));
1189 descriptor->SetType(value_index++, GetMachineType(input_node));
1191 for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
1192 Node* input_node = stack->InputAt(i);
1193 inputs->push_back(UseOrImmediate(&g, input_node));
1194 descriptor->SetType(value_index++, GetMachineType(input_node));
1196 DCHECK(value_index == descriptor->GetSize());
1200 #if !V8_TURBOFAN_BACKEND
// No TurboFan backend for this target: stamp out an aborting stub for
// every machine operator via MACHINE_OP_LIST.
#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
1208 void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
1211 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1212 BasicBlock* fbranch) {
1217 void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
1218 BasicBlock** case_branches,
1219 int32_t* case_values, size_t case_count,
1220 int32_t min_value, int32_t max_value) {
1226 MachineOperatorBuilder::Flags
1227 InstructionSelector::SupportedMachineOperatorFlags() {
1228 return MachineOperatorBuilder::Flag::kNoFlags;
1231 #endif // !V8_TURBOFAN_BACKEND
1233 } // namespace compiler
1234 } // namespace internal