1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/instruction-selector.h"
9 #include "src/compiler/instruction-selector-impl.h"
10 #include "src/compiler/node-matchers.h"
11 #include "src/compiler/node-properties.h"
12 #include "src/compiler/pipeline.h"
13 #include "src/compiler/schedule.h"
14 #include "src/compiler/state-values-utils.h"
// Constructor: wires the selector to its compilation context (zone, linkage,
// instruction sequence, schedule, source positions) and sizes the per-node
// side tables by node_count.
// NOTE(review): this listing is elided — several parameters and member
// initializers are missing between the visible lines; restore from upstream.
20 InstructionSelector::InstructionSelector(Zone* zone, size_t node_count,
22 InstructionSequence* sequence,
24 SourcePositionTable* source_positions,
29 source_positions_(source_positions),
// defined_/used_ are boolean vectors indexed by node id, all false initially.
34 defined_(node_count, false, zone),
35 used_(node_count, false, zone),
// Every node starts without an assigned virtual register.
36 virtual_registers_(node_count,
37 InstructionOperand::kInvalidVirtualRegister, zone) {
// Reserve up front; instruction count is expected to be on the order of
// the node count.
38 instructions_.reserve(node_count);
// Main driver: (1) pre-mark loop-header phi inputs as used, (2) visit blocks
// in reverse RPO to select instructions, (3) copy the selected instructions
// (stored reversed in instructions_) into the final sequence block by block.
// NOTE(review): listing is elided — loop bodies and closing braces are
// missing between the visible lines.
42 void InstructionSelector::SelectInstructions() {
43 // Mark the inputs of all phis in loop headers as used.
44 BasicBlockVector* blocks = schedule()->rpo_order();
45 for (auto const block : *blocks) {
46 if (!block->IsLoopHeader()) continue;
// Loop headers must have at least the entry edge and one back edge.
47 DCHECK_LE(2u, block->PredecessorCount());
48 for (Node* const phi : *block) {
49 if (phi->opcode() != IrOpcode::kPhi) continue;
51 // Mark all inputs as used.
52 for (Node* const input : phi->inputs()) {
58 // Visit each basic block in post order.
59 for (auto i = blocks->rbegin(); i != blocks->rend(); ++i) {
63 // Schedule the selected instructions.
64 for (auto const block : *blocks) {
65 InstructionBlock* instruction_block =
66 sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
67 size_t end = instruction_block->code_end();
68 size_t start = instruction_block->code_start();
// start >= end because instructions_ holds each block's code in reverse.
69 DCHECK_LE(end, start);
70 sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
// Walk backwards so instructions are emitted in forward order.
71 while (start-- > end) {
72 sequence()->AddInstruction(instructions_[start]);
74 sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
79 Instruction* InstructionSelector::Emit(InstructionCode opcode,
80 InstructionOperand output,
82 InstructionOperand* temps) {
83 size_t output_count = output.IsInvalid() ? 0 : 1;
84 return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
88 Instruction* InstructionSelector::Emit(InstructionCode opcode,
89 InstructionOperand output,
90 InstructionOperand a, size_t temp_count,
91 InstructionOperand* temps) {
92 size_t output_count = output.IsInvalid() ? 0 : 1;
93 return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
97 Instruction* InstructionSelector::Emit(InstructionCode opcode,
98 InstructionOperand output,
100 InstructionOperand b, size_t temp_count,
101 InstructionOperand* temps) {
102 size_t output_count = output.IsInvalid() ? 0 : 1;
103 InstructionOperand inputs[] = {a, b};
104 size_t input_count = arraysize(inputs);
105 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
110 Instruction* InstructionSelector::Emit(InstructionCode opcode,
111 InstructionOperand output,
112 InstructionOperand a,
113 InstructionOperand b,
114 InstructionOperand c, size_t temp_count,
115 InstructionOperand* temps) {
116 size_t output_count = output.IsInvalid() ? 0 : 1;
117 InstructionOperand inputs[] = {a, b, c};
118 size_t input_count = arraysize(inputs);
119 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
124 Instruction* InstructionSelector::Emit(
125 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
126 InstructionOperand b, InstructionOperand c, InstructionOperand d,
127 size_t temp_count, InstructionOperand* temps) {
128 size_t output_count = output.IsInvalid() ? 0 : 1;
129 InstructionOperand inputs[] = {a, b, c, d};
130 size_t input_count = arraysize(inputs);
131 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
136 Instruction* InstructionSelector::Emit(
137 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
138 InstructionOperand b, InstructionOperand c, InstructionOperand d,
139 InstructionOperand e, size_t temp_count, InstructionOperand* temps) {
140 size_t output_count = output.IsInvalid() ? 0 : 1;
141 InstructionOperand inputs[] = {a, b, c, d, e};
142 size_t input_count = arraysize(inputs);
143 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
148 Instruction* InstructionSelector::Emit(
149 InstructionCode opcode, InstructionOperand output, InstructionOperand a,
150 InstructionOperand b, InstructionOperand c, InstructionOperand d,
151 InstructionOperand e, InstructionOperand f, size_t temp_count,
152 InstructionOperand* temps) {
153 size_t output_count = output.IsInvalid() ? 0 : 1;
154 InstructionOperand inputs[] = {a, b, c, d, e, f};
155 size_t input_count = arraysize(inputs);
156 return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
161 Instruction* InstructionSelector::Emit(
162 InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
163 size_t input_count, InstructionOperand* inputs, size_t temp_count,
164 InstructionOperand* temps) {
166 Instruction::New(instruction_zone(), opcode, output_count, outputs,
167 input_count, inputs, temp_count, temps);
172 Instruction* InstructionSelector::Emit(Instruction* instr) {
173 instructions_.push_back(instr);
178 bool InstructionSelector::CanCover(Node* user, Node* node) const {
179 return node->OwnedBy(user) &&
180 schedule()->block(node) == schedule()->block(user);
184 int InstructionSelector::GetVirtualRegister(const Node* node) {
185 DCHECK_NOT_NULL(node);
186 size_t const id = node->id();
187 DCHECK_LT(id, virtual_registers_.size());
188 int virtual_register = virtual_registers_[id];
189 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
190 virtual_register = sequence()->NextVirtualRegister();
191 virtual_registers_[id] = virtual_register;
193 return virtual_register;
197 const std::map<NodeId, int> InstructionSelector::GetVirtualRegistersForTesting()
199 std::map<NodeId, int> virtual_registers;
200 for (size_t n = 0; n < virtual_registers_.size(); ++n) {
201 if (virtual_registers_[n] != InstructionOperand::kInvalidVirtualRegister) {
202 NodeId const id = static_cast<NodeId>(n);
203 virtual_registers.insert(std::make_pair(id, virtual_registers_[n]));
206 return virtual_registers;
210 bool InstructionSelector::IsDefined(Node* node) const {
211 DCHECK_NOT_NULL(node);
212 size_t const id = node->id();
213 DCHECK_LT(id, defined_.size());
218 void InstructionSelector::MarkAsDefined(Node* node) {
219 DCHECK_NOT_NULL(node);
220 size_t const id = node->id();
221 DCHECK_LT(id, defined_.size());
226 bool InstructionSelector::IsUsed(Node* node) const {
227 DCHECK_NOT_NULL(node);
228 if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
229 size_t const id = node->id();
230 DCHECK_LT(id, used_.size());
235 void InstructionSelector::MarkAsUsed(Node* node) {
236 DCHECK_NOT_NULL(node);
237 size_t const id = node->id();
238 DCHECK_LT(id, used_.size());
243 bool InstructionSelector::IsDouble(const Node* node) const {
244 DCHECK_NOT_NULL(node);
245 int const virtual_register = virtual_registers_[node->id()];
246 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
249 return sequence()->IsDouble(virtual_register);
253 void InstructionSelector::MarkAsDouble(Node* node) {
254 DCHECK_NOT_NULL(node);
255 DCHECK(!IsReference(node));
256 sequence()->MarkAsDouble(GetVirtualRegister(node));
260 bool InstructionSelector::IsReference(const Node* node) const {
261 DCHECK_NOT_NULL(node);
262 int const virtual_register = virtual_registers_[node->id()];
263 if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
266 return sequence()->IsReference(virtual_register);
270 void InstructionSelector::MarkAsReference(Node* node) {
271 DCHECK_NOT_NULL(node);
272 DCHECK(!IsDouble(node));
273 sequence()->MarkAsReference(GetVirtualRegister(node));
277 void InstructionSelector::MarkAsRepresentation(MachineType rep,
278 const InstructionOperand& op) {
279 UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
280 switch (RepresentationOf(rep)) {
283 sequence()->MarkAsDouble(unalloc.virtual_register());
286 sequence()->MarkAsReference(unalloc.virtual_register());
294 void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
295 DCHECK_NOT_NULL(node);
296 switch (RepresentationOf(rep)) {
302 MarkAsReference(node);
// Scratch structure that gathers everything needed to emit a call: the
// output nodes/operands, the nodes to push onto the stack, and the operand
// arguments of the call instruction itself. Capacities are pre-reserved from
// the call descriptor.
// NOTE(review): listing is elided — some initializer-list entries and the
// constructor's opening syntax are missing between the visible lines.
310 // TODO(bmeurer): Get rid of the CallBuffer business and make
311 // InstructionSelector::VisitCall platform independent instead.
312 CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
313 FrameStateDescriptor* frame_desc)
315 frame_state_descriptor(frame_desc),
318 instruction_args(zone),
320 output_nodes.reserve(d->ReturnCount());
321 outputs.reserve(d->ReturnCount());
322 pushed_nodes.reserve(input_count());
323 instruction_args.reserve(input_count() + frame_state_value_count());
// Fills {buffer} for the call node {call}: collects output projections,
// chooses the callee operand (immediate vs. register depending on call kind),
// appends frame-state arguments if needed, and splits the remaining inputs
// into stack pushes vs. instruction operands.
// NOTE(review): listing is elided — closing braces, some declarations and
// else-branches are missing between the visible lines; code left byte-exact.
327 // TODO(bmeurer): Get rid of the CallBuffer business and make
328 // InstructionSelector::VisitCall platform independent instead.
329 void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
330 bool call_code_immediate,
331 bool call_address_immediate) {
332 OperandGenerator g(this);
333 DCHECK_EQ(call->op()->ValueOutputCount(),
334 static_cast<int>(buffer->descriptor->ReturnCount()));
336 call->op()->ValueInputCount(),
337 static_cast<int>(buffer->input_count() + buffer->frame_state_count()));
339 if (buffer->descriptor->ReturnCount() > 0) {
340 // Collect the projections that represent multiple outputs from this call.
341 if (buffer->descriptor->ReturnCount() == 1) {
342 buffer->output_nodes.push_back(call);
344 buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), nullptr);
345 for (auto use : call->uses()) {
346 if (use->opcode() != IrOpcode::kProjection) continue;
347 size_t const index = ProjectionIndexOf(use->op());
348 DCHECK_LT(index, buffer->output_nodes.size());
349 DCHECK(!buffer->output_nodes[index]);
350 buffer->output_nodes[index] = use;
354 // Filter out the outputs that aren't live because no projection uses them.
355 size_t outputs_needed_by_framestate =
356 buffer->frame_state_descriptor == NULL
358 : buffer->frame_state_descriptor->state_combine()
359 .ConsumedOutputCount();
360 for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
361 bool output_is_live =
362 buffer->output_nodes[i] != NULL || i < outputs_needed_by_framestate;
363 if (output_is_live) {
365 buffer->descriptor->GetReturnType(static_cast<int>(i));
366 LinkageLocation location =
367 buffer->descriptor->GetReturnLocation(static_cast<int>(i));
369 Node* output = buffer->output_nodes[i];
// Unused-but-needed outputs get a temp location; live ones are defined there.
370 InstructionOperand op =
371 output == NULL ? g.TempLocation(location, type)
372 : g.DefineAsLocation(output, location, type);
373 MarkAsRepresentation(type, op);
375 buffer->outputs.push_back(op);
380 // The first argument is always the callee code.
381 Node* callee = call->InputAt(0);
382 switch (buffer->descriptor->kind()) {
383 case CallDescriptor::kCallCodeObject:
384 buffer->instruction_args.push_back(
385 (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
386 ? g.UseImmediate(callee)
387 : g.UseRegister(callee));
389 case CallDescriptor::kCallAddress:
390 buffer->instruction_args.push_back(
391 (call_address_immediate &&
392 (callee->opcode() == IrOpcode::kInt32Constant ||
393 callee->opcode() == IrOpcode::kInt64Constant))
394 ? g.UseImmediate(callee)
395 : g.UseRegister(callee));
397 case CallDescriptor::kCallJSFunction:
398 buffer->instruction_args.push_back(
399 g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
400 buffer->descriptor->GetInputType(0)));
403 DCHECK_EQ(1u, buffer->instruction_args.size());
405 // If the call needs a frame state, we insert the state information as
406 // follows (n is the number of value inputs to the frame state):
407 // arg 1 : deoptimization id.
408 // arg 2 - arg (n + 1) : value inputs to the frame state.
409 if (buffer->frame_state_descriptor != NULL) {
410 InstructionSequence::StateId state_id =
411 sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
412 buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
415 call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
416 AddFrameStateInputs(frame_state, &buffer->instruction_args,
417 buffer->frame_state_descriptor);
419 DCHECK(1 + buffer->frame_state_value_count() ==
420 buffer->instruction_args.size());
422 size_t input_count = static_cast<size_t>(buffer->input_count());
424 // Split the arguments into pushed_nodes and instruction_args. Pushed
425 // arguments require an explicit push instruction before the call and do
426 // not appear as arguments to the call. Everything else ends up
427 // as an InstructionOperand argument to the call.
428 auto iter(call->inputs().begin());
429 int pushed_count = 0;
430 for (size_t index = 0; index < input_count; ++iter, ++index) {
431 DCHECK(iter != call->inputs().end());
432 DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
433 if (index == 0) continue; // The first argument (callee) is already done.
434 InstructionOperand op =
435 g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
436 buffer->descriptor->GetInputType(index));
// A fixed-slot policy means the argument is passed on the stack: record the
// node at its stack slot index instead of passing it as an operand.
437 if (UnallocatedOperand::cast(op).HasFixedSlotPolicy()) {
438 int stack_index = -UnallocatedOperand::cast(op).fixed_slot_index() - 1;
439 if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
440 buffer->pushed_nodes.resize(stack_index + 1, NULL);
442 DCHECK(!buffer->pushed_nodes[stack_index]);
443 buffer->pushed_nodes[stack_index] = *iter;
446 buffer->instruction_args.push_back(op);
449 CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
450 DCHECK(static_cast<size_t>(input_count) ==
451 (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
452 buffer->frame_state_value_count()));
// Generates code for one basic block. Control is visited first, then the
// block's nodes in reverse order (so instruction patterns can cover multiple
// nodes); each emitted run is reversed so instructions_ stays in reversed
// order per block. Finally records the block's [code_start, code_end) span.
// NOTE(review): listing is elided — the VisitControl/VisitNode call sites
// and several closing braces are missing between the visible lines.
456 void InstructionSelector::VisitBlock(BasicBlock* block) {
457 DCHECK(!current_block_);
458 current_block_ = block;
459 int current_block_end = static_cast<int>(instructions_.size());
461 // Generate code for the block control "top down", but schedule the code
464 std::reverse(instructions_.begin() + current_block_end, instructions_.end());
466 // Visit code in reverse control flow order, because architecture-specific
467 // matching may cover more than one node at a time.
468 for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
471 // Skip nodes that are unused or already defined.
472 if (!IsUsed(node) || IsDefined(node)) continue;
473 // Generate code for this node "top down", but schedule the code "bottom
475 size_t current_node_end = instructions_.size();
477 std::reverse(instructions_.begin() + current_node_end, instructions_.end());
480 // We're done with the block.
481 InstructionBlock* instruction_block =
482 sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
// code_start > code_end here because instructions_ is in reverse order.
483 instruction_block->set_code_start(static_cast<int>(instructions_.size()));
484 instruction_block->set_code_end(current_block_end);
486 current_block_ = NULL;
// Dispatches on the block's control kind (goto, call, branch, switch,
// return, deoptimize, throw) and emits the corresponding control-flow
// instruction(s). Also checks edge-split form in debug builds.
// NOTE(review): listing is elided — several case bodies, braces and the
// kNone/default handling are incomplete between the visible lines.
490 void InstructionSelector::VisitControl(BasicBlock* block) {
492 // SSA deconstruction requires targets of branches not to have phis.
493 // Edge split form guarantees this property, but is more strict.
494 if (block->SuccessorCount() > 1) {
495 for (BasicBlock* const successor : block->successors()) {
496 for (Node* const node : *successor) {
497 CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
503 Node* input = block->control_input();
504 switch (block->control()) {
505 case BasicBlock::kGoto:
506 return VisitGoto(block->SuccessorAt(0));
507 case BasicBlock::kCall: {
508 DCHECK_EQ(IrOpcode::kCall, input->opcode());
509 BasicBlock* success = block->SuccessorAt(0);
510 BasicBlock* exception = block->SuccessorAt(1);
// Comma expression: emit the call, then the fall-through jump.
511 return VisitCall(input, exception), VisitGoto(success);
513 case BasicBlock::kBranch: {
514 DCHECK_EQ(IrOpcode::kBranch, input->opcode());
515 BasicBlock* tbranch = block->SuccessorAt(0);
516 BasicBlock* fbranch = block->SuccessorAt(1);
517 if (tbranch == fbranch) return VisitGoto(tbranch);
518 // Treat special Branch(Always, IfTrue, IfFalse) as Goto(IfTrue).
519 Node* const condition = input->InputAt(0);
520 if (condition->opcode() == IrOpcode::kAlways) return VisitGoto(tbranch);
521 return VisitBranch(input, tbranch, fbranch);
523 case BasicBlock::kSwitch: {
524 DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
526 // Last successor must be Default.
527 sw.default_branch = block->successors().back();
528 DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
529 // All other successors must be cases.
530 sw.case_count = block->SuccessorCount() - 1;
531 DCHECK_LE(1u, sw.case_count);
532 sw.case_branches = &block->successors().front();
533 // Determine case values and their min/max.
534 sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
535 sw.min_value = std::numeric_limits<int32_t>::max();
536 sw.max_value = std::numeric_limits<int32_t>::min();
537 for (size_t index = 0; index < sw.case_count; ++index) {
538 BasicBlock* branch = sw.case_branches[index];
539 int32_t value = OpParameter<int32_t>(branch->front()->op());
540 sw.case_values[index] = value;
541 if (sw.min_value > value) sw.min_value = value;
542 if (sw.max_value < value) sw.max_value = value;
544 DCHECK_LE(sw.min_value, sw.max_value);
545 // Note that {value_range} can be 0 if {min_value} is -2^31 and
547 // is 2^31-1, so don't assume that it's non-zero below.
// Unsigned arithmetic so the subtraction wraps instead of overflowing.
548 sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
549 bit_cast<uint32_t>(sw.min_value);
550 return VisitSwitch(input, sw);
552 case BasicBlock::kReturn: {
553 // If the result itself is a return, return its input.
554 Node* value = (input != nullptr && input->opcode() == IrOpcode::kReturn)
557 return VisitReturn(value);
559 case BasicBlock::kDeoptimize: {
560 // If the result itself is a return, return its input.
562 (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
565 return VisitDeoptimize(value);
567 case BasicBlock::kThrow:
568 DCHECK_EQ(IrOpcode::kThrow, input->opcode());
569 return VisitThrow(input->InputAt(0));
570 case BasicBlock::kNone: {
571 // TODO(titzer): exit block doesn't have control.
// Per-node dispatcher: records a source position where required, then
// tags the node's virtual register (double / reference / representation)
// and forwards to the matching architecture-specific Visit* routine.
// NOTE(review): listing is elided — some case labels, `return` statements,
// closing braces and the default arm are missing between the visible lines.
582 void InstructionSelector::VisitNode(Node* node) {
583 DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
584 SourcePosition source_position = source_positions_->GetSourcePosition(node);
585 if (!source_position.IsUnknown()) {
586 DCHECK(!source_position.IsInvalid());
// Calls always carry positions (needed for deopt/stack traces); everything
// else only when --turbo-source-positions is on.
587 if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
588 Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
591 switch (node->opcode()) {
592 case IrOpcode::kStart:
593 case IrOpcode::kLoop:
595 case IrOpcode::kBranch:
596 case IrOpcode::kIfTrue:
597 case IrOpcode::kIfFalse:
598 case IrOpcode::kIfSuccess:
599 case IrOpcode::kIfException:
600 case IrOpcode::kSwitch:
601 case IrOpcode::kIfValue:
602 case IrOpcode::kIfDefault:
603 case IrOpcode::kEffectPhi:
604 case IrOpcode::kMerge:
605 // No code needed for these graph artifacts.
607 case IrOpcode::kFinish:
608 return MarkAsReference(node), VisitFinish(node);
609 case IrOpcode::kParameter: {
610 MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
611 MarkAsRepresentation(type, node);
612 return VisitParameter(node);
614 case IrOpcode::kOsrValue:
615 return MarkAsReference(node), VisitOsrValue(node);
616 case IrOpcode::kPhi: {
617 MachineType type = OpParameter<MachineType>(node);
618 MarkAsRepresentation(type, node);
619 return VisitPhi(node);
621 case IrOpcode::kProjection:
622 return VisitProjection(node);
623 case IrOpcode::kInt32Constant:
624 case IrOpcode::kInt64Constant:
625 case IrOpcode::kExternalConstant:
626 return VisitConstant(node);
627 case IrOpcode::kFloat32Constant:
628 return MarkAsDouble(node), VisitConstant(node);
629 case IrOpcode::kFloat64Constant:
630 return MarkAsDouble(node), VisitConstant(node);
631 case IrOpcode::kHeapConstant:
632 return MarkAsReference(node), VisitConstant(node);
633 case IrOpcode::kNumberConstant: {
// Smi-representable numbers stay untagged; others are heap numbers.
634 double value = OpParameter<double>(node);
635 if (!IsSmiDouble(value)) MarkAsReference(node);
636 return VisitConstant(node);
638 case IrOpcode::kCall:
639 return VisitCall(node, nullptr);
640 case IrOpcode::kFrameState:
641 case IrOpcode::kStateValues:
643 case IrOpcode::kLoad: {
644 LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
645 MarkAsRepresentation(rep, node);
646 return VisitLoad(node);
648 case IrOpcode::kStore:
649 return VisitStore(node);
650 case IrOpcode::kWord32And:
651 return VisitWord32And(node);
652 case IrOpcode::kWord32Or:
653 return VisitWord32Or(node);
654 case IrOpcode::kWord32Xor:
655 return VisitWord32Xor(node);
656 case IrOpcode::kWord32Shl:
657 return VisitWord32Shl(node);
658 case IrOpcode::kWord32Shr:
659 return VisitWord32Shr(node);
660 case IrOpcode::kWord32Sar:
661 return VisitWord32Sar(node);
662 case IrOpcode::kWord32Ror:
663 return VisitWord32Ror(node);
664 case IrOpcode::kWord32Equal:
665 return VisitWord32Equal(node);
666 case IrOpcode::kWord32Clz:
667 return VisitWord32Clz(node);
668 case IrOpcode::kWord64And:
669 return VisitWord64And(node);
670 case IrOpcode::kWord64Or:
671 return VisitWord64Or(node);
672 case IrOpcode::kWord64Xor:
673 return VisitWord64Xor(node);
674 case IrOpcode::kWord64Shl:
675 return VisitWord64Shl(node);
676 case IrOpcode::kWord64Shr:
677 return VisitWord64Shr(node);
678 case IrOpcode::kWord64Sar:
679 return VisitWord64Sar(node);
680 case IrOpcode::kWord64Ror:
681 return VisitWord64Ror(node);
682 case IrOpcode::kWord64Equal:
683 return VisitWord64Equal(node);
684 case IrOpcode::kInt32Add:
685 return VisitInt32Add(node);
686 case IrOpcode::kInt32AddWithOverflow:
687 return VisitInt32AddWithOverflow(node);
688 case IrOpcode::kInt32Sub:
689 return VisitInt32Sub(node);
690 case IrOpcode::kInt32SubWithOverflow:
691 return VisitInt32SubWithOverflow(node);
692 case IrOpcode::kInt32Mul:
693 return VisitInt32Mul(node);
694 case IrOpcode::kInt32MulHigh:
695 return VisitInt32MulHigh(node);
696 case IrOpcode::kInt32Div:
697 return VisitInt32Div(node);
698 case IrOpcode::kInt32Mod:
699 return VisitInt32Mod(node);
700 case IrOpcode::kInt32LessThan:
701 return VisitInt32LessThan(node);
702 case IrOpcode::kInt32LessThanOrEqual:
703 return VisitInt32LessThanOrEqual(node);
704 case IrOpcode::kUint32Div:
705 return VisitUint32Div(node);
706 case IrOpcode::kUint32LessThan:
707 return VisitUint32LessThan(node);
708 case IrOpcode::kUint32LessThanOrEqual:
709 return VisitUint32LessThanOrEqual(node);
710 case IrOpcode::kUint32Mod:
711 return VisitUint32Mod(node);
712 case IrOpcode::kUint32MulHigh:
713 return VisitUint32MulHigh(node);
714 case IrOpcode::kInt64Add:
715 return VisitInt64Add(node);
716 case IrOpcode::kInt64Sub:
717 return VisitInt64Sub(node);
718 case IrOpcode::kInt64Mul:
719 return VisitInt64Mul(node);
720 case IrOpcode::kInt64Div:
721 return VisitInt64Div(node);
722 case IrOpcode::kInt64Mod:
723 return VisitInt64Mod(node);
724 case IrOpcode::kInt64LessThan:
725 return VisitInt64LessThan(node);
726 case IrOpcode::kInt64LessThanOrEqual:
727 return VisitInt64LessThanOrEqual(node);
728 case IrOpcode::kUint64Div:
729 return VisitUint64Div(node);
730 case IrOpcode::kUint64LessThan:
731 return VisitUint64LessThan(node);
732 case IrOpcode::kUint64Mod:
733 return VisitUint64Mod(node);
734 case IrOpcode::kChangeFloat32ToFloat64:
735 return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
736 case IrOpcode::kChangeInt32ToFloat64:
737 return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
738 case IrOpcode::kChangeUint32ToFloat64:
739 return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
740 case IrOpcode::kChangeFloat64ToInt32:
741 return VisitChangeFloat64ToInt32(node);
742 case IrOpcode::kChangeFloat64ToUint32:
743 return VisitChangeFloat64ToUint32(node);
744 case IrOpcode::kChangeInt32ToInt64:
745 return VisitChangeInt32ToInt64(node);
746 case IrOpcode::kChangeUint32ToUint64:
747 return VisitChangeUint32ToUint64(node);
748 case IrOpcode::kTruncateFloat64ToFloat32:
749 return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
750 case IrOpcode::kTruncateFloat64ToInt32:
751 return VisitTruncateFloat64ToInt32(node);
752 case IrOpcode::kTruncateInt64ToInt32:
753 return VisitTruncateInt64ToInt32(node);
754 case IrOpcode::kFloat64Add:
755 return MarkAsDouble(node), VisitFloat64Add(node);
756 case IrOpcode::kFloat64Sub:
757 return MarkAsDouble(node), VisitFloat64Sub(node);
758 case IrOpcode::kFloat64Mul:
759 return MarkAsDouble(node), VisitFloat64Mul(node);
760 case IrOpcode::kFloat64Div:
761 return MarkAsDouble(node), VisitFloat64Div(node);
762 case IrOpcode::kFloat64Mod:
763 return MarkAsDouble(node), VisitFloat64Mod(node);
764 case IrOpcode::kFloat64Min:
765 return MarkAsDouble(node), VisitFloat64Min(node);
766 case IrOpcode::kFloat64Max:
767 return MarkAsDouble(node), VisitFloat64Max(node);
768 case IrOpcode::kFloat64Sqrt:
769 return MarkAsDouble(node), VisitFloat64Sqrt(node);
770 case IrOpcode::kFloat64Equal:
771 return VisitFloat64Equal(node);
772 case IrOpcode::kFloat64LessThan:
773 return VisitFloat64LessThan(node);
774 case IrOpcode::kFloat64LessThanOrEqual:
775 return VisitFloat64LessThanOrEqual(node);
776 case IrOpcode::kFloat64RoundDown:
777 return MarkAsDouble(node), VisitFloat64RoundDown(node);
778 case IrOpcode::kFloat64RoundTruncate:
779 return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
780 case IrOpcode::kFloat64RoundTiesAway:
781 return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
782 case IrOpcode::kFloat64ExtractLowWord32:
783 return VisitFloat64ExtractLowWord32(node);
784 case IrOpcode::kFloat64ExtractHighWord32:
785 return VisitFloat64ExtractHighWord32(node);
786 case IrOpcode::kFloat64InsertLowWord32:
787 return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
788 case IrOpcode::kFloat64InsertHighWord32:
789 return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
790 case IrOpcode::kLoadStackPointer:
791 return VisitLoadStackPointer(node);
792 case IrOpcode::kCheckedLoad: {
793 MachineType rep = OpParameter<MachineType>(node);
794 MarkAsRepresentation(rep, node);
795 return VisitCheckedLoad(node);
797 case IrOpcode::kCheckedStore:
798 return VisitCheckedStore(node);
// Any opcode not handled above is a bug in the pipeline.
800 V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
801 node->opcode(), node->op()->mnemonic(), node->id());
807 #if V8_TURBOFAN_BACKEND
809 void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
810 OperandGenerator g(this);
811 Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
812 g.UseRegister(node->InputAt(0)));
816 void InstructionSelector::VisitLoadStackPointer(Node* node) {
817 OperandGenerator g(this);
818 Emit(kArchStackPointer, g.DefineAsRegister(node));
822 void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
823 InstructionOperand& index_operand) {
824 OperandGenerator g(this);
825 size_t input_count = 2 + sw.value_range;
826 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
827 inputs[0] = index_operand;
828 InstructionOperand default_operand = g.Label(sw.default_branch);
829 std::fill(&inputs[1], &inputs[input_count], default_operand);
830 for (size_t index = 0; index < sw.case_count; ++index) {
831 size_t value = sw.case_values[index] - sw.min_value;
832 BasicBlock* branch = sw.case_branches[index];
833 DCHECK_LE(0u, value);
834 DCHECK_LT(value + 2, input_count);
835 inputs[value + 2] = g.Label(branch);
837 Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
841 void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
842 InstructionOperand& value_operand) {
843 OperandGenerator g(this);
844 size_t input_count = 2 + sw.case_count * 2;
845 auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
846 inputs[0] = value_operand;
847 inputs[1] = g.Label(sw.default_branch);
848 for (size_t index = 0; index < sw.case_count; ++index) {
849 int32_t value = sw.case_values[index];
850 BasicBlock* branch = sw.case_branches[index];
851 inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
852 inputs[index * 2 + 2 + 1] = g.Label(branch);
854 Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
858 #endif // V8_TURBOFAN_BACKEND
860 // 32 bit targets do not implement the following instructions.
861 #if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
863 void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
866 void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
869 void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
872 void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
875 void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
878 void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
881 void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
884 void InstructionSelector::VisitWord64Equal(Node* node) { UNIMPLEMENTED(); }
887 void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
890 void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
893 void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
896 void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
899 void InstructionSelector::VisitInt64LessThan(Node* node) { UNIMPLEMENTED(); }
902 void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
907 void InstructionSelector::VisitUint64Div(Node* node) { UNIMPLEMENTED(); }
910 void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
913 void InstructionSelector::VisitUint64LessThan(Node* node) { UNIMPLEMENTED(); }
916 void InstructionSelector::VisitUint64Mod(Node* node) { UNIMPLEMENTED(); }
919 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
924 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
929 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
933 #endif // V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X64 && V8_TURBOFAN_BACKEND
936 void InstructionSelector::VisitFinish(Node* node) {
937 OperandGenerator g(this);
938 Node* value = node->InputAt(0);
939 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
943 void InstructionSelector::VisitParameter(Node* node) {
944 OperandGenerator g(this);
945 int index = OpParameter<int>(node);
947 g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
948 linkage()->GetParameterType(index)));
952 void InstructionSelector::VisitOsrValue(Node* node) {
953 OperandGenerator g(this);
954 int index = OpParameter<int>(node);
955 Emit(kArchNop, g.DefineAsLocation(node, linkage()->GetOsrValueLocation(index),
960 void InstructionSelector::VisitPhi(Node* node) {
961 const int input_count = node->op()->ValueInputCount();
962 PhiInstruction* phi = new (instruction_zone())
963 PhiInstruction(instruction_zone(), GetVirtualRegister(node),
964 static_cast<size_t>(input_count));
966 ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
968 for (int i = 0; i < input_count; ++i) {
969 Node* const input = node->InputAt(i);
971 phi->SetInput(static_cast<size_t>(i), GetVirtualRegister(input));
976 void InstructionSelector::VisitProjection(Node* node) {
977 OperandGenerator g(this);
978 Node* value = node->InputAt(0);
979 switch (value->opcode()) {
980 case IrOpcode::kInt32AddWithOverflow:
981 case IrOpcode::kInt32SubWithOverflow:
982 if (ProjectionIndexOf(node->op()) == 0u) {
983 Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
985 DCHECK(ProjectionIndexOf(node->op()) == 1u);
995 void InstructionSelector::VisitConstant(Node* node) {
996 // We must emit a NOP here because every live range needs a defining
997 // instruction in the register allocator.
998 OperandGenerator g(this);
999 Emit(kArchNop, g.DefineAsConstant(node));
// Lowers an unconditional control-flow edge to an architecture jump.
1003 void InstructionSelector::VisitGoto(BasicBlock* target) {
1004 // Emit an unconditional jump to the target block's label.
1005 OperandGenerator g(this);
1006 Emit(kArchJmp, g.NoOutput(), g.Label(target));
// Lowers a return.  With a return value, the value is pinned to the
// linkage-defined return location; with value == NULL a bare return is
// emitted (the else branch joining the two Emit calls is not visible in
// this extraction).
1010 void InstructionSelector::VisitReturn(Node* value) {
1011 OperandGenerator g(this);
1012 if (value != NULL) {
1013 Emit(kArchRet, g.NoOutput(),
1014 g.UseLocation(value, linkage()->GetReturnLocation(),
1015 linkage()->GetReturnType()));
1017 Emit(kArchRet, g.NoOutput());
// Lowers a Deoptimize node into a kArchDeoptimize instruction whose inputs
// are the deoptimization id followed by the complete flattened frame state
// (see AddFrameStateInputs below).
1022 void InstructionSelector::VisitDeoptimize(Node* value) {
// Deoptimization support must be enabled for this node to exist.
1023 DCHECK(FLAG_turbo_deoptimization);
1025 OperandGenerator g(this);
1027 FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
1028 size_t arg_count = desc->GetTotalSize() + 1; // Include deopt id.
1030 InstructionOperandVector args(instruction_zone());
// Reserve up front so the &args.front() pointer below stays stable.
1031 args.reserve(arg_count);
// First input: the state id, registered with the sequence so the code
// generator can recover the descriptor later.
1033 InstructionSequence::StateId state_id =
1034 sequence()->AddFrameStateDescriptor(desc);
1035 args.push_back(g.TempImmediate(state_id.ToInt()));
// Remaining inputs: every parameter/local/stack value of the frame state.
1037 AddFrameStateInputs(value, &args, desc);
1039 DCHECK_EQ(args.size(), arg_count);
1041 Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
// Placeholder lowering for Throw: emits only a nop for now, per the
// existing TODO below.
1045 void InstructionSelector::VisitThrow(Node* value) {
1046 OperandGenerator g(this);
1047 Emit(kArchNop, g.NoOutput()); // TODO(titzer)
// Builds (in the instruction zone) a FrameStateDescriptor for a kFrameState
// node: counts its parameters, locals and stack values, and recursively
// builds the descriptor for an outer (inlined caller's) frame state if one
// is attached as input 4.
// NOTE(review): the parameter declaration continuing this signature
// (original line 1052, presumably "Node* state) {") is missing from this
// extraction.
1051 FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
1053 DCHECK(state->opcode() == IrOpcode::kFrameState);
// Inputs: parameters, locals, stack, context, outer frame state.
1054 DCHECK_EQ(5, state->InputCount());
1055 DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(0)->opcode());
1056 DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(1)->opcode());
1057 DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(2)->opcode());
1058 FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
// NOTE(review): the "int parameters =" opener for this count (original
// line 1060) is not visible in this extraction.
1061 static_cast<int>(StateValuesAccess(state->InputAt(0)).size());
1062 int locals = static_cast<int>(StateValuesAccess(state->InputAt(1)).size());
1063 int stack = static_cast<int>(StateValuesAccess(state->InputAt(2)).size());
// Recurse into the outer frame state, if any (input 4).
1065 FrameStateDescriptor* outer_state = NULL;
1066 Node* outer_node = state->InputAt(4);
1067 if (outer_node->opcode() == IrOpcode::kFrameState) {
1068 outer_state = GetFrameStateDescriptor(outer_node);
1071 return new (instruction_zone()) FrameStateDescriptor(
1072 instruction_zone(), state_info, parameters, locals, stack, outer_state);
// Chooses how a frame-state value is encoded in the instruction: constants
// become immediates; anything else is kept alive in a unique stack slot so
// the deoptimizer can read it.
// NOTE(review): the default: label and closing braces of this switch are
// missing from this extraction.
1076 static InstructionOperand SlotOrImmediate(OperandGenerator* g, Node* input) {
1077 switch (input->opcode()) {
1078 case IrOpcode::kInt32Constant:
1079 case IrOpcode::kNumberConstant:
1080 case IrOpcode::kFloat64Constant:
1081 case IrOpcode::kHeapConstant:
1082 return g->UseImmediate(input);
1084 return g->UseUniqueSlot(input);
// Flattens a frame state into instruction operands, appending to *inputs in
// the order the deoptimizer expects: outer frame states first (recursively),
// then parameters, context, locals, and stack values.  Also records each
// value's machine type in the descriptor.
1089 void InstructionSelector::AddFrameStateInputs(
1090 Node* state, InstructionOperandVector* inputs,
1091 FrameStateDescriptor* descriptor) {
1092 DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
// Outer (caller) frame states are emitted before this frame's values.
1094 if (descriptor->outer_state() != NULL) {
1095 AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
1098 Node* parameters = state->InputAt(0);
1099 Node* locals = state->InputAt(1);
1100 Node* stack = state->InputAt(2);
1101 Node* context = state->InputAt(3);
1103 DCHECK_EQ(IrOpcode::kTypedStateValues, parameters->op()->opcode());
1104 DCHECK_EQ(IrOpcode::kTypedStateValues, locals->op()->opcode());
1105 DCHECK_EQ(IrOpcode::kTypedStateValues, stack->op()->opcode());
// The descriptor's counts must match the actual state-value inputs.
1107 DCHECK_EQ(descriptor->parameters_count(),
1108 StateValuesAccess(parameters).size());
1109 DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
1110 DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
// NOTE(review): this types vector is reserved but not otherwise used in the
// lines visible here -- confirm against the full file.
1112 ZoneVector<MachineType> types(instruction_zone());
1113 types.reserve(descriptor->GetSize());
1115 OperandGenerator g(this);
// value_index tracks the position in the descriptor's type table across all
// four value groups below.
1116 size_t value_index = 0;
1117 for (StateValuesAccess::TypedNode input_node :
1118 StateValuesAccess(parameters)) {
1119 inputs->push_back(SlotOrImmediate(&g, input_node.node));
1120 descriptor->SetType(value_index++, input_node.type);
// The context is always tagged when present.
1122 if (descriptor->HasContext()) {
1123 inputs->push_back(SlotOrImmediate(&g, context));
1124 descriptor->SetType(value_index++, kMachAnyTagged);
1126 for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
1127 inputs->push_back(SlotOrImmediate(&g, input_node.node));
1128 descriptor->SetType(value_index++, input_node.type);
1130 for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
1131 inputs->push_back(SlotOrImmediate(&g, input_node.node));
1132 descriptor->SetType(value_index++, input_node.type);
// Every slot of the descriptor must have been filled exactly once.
1134 DCHECK(value_index == descriptor->GetSize());
// Fallback definitions for configurations built without a TurboFan backend:
// every machine-op visitor is stubbed out via the macro below, the few
// non-macro visitors are stubbed individually (their UNIMPLEMENTED() bodies
// are not visible in this extraction), and no machine operator flags are
// advertised.
1138 #if !V8_TURBOFAN_BACKEND
// Expands to an UNIMPLEMENTED() visitor for each machine operator.
1140 #define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
1141 void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
1142 MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
1143 #undef DECLARE_UNIMPLEMENTED_SELECTOR
1146 void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
1151 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
1152 BasicBlock* fbranch) {
1157 void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
// Without a backend, no optional machine operators are supported.
1163 MachineOperatorBuilder::Flags
1164 InstructionSelector::SupportedMachineOperatorFlags() {
1165 return MachineOperatorBuilder::Flag::kNoFlags;
1168 #endif // !V8_TURBOFAN_BACKEND
1170 } // namespace compiler
1171 } // namespace internal