1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
6 #define V8_COMPILER_INSTRUCTION_SELECTOR_H_
10 #include "src/compiler/common-operator.h"
11 #include "src/compiler/instruction.h"
12 #include "src/compiler/machine-operator.h"
13 #include "src/zone-containers.h"
19 // Forward declarations.
20 struct CallBuffer; // TODO(bmeurer): Remove this.
21 class FlagsContinuation;
class InstructionSelector FINAL {
  // Forward declarations.

  // Constructs a selector that emits into {sequence}, recording source
  // positions in {source_positions}. {features} defaults to the CPU
  // features supported on the target platform.
  InstructionSelector(InstructionSequence* sequence,
                      SourcePositionTable* source_positions,
                      Features features = SupportedFeatures());

  // Visit code for the entire graph with the included schedule.
  void SelectInstructions();
  // ===========================================================================
  // ============= Architecture-independent code emission methods. =============
  // ===========================================================================

  // Emit an instruction with {output} and no inputs. The optional {temps}
  // array of {temp_count} operands supplies temporaries.
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  // Convenience overloads adding one to four input operands {a}..{d}.
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
                    InstructionOperand* a, InstructionOperand* b,
                    InstructionOperand* c, InstructionOperand* d,
                    size_t temp_count = 0, InstructionOperand** temps = NULL);
  // Fully general form taking explicit output and input operand arrays.
  Instruction* Emit(InstructionCode opcode, size_t output_count,
                    InstructionOperand** outputs, size_t input_count,
                    InstructionOperand** inputs, size_t temp_count = 0,
                    InstructionOperand** temps = NULL);
  // Emit an already-constructed instruction.
  Instruction* Emit(Instruction* instr);
  // ===========================================================================
  // ============== Architecture-independent CPU feature methods. ==============
  // ===========================================================================

  // A small bitset of CpuFeature values; bit (1u << f) is set when
  // feature {f} is present.
  class Features FINAL {
    Features() : bits_(0) {}
    explicit Features(unsigned bits) : bits_(bits) {}
    explicit Features(CpuFeature f) : bits_(1u << f) {}
    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}

    // Returns true if the bit for feature {f} is set.
    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }

  // Returns true if {feature} is in this selector's feature set.
  bool IsSupported(CpuFeature feature) const {
    return features_.Contains(feature);

  // Returns the features supported on the target platform.
  static Features SupportedFeatures() {
    return Features(CpuFeatures::SupportedFeatures());

  friend class OperandGenerator;
  // ===========================================================================
  // ============ Architecture-independent graph covering methods. =============
  // ===========================================================================

  // Checks if {block} will appear directly after {current_block_} when
  // assembling code, in which case, a fall-through can be used.
  bool IsNextInAssemblyOrder(const BasicBlock* block) const;

  // Used in pattern matching during code generation.
  // Check if {node} can be covered while generating code for the current
  // instruction. A node can be covered if the {user} of the node has the only
  // edge and the two are in the same basic block.
  bool CanCover(Node* user, Node* node) const;

  // Checks if {node} was already defined, and therefore code was already
  // generated for it.
  bool IsDefined(Node* node) const;

  // Inform the instruction selection that {node} was just defined.
  void MarkAsDefined(Node* node);

  // Checks if {node} has any uses, and therefore code has to be generated for
  // it.
  bool IsUsed(Node* node) const;

  // Inform the instruction selection that {node} has at least one use and we
  // will need to generate code for it.
  void MarkAsUsed(Node* node);

  // Checks if {node} is marked as double.
  bool IsDouble(const Node* node) const;

  // Inform the register allocator of a double result.
  void MarkAsDouble(Node* node);

  // Checks if {node} is marked as reference.
  bool IsReference(const Node* node) const;

  // Inform the register allocator of a reference result.
  void MarkAsReference(Node* node);

  // Inform the register allocation of the representation of the value produced
  // by {node}.
  void MarkAsRepresentation(MachineType rep, Node* node);

  // Initialize the call buffer with the InstructionOperands, nodes, etc,
  // corresponding to the inputs and outputs of the call.
  // {call_code_immediate} to generate immediate operands to calls of code.
  // {call_address_immediate} to generate immediate operands to address calls.
  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
                            bool call_code_immediate,
                            bool call_address_immediate);

  // Returns a frame state descriptor for {node}.
  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
  // Appends the operands describing frame state {state} to {inputs},
  // following the layout given by {descriptor}.
  void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
                           FrameStateDescriptor* descriptor);
  // ===========================================================================
  // ============= Architecture-specific graph covering methods. ===============
  // ===========================================================================

  // Visit nodes in the given block and generate code.
  void VisitBlock(BasicBlock* block);

  // Visit the node for the control flow at the end of the block, generating
  // code if necessary.
  void VisitControl(BasicBlock* block);

  // Visit the node and generate code, if any.
  void VisitNode(Node* node);

// Declares one Visit<Op>(Node*) method per machine operator in
// MACHINE_OP_LIST.
#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
  MACHINE_OP_LIST(DECLARE_GENERATOR)
#undef DECLARE_GENERATOR

  // Overflow-checking arithmetic variants; the FlagsContinuation {cont}
  // carries how the resulting condition is to be consumed.
  void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
  void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);

  // Test/compare variants that take a FlagsContinuation {cont}.
  void VisitWord32Test(Node* node, FlagsContinuation* cont);
  void VisitWord64Test(Node* node, FlagsContinuation* cont);
  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);

  // Visitors for common operators and block terminators.
  void VisitFinish(Node* node);
  void VisitParameter(Node* node);
  void VisitPhi(Node* node);
  void VisitProjection(Node* node);
  void VisitConstant(Node* node);
  void VisitCall(Node* call, BasicBlock* continuation,
                 BasicBlock* deoptimization);
  void VisitGoto(BasicBlock* target);
  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
  void VisitReturn(Node* value);
  void VisitThrow(Node* value);
  void VisitDeoptimize(Node* deopt);
  // ===========================================================================

  // Accessors; graph/linkage/schedule and the instruction zone are all
  // obtained through the owned InstructionSequence.
  Graph* graph() const { return sequence()->graph(); }
  Linkage* linkage() const { return sequence()->linkage(); }
  Schedule* schedule() const { return sequence()->schedule(); }
  InstructionSequence* sequence() const { return sequence_; }
  Zone* instruction_zone() const { return sequence()->zone(); }
  // Local scratch zone owned by this selector.
  Zone* zone() { return &zone_; }

  // ===========================================================================

  InstructionSequence* sequence_;          // Destination of emitted code.
  SourcePositionTable* source_positions_;  // Source position recording.

  BasicBlock* current_block_;              // Block currently being visited.
  ZoneDeque<Instruction*> instructions_;   // Buffered instructions.
209 } // namespace compiler
210 } // namespace internal
213 #endif // V8_COMPILER_INSTRUCTION_SELECTOR_H_