1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/compiler/code-generator.h"
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/linkage.h"
9 #include "src/compiler/pipeline.h"
// Constructs a code generator over the given InstructionSequence, setting
// up the macro assembler, safepoint table builder, and the deoptimization
// bookkeeping containers (all zone-allocated from the sequence's zone).
// NOTE(review): the embedded numbering skips lines (16-17, 20), so parts of
// the initializer list (presumably the code_ member) and the closing brace
// are elided in this chunk — confirm against the complete file.
15 CodeGenerator::CodeGenerator(InstructionSequence* code)
18 current_source_position_(SourcePosition::Invalid()),
19 masm_(code->zone()->isolate(), NULL, 0),
21 safepoints_(code->zone()),
22 lazy_deoptimization_entries_(
23 LazyDeoptimizationEntries::allocator_type(code->zone())),
24 deoptimization_states_(
25 DeoptimizationStates::allocator_type(code->zone())),
26 deoptimization_literals_(Literals::allocator_type(code->zone())),
27 translations_(code->zone()) {
// Pre-size one state slot per deopt entry; slots start NULL and are filled
// lazily by BuildTranslation().
28 deoptimization_states_.resize(code->GetDeoptimizationEntryCount(), NULL);
// Top-level driver: assembles every instruction in the sequence, emits the
// safepoint table, finalizes the Code object, and attaches deoptimization
// data. Also brackets assembly with JIT line-info logging events.
// NOTE(review): the embedded numbering skips several lines (34, 42-43,
// 46-47, 52-55, 57, 62, 68, 70, 74-77) — the prologue call, closing braces,
// and the final `return result;` are elided here; confirm against the
// complete file.
32 Handle<Code> CodeGenerator::GenerateCode() {
33 CompilationInfo* info = linkage()->info();
35 // Emit a code line info recording start event.
36 PositionsRecorder* recorder = masm()->positions_recorder();
37 LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
39 // Place function entry hook if requested to do so.
40 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
41 ProfileEntryHookStub::MaybeCallEntryHook(masm());
44 // Architecture-specific, linkage-specific prologue.
45 info->set_prologue_offset(masm()->pc_offset());
48 // Assemble all instructions.
49 for (InstructionSequence::const_iterator i = code()->begin();
50 i != code()->end(); ++i) {
51 AssembleInstruction(*i);
// Emit the safepoint table only after all code has been assembled.
56 safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
58 // TODO(titzer): what are the right code flags here?
59 Code::Kind kind = Code::STUB;
60 if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
61 kind = Code::OPTIMIZED_FUNCTION;
63 Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
64 masm(), Code::ComputeFlags(kind), info);
65 result->set_is_turbofanned(true);
66 result->set_stack_slots(frame()->GetSpillSlotCount());
67 result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
69 PopulateDeoptimizationData(result);
71 // Emit a code line info recording stop event.
72 void* line_info = recorder->DetachJITHandlerData();
73 LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
// Records a safepoint at the current assembler position, registering each
// live pointer (stack slot, or register when kind requests registers) with
// the safepoint table.
// NOTE(review): lines 80 and 84 are elided here — they presumably declare
// the `arguments` parameter and the `Safepoint safepoint =` local that the
// body below references; confirm against the complete file.
79 void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
81 Safepoint::DeoptMode deopt_mode) {
82 const ZoneList<InstructionOperand*>* operands =
83 pointers->GetNormalizedOperands();
85 safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
86 for (int i = 0; i < operands->length(); i++) {
87 InstructionOperand* pointer = operands->at(i);
88 if (pointer->IsStackSlot()) {
89 safepoint.DefinePointerSlot(pointer->index(), zone());
// Registers are only recorded when the safepoint kind includes them.
90 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
91 Register reg = Register::FromAllocationIndex(pointer->index());
92 safepoint.DefinePointerRegister(reg, zone());
// Assembles a single instruction: binds labels for block starts, resolves
// gap (parallel-move) instructions, records source positions, and otherwise
// defers to the architecture-specific backend, followed by any flags-based
// branch/boolean continuation.
// NOTE(review): lines 108, 110, 116, 119, 123-126, 128, 130-134 are elided
// here — including the `else` introducing the arch-instruction path and the
// dispatch on `mode` that selects between the two `return` statements below;
// confirm against the complete file.
98 void CodeGenerator::AssembleInstruction(Instruction* instr) {
99 if (instr->IsBlockStart()) {
100 // Bind a label for a block start and handle parallel moves.
101 BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
102 current_block_ = block_start->block();
103 if (FLAG_code_comments) {
104 // TODO(titzer): these code comments are a giant memory leak.
105 Vector<char> buffer = Vector<char>::New(32);
106 SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
107 masm()->RecordComment(buffer.start());
109 masm()->bind(block_start->label());
111 if (instr->IsGapMoves()) {
112 // Handle parallel moves associated with the gap instruction.
113 AssembleGap(GapInstruction::cast(instr));
114 } else if (instr->IsSourcePosition()) {
115 AssembleSourcePosition(SourcePositionInstruction::cast(instr));
117 // Assemble architecture-specific code for the instruction.
118 AssembleArchInstruction(instr);
120 // Assemble branches or boolean materializations after this instruction.
121 FlagsMode mode = FlagsModeField::decode(instr->opcode());
122 FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
127 return AssembleArchBoolean(instr, condition);
129 return AssembleArchBranch(instr, condition);
// Records a source-position change with the assembler's positions recorder
// and, under --code-comments, emits a "file:line:column" comment. Positions
// identical to the current one are skipped; invalid positions are a bug.
// NOTE(review): lines 153, 156, 158-159 are elided here — presumably the
// `else` between the two SNPrintF calls and closing braces; confirm against
// the complete file.
136 void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
137 SourcePosition source_position = instr->source_position();
138 if (source_position == current_source_position_) return;
139 DCHECK(!source_position.IsInvalid());
140 if (!source_position.IsUnknown()) {
141 int code_pos = source_position.raw();
142 masm()->positions_recorder()->RecordPosition(source_position.raw());
143 masm()->positions_recorder()->WriteRecordedPositions();
144 if (FLAG_code_comments) {
145 Vector<char> buffer = Vector<char>::New(256);
146 CompilationInfo* info = linkage()->info();
147 int ln = Script::GetLineNumber(info->script(), code_pos);
148 int cn = Script::GetColumnNumber(info->script(), code_pos);
149 if (info->script()->name()->IsString()) {
150 Handle<String> file(String::cast(info->script()->name()));
151 base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
152 file->ToCString().get(), ln, cn);
154 base::OS::SNPrintF(buffer.start(), buffer.length(),
155 "-- <unknown>:%d:%d --", ln, cn);
157 masm()->RecordComment(buffer.start());
// Remember the last emitted position so duplicates can be skipped above.
160 current_source_position_ = source_position;
// Resolves every non-NULL parallel move attached to a gap instruction, in
// inner-position order (FIRST_INNER_POSITION..LAST_INNER_POSITION).
// NOTE(review): lines 171-172 (closing braces) are elided in this chunk.
164 void CodeGenerator::AssembleGap(GapInstruction* instr) {
165 for (int i = GapInstruction::FIRST_INNER_POSITION;
166 i <= GapInstruction::LAST_INNER_POSITION; i++) {
167 GapInstruction::InnerPosition inner_pos =
168 static_cast<GapInstruction::InnerPosition>(i);
169 ParallelMove* move = instr->GetParallelMove(inner_pos);
170 if (move != NULL) resolver()->Resolve(move);
// Builds the DeoptimizationInputData for the finished code object: the
// translation byte array, literal array, one row per deoptimization entry,
// and the return-address patcher entries for lazy deoptimization. Does
// nothing when there are no deopt entries and no patch entries.
// NOTE(review): numerous lines are elided in this chunk (182, 185, 195,
// 197-198, 201, 205, 207-208, 213, 223-224, 233-234, 236) — mostly braces
// and the `else` before SetSharedFunctionInfo(Smi::FromInt(0)); confirm
// against the complete file.
175 void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
176 CompilationInfo* info = linkage()->info();
177 int deopt_count = code()->GetDeoptimizationEntryCount();
178 int patch_count = static_cast<int>(lazy_deoptimization_entries_.size());
179 if (patch_count == 0 && deopt_count == 0) return;
180 Handle<DeoptimizationInputData> data = DeoptimizationInputData::New(
181 isolate(), deopt_count, patch_count, TENURED);
183 Handle<ByteArray> translation_array =
184 translations_.CreateByteArray(isolate()->factory());
186 data->SetTranslationByteArray(*translation_array);
187 data->SetInlinedFunctionCount(Smi::FromInt(0));
188 data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
189 // TODO(jarin) The following code was copied over from Lithium, not sure
190 // whether the scope or the IsOptimizing condition are really needed.
191 if (info->IsOptimizing()) {
192 // Reference to shared function info does not change between phases.
193 AllowDeferredHandleDereference allow_handle_dereference;
194 data->SetSharedFunctionInfo(*info->shared_info());
196 data->SetSharedFunctionInfo(Smi::FromInt(0));
// Copy the collected deoptimization literals into a fixed array.
199 Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
200 static_cast<int>(deoptimization_literals_.size()), TENURED);
202 AllowDeferredHandleDereference copy_handles;
203 for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
204 literals->set(i, *deoptimization_literals_[i]);
206 data->SetLiteralArray(*literals);
209 // No OSR in Turbofan yet...
210 BailoutId osr_ast_id = BailoutId::None();
211 data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
212 data->SetOsrPcOffset(Smi::FromInt(-1));
214 // Populate deoptimization entries.
215 for (int i = 0; i < deopt_count; i++) {
216 FrameStateDescriptor* descriptor = code()->GetDeoptimizationEntry(i);
217 data->SetAstId(i, descriptor->bailout_id());
// Every entry must have had its translation built (see BuildTranslation).
218 CHECK_NE(NULL, deoptimization_states_[i]);
219 data->SetTranslationIndex(
220 i, Smi::FromInt(deoptimization_states_[i]->translation_id_));
221 data->SetArgumentsStackHeight(i, Smi::FromInt(0));
222 data->SetPc(i, Smi::FromInt(-1));
225 // Populate the return address patcher entries.
226 for (int i = 0; i < patch_count; ++i) {
227 LazyDeoptimizationEntry entry = lazy_deoptimization_entries_[i];
228 DCHECK(entry.position_after_call() == entry.continuation()->pos() ||
229 IsNopForSmiCodeInlining(code_object, entry.position_after_call(),
230 entry.continuation()->pos()));
231 data->SetReturnAddressPc(i, Smi::FromInt(entry.position_after_call()));
232 data->SetPatchedAddressPc(i, Smi::FromInt(entry.deoptimization()->pos()));
// Attach the completed table to the code object.
235 code_object->set_deoptimization_data(*data);
// Records a lazy-deoptimization patch point for a call instruction: the pc
// right after the call plus the labels of the continuation and the
// deoptimization blocks (the instruction's last two inputs).
// NOTE(review): lines 241-242, 244, 250, 253 are elided here — presumably
// including the `Label after_call;` declaration that the bind below uses;
// confirm against the complete file.
239 void CodeGenerator::RecordLazyDeoptimizationEntry(Instruction* instr) {
240 InstructionOperandConverter i(this, instr);
243 masm()->bind(&after_call);
245 // The continuation and deoptimization are the last two inputs:
246 BasicBlock* cont_block =
247 i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
248 BasicBlock* deopt_block =
249 i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
251 Label* cont_label = code_->GetLabel(cont_block);
252 Label* deopt_label = code_->GetLabel(deopt_block);
254 lazy_deoptimization_entries_.push_back(
255 LazyDeoptimizationEntry(after_call.pos(), cont_label, deopt_label));
// Interns a literal in the deoptimization literal table: returns the index
// of an existing identical handle, otherwise appends the literal.
// NOTE(review): the trailing lines (263, 265-266) are elided here — the
// function presumably ends with `return result;` after the push_back;
// confirm against the complete file.
259 int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
260 int result = static_cast<int>(deoptimization_literals_.size());
261 for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
262 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
264 deoptimization_literals_.push_back(literal);
// Builds the frame-state translation for one deoptimization entry (a single
// JS frame) and stores the resulting translation index in
// deoptimization_states_. Must be called at most once per entry.
// NOTE(review): lines 273, 280, 283-284, 287 (blank lines / closing braces)
// are elided in this chunk.
269 void CodeGenerator::BuildTranslation(Instruction* instr,
270 int deoptimization_id) {
271 // We should build translation only once.
272 DCHECK_EQ(NULL, deoptimization_states_[deoptimization_id]);
274 FrameStateDescriptor* descriptor =
275 code()->GetDeoptimizationEntry(deoptimization_id);
276 Translation translation(&translations_, 1, 1, zone());
277 translation.BeginJSFrame(descriptor->bailout_id(),
278 Translation::kSelfLiteralId,
279 descriptor->size() - descriptor->parameters_count());
// Each frame-state slot corresponds to one instruction input.
281 for (int i = 0; i < descriptor->size(); i++) {
282 AddTranslationForOperand(&translation, instr, instr->InputAt(i));
285 deoptimization_states_[deoptimization_id] =
286 new (zone()) DeoptimizationState(translation.index());
// Appends one operand to the translation: stack slots and registers map to
// the corresponding Store* call; immediates are materialized as heap
// objects and recorded as deoptimization literals.
// NOTE(review): several lines are elided here (291, 309, 311, 313, 315,
// 318-321, 324-326) — including the `Instruction* instr` parameter line,
// the `constant_object =` assignments preceding the two factory calls,
// the `break`s, and presumably an UNREACHABLE default case; confirm
// against the complete file.
290 void CodeGenerator::AddTranslationForOperand(Translation* translation,
292 InstructionOperand* op) {
293 if (op->IsStackSlot()) {
294 translation->StoreStackSlot(op->index());
295 } else if (op->IsDoubleStackSlot()) {
296 translation->StoreDoubleStackSlot(op->index());
297 } else if (op->IsRegister()) {
298 InstructionOperandConverter converter(this, instr);
299 translation->StoreRegister(converter.ToRegister(op));
300 } else if (op->IsDoubleRegister()) {
301 InstructionOperandConverter converter(this, instr);
302 translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
303 } else if (op->IsImmediate()) {
304 InstructionOperandConverter converter(this, instr);
305 Constant constant = converter.ToConstant(op);
306 Handle<Object> constant_object;
307 switch (constant.type()) {
308 case Constant::kInt32:
310 isolate()->factory()->NewNumberFromInt(constant.ToInt32());
312 case Constant::kFloat64:
314 isolate()->factory()->NewHeapNumber(constant.ToFloat64());
316 case Constant::kHeapObject:
317 constant_object = constant.ToHeapObject();
// Intern the materialized constant and reference it by literal id.
322 int literal_id = DefineDeoptimizationLiteral(constant_object);
323 translation->StoreLiteral(literal_id);
329 #if !V8_TURBOFAN_BACKEND
// Generic-build stub; body (presumably UNIMPLEMENTED, lines 332-333) is
// elided in this chunk — confirm against the complete file.
331 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Generic-build stub; body (lines 338-339) is elided in this chunk —
// confirm against the complete file.
336 void CodeGenerator::AssembleArchBranch(Instruction* instr,
337 FlagsCondition condition) {
// Generic-build stub; body (lines 344-345) is elided in this chunk —
// confirm against the complete file.
342 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
343 FlagsCondition condition) {
348 void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
351 void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
// Generic-build stub; body (lines 356-357) is elided in this chunk —
// confirm against the complete file.
354 void CodeGenerator::AssembleMove(InstructionOperand* source,
355 InstructionOperand* destination) {
// Generic-build stub; body (lines 362-363) is elided in this chunk —
// confirm against the complete file.
360 void CodeGenerator::AssembleSwap(InstructionOperand* source,
361 InstructionOperand* destination) {
366 void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
// Generic-build stub; the rest of the signature (an end-pc parameter,
// lines 371-375) and the body are elided in this chunk — confirm against
// the complete file.
370 bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
377 #endif // !V8_TURBOFAN_BACKEND
379 } // namespace compiler
380 } // namespace internal