// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "arm64/lithium-arm64.h"

#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }

  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk; this may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could simply fall through.
  // You shouldn't use it directly; consider one of the helpers below, such as
  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);
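  // Usage sketch (illustrative only; BranchOnCondition is one of the concrete
  // BranchGenerator subclasses in lithium-codegen-arm64.cc):
  //   BranchOnCondition branch(this, condition);
  //   EmitBranchGeneric(instr, branch);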

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);
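  // Illustrative call (names hypothetical): copy a boilerplate object,
  // tracking the running offset into the destination:
  //   int offset = 0;
  //   EmitDeepCopy(boilerplate, result, source, scratch, &offset,
  //                DONT_TRACK_ALLOCATION_SITE);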

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split to
  // true and false labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);
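  // Illustrative use (names hypothetical): the returned condition feeds the
  // final branch so the common case can fall through:
  //   Condition is_string = EmitIsString(value, temp, is_not_string,
  //                                      INLINE_SMI_CHECK);
  //   EmitBranch(instr, is_string);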

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cond, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
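  // Illustrative use (register names hypothetical): bail out before an
  // operation that requires a heap object when the input is a smi:
  //   DeoptimizeIfSmi(receiver, instr->environment());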

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  void CalcKeyedArrayBaseRegister(Register base,
                                  Register elements,
                                  Register key,
                                  bool key_is_tagged,
                                  ElementsKind elements_kind);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Each Generate* step returns true if code
  // generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
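  // Illustrative call (runtime id hypothetical):
  //   CallRuntime(Runtime::kStackGuard, 0, instr);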

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
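  // Illustrative calls (names hypothetical):
  //   // The caller has already loaded the function into x1:
  //   CallKnownFunction(fn, formal_count, arity, instr, x1);
  //   // Otherwise, CallKnownFunction loads it itself:
  //   CallKnownFunction(fn, formal_count, arity, instr);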

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;
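
  // Scoped helper for calls that must spill registers into the safepoint:
  // the constructor saves the register state via a stub call and the
  // destructor restores it. Illustrative use:
  //   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //   ...  // Make a call that can trigger a GC.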
  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr, which must be saved on the stack (the call
      // to the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds, and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For concrete examples, see the implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
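//
// A minimal sketch of a concrete subclass (illustrative; details of the real
// generators live in lithium-codegen-arm64.cc):
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const {
//       masm()->B(cond_, label);
//     }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) masm()->B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };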
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};

} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_