// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "src/arm64/lithium-arm64.h"

#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple),
        after_push_argument_(false),
        inlined_arguments_(false) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  ~LCodeGen() {
    ASSERT(!after_push_argument_ || inlined_arguments_);
  }

  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }

  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };

  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
  MemOperand ToMemOperand(LOperand* op,
                          StackMode stack_mode = kCanUseStackPointer) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  template<class LI>
  Operand ToShiftedRightOperand32I(LOperand* right,
                                   LI* shift_info) {
    return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
  }
  template<class LI>
  Operand ToShiftedRightOperand32U(LOperand* right,
                                   LI* shift_info) {
    return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
  }
  template<class LI>
  Operand ToShiftedRightOperand32(LOperand* right,
                                  LI* shift_info,
                                  IntegerSignedness signedness);

  int JSShiftAmountFromLConstant(LOperand* constant) {
    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
  }
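
  // Worked example (illustrative, not part of the interface): JavaScript
  // takes shift counts modulo 32, which is what the "& 0x1f" above
  // implements. A constant operand holding 33 yields (33 & 0x1f) == 1, so
  // "x << 33" is emitted as a shift by a single bit.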

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall through.
  // You shouldn't use it directly but rather consider one of the helpers,
  // like LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);
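
  // Illustrative sketch (assumed shape; see the .cc file for the actual
  // definition) of the fall-through logic EmitBranchGeneric implements:
  //
  //   if (true_block == false_block) {
  //     EmitGoto(true_block);
  //   } else if (true_block == next_block) {
  //     // Invert the branch so the common case falls through.
  //     branch.EmitInverted(chunk_->GetAssemblyLabel(false_block));
  //   } else {
  //     branch.Emit(chunk_->GetAssemblyLabel(true_block));
  //     if (false_block != next_block) {
  //       __ B(chunk_->GetAssemblyLabel(false_block));
  //     }
  //   }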

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split into
  // true and false labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);
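
  // Typical use (illustrative, based on the branch helpers above): the
  // returned condition feeds a final branch, e.g.
  //   Condition true_cond =
  //       EmitIsString(value, temp, instr->FalseLabel(chunk_), check_needed);
  //   EmitBranch(instr, true_cond);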

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);

  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cond, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
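
  // The DeoptimizeIf* helpers are thin wrappers around DeoptimizeBranch.
  // A sketch of one of them (assumed; see lithium-codegen-arm64.cc for the
  // actual definitions):
  //
  //   void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* env) {
  //     DeoptimizeBranch(env, reg_zero, rt);
  //   }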

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  MemOperand PrepareKeyedArrayOperand(Register base,
                                      Register elements,
                                      Register key,
                                      bool key_is_tagged,
                                      ElementsKind elements_kind,
                                      Representation representation,
                                      int additional_index);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entry point and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
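
  // For illustration (hypothetical call sites): a caller that has already
  // loaded the function may write
  //   CallKnownFunction(function, formal_count, arity, instr, x1);
  // while other callers pass NoReg and let the function be loaded here.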

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  // This flag is true when we are after a push (but before a call).
  // In this situation, jssp no longer references the end of the stack slots,
  // so we can only reference a stack slot via fp.
  bool after_push_argument_;
  // If we have inlined arguments, we are no longer able to use jssp because
  // jssp is modified and we never know if we are in a block after or before
  // the pop of the arguments (which restores jssp).
  bool inlined_arguments_;
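
  // For example, after "__ Push(x0)" a slot previously at MemOperand(jssp, 8)
  // is found at MemOperand(jssp, 16), while its fp-relative offset is
  // unchanged; this is why ToMemOperand offers kMustUseFramePointer.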

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call to
      // the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
    }

    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
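
  // Typical use (illustrative sketch; Runtime::kSomeFunction stands in for a
  // real runtime id): the scope brackets a call so that register state is
  // stored to, and later restored from, the safepoint register slots.
  //
  //   {
  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //     __ CallRuntimeSaveDoubles(Runtime::kSomeFunction);
  //     RecordSafepointWithRegisters(
  //         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  //   }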

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
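
// Illustrative sketch of how a deferred-code class is typically defined in
// lithium-codegen-arm64.cc (the exact class shown is an example, not
// declared in this header):
//
//   class DeferredNumberTagD: public LDeferredCode {
//    public:
//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };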


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition is verified.
//
// For actual examples of conditions, see the concrete implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
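
// Sketch of a concrete BranchGenerator (assumed shape; the real
// BranchOnCondition lives in lithium-codegen-arm64.cc):
//
//   class BranchOnCondition: public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const { __ B(cond_, label); }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };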

} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_