// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "arm64/lithium-arm64.h"

#include "arm64/lithium-gap-resolver-arm64.h"
#include "deoptimizer.h"
#include "lithium-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;
class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }
  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }

  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }
  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);
  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  MemOperand ToMemOperand(LOperand* op) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;
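
  // For illustration only (a hypothetical sketch, not part of the original
  // interface): a typical Do* method converts its operands with the helpers
  // above before emitting code, e.g.
  //
  //   Register result = ToRegister(instr->result());
  //   Register left = ToRegister32(instr->left());
  //   Operand right = ToOperand32I(instr->right());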
  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);
  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch on the next emitted basic block where we could just fall through.
  // Don't use it directly; use one of the helpers instead, such as
  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);
  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);
  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);
  // Emits optimized code for %_IsString(x). Preserves the input register.
  // Returns the condition on which a final split to
  // true and false label should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
  void DeoptimizeIf(Condition cc, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
  void ApplyCheckIf(Condition cc, LBoundsCheck* check);
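
  // For illustration only (a hypothetical sketch): these helpers are
  // typically paired with the instruction's environment inside a Do* method,
  // e.g. to bail out when a supposedly boxed value is actually a smi:
  //
  //   Register input = ToRegister(instr->value());
  //   DeoptimizeIfSmi(input, instr->environment());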
  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int additional_index);
  void CalcKeyedArrayBaseRegister(Register base,
                                  Register elements,
                                  Register key,
                                  bool key_is_tagged,
                                  ElementsKind elements_kind);
  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void Abort(BailoutReason reason);

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);
  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps. Returns true if code generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();
  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };
  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }
  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);
  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
                                              int arguments,
                                              Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Converts a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;
  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    PushSafepointRegistersScope(LCodeGen* codegen,
                                Safepoint::Kind kind)
        : codegen_(codegen) {
      ASSERT(codegen_->info()->is_calling());
      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = kind;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call
      // to the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      switch (codegen_->expected_safepoint_kind_) {
        case Safepoint::kWithRegisters: {
          StoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          StoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
    }
    ~PushSafepointRegistersScope() {
      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
      ASSERT((kind & Safepoint::kWithRegisters) != 0);
      switch (kind) {
        case Safepoint::kWithRegisters: {
          RestoreRegistersStateStub stub(kDontSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        case Safepoint::kWithRegistersAndDoubles: {
          RestoreRegistersStateStub stub(kSaveFPRegs);
          codegen_->masm_->CallStub(&stub);
          break;
        }
        default:
          UNREACHABLE();
      }
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
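
  // For illustration only (a hypothetical sketch): deferred code that calls
  // into the runtime brackets the call with this scope, so the registers are
  // stored and restored around the call and a safepoint with registers can
  // be recorded, e.g.
  //
  //   {
  //     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  //     CallRuntimeFromDeferred(
  //         Runtime::kStackGuard, 0, instr, instr->context());
  //   }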
  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};
class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
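
// For illustration only (a hypothetical sketch): lithium-codegen-arm64.cc
// typically declares deferred code as a class local to a Do* member function
// (so it can reach LCodeGen's private DoDeferred* helpers), e.g.
//
//   class DeferredExample: public LDeferredCode {
//    public:
//     DeferredExample(LCodeGen* codegen, LNumberTagD* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
//     virtual LInstruction* instr() { return instr_; }
//    private:
//     LNumberTagD* instr_;
//   };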
// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For concrete examples, see the implementations in lithium-codegen-arm64.cc
// (e.g. BranchOnCondition, CompareAndBranch), and the sketch following this
// class.
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
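
// For illustration only: a minimal sketch of a concrete BranchGenerator,
// modelled on the BranchOnCondition helper in lithium-codegen-arm64.cc. The
// name ExampleBranchOnCondition is hypothetical and not part of the original
// header. Emit() branches when the condition holds, while EmitInverted()
// branches on the negated condition so callers can fall through to the true
// block.
class ExampleBranchOnCondition: public BranchGenerator {
 public:
  ExampleBranchOnCondition(LCodeGen* codegen, Condition cond)
      : BranchGenerator(codegen), cond_(cond) { }

  virtual void Emit(Label* label) const {
    // Branch to the label when cond_ holds.
    masm()->B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    // Branch when cond_ does not hold. 'al' (always) has no inverse, so in
    // that case nothing is emitted and control simply falls through.
    if (cond_ != al) {
      masm()->B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};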
} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_