// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_LITHIUM_CODEGEN_ARM64_H_
#define V8_ARM64_LITHIUM_CODEGEN_ARM64_H_

#include "src/arm64/lithium-arm64.h"

#include "src/arm64/lithium-gap-resolver-arm64.h"
#include "src/deoptimizer.h"
#include "src/lithium-codegen.h"
#include "src/safepoint-table.h"
#include "src/scopes.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

// Forward declarations.
class LDeferredCode;
class SafepointGenerator;
class BranchGenerator;

class LCodeGen: public LCodeGenBase {
 public:
  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
      : LCodeGenBase(chunk, assembler, info),
        deoptimizations_(4, info->zone()),
        deopt_jump_table_(4, info->zone()),
        deoptimization_literals_(8, info->zone()),
        inlined_function_count_(0),
        scope_(info->scope()),
        translations_(info->zone()),
        deferred_(8, info->zone()),
        osr_pc_offset_(-1),
        frame_is_built_(false),
        safepoints_(info->zone()),
        resolver_(this),
        expected_safepoint_kind_(Safepoint::kSimple),
        after_push_argument_(false),
        inlined_arguments_(false) {
    PopulateDeoptimizationLiteralsWithInlinedFunctions();
  }

  ~LCodeGen() {
    DCHECK(!after_push_argument_ || inlined_arguments_);
  }

  // Simple accessors.
  Scope* scope() const { return scope_; }

  int LookupDestination(int block_id) const {
    return chunk()->LookupDestination(block_id);
  }

  bool IsNextEmittedBlock(int block_id) const {
    return LookupDestination(block_id) == GetNextEmittedBlock();
  }

  bool NeedsEagerFrame() const {
    return GetStackSlotCount() > 0 ||
        info()->is_non_deferred_calling() ||
        !info()->IsStub() ||
        info()->requires_frame();
  }
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && info()->is_deferred_calling();
  }

  LinkRegisterStatus GetLinkRegisterState() const {
    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
  }

  // Try to generate code for the entire chunk, but it may fail if the
  // chunk contains constructs we cannot handle. Returns true if the
  // code generation attempt succeeded.
  bool GenerateCode();

  // Finish the code by setting stack height, safepoint, and bailout
  // information on it.
  void FinishCode(Handle<Code> code);

  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
  // Support for converting LOperands to assembler types.
  // LOperand must be a register.
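  // (On this backend, ToRegister32 is expected to yield the 32-bit W view of
  // the operand's register; see lithium-codegen-arm64.cc.)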
  Register ToRegister(LOperand* op) const;
  Register ToRegister32(LOperand* op) const;
  Operand ToOperand(LOperand* op);
  Operand ToOperand32I(LOperand* op);
  Operand ToOperand32U(LOperand* op);
  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
  MemOperand ToMemOperand(LOperand* op,
                          StackMode stack_mode = kCanUseStackPointer) const;
  Handle<Object> ToHandle(LConstantOperand* op) const;

  template<class LI>
  Operand ToShiftedRightOperand32I(LOperand* right,
                                   LI* shift_info) {
    return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
  }
  template<class LI>
  Operand ToShiftedRightOperand32U(LOperand* right,
                                   LI* shift_info) {
    return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
  }
  template<class LI>
  Operand ToShiftedRightOperand32(LOperand* right,
                                  LI* shift_info,
                                  IntegerSignedness signedness);

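  // JavaScript shift operators only use the low five bits of the shift count
  // (the count is taken modulo 32), hence the 0x1f mask below.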
  int JSShiftAmountFromLConstant(LOperand* constant) {
    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
  }

  // TODO(jbramley): Examine these helpers and check that they make sense.
  // IsInteger32Constant returns true for smi constants, for example.
  bool IsInteger32Constant(LConstantOperand* op) const;
  bool IsSmi(LConstantOperand* op) const;

  int32_t ToInteger32(LConstantOperand* op) const;
  Smi* ToSmi(LConstantOperand* op) const;
  double ToDouble(LConstantOperand* op) const;
  DoubleRegister ToDoubleRegister(LOperand* op) const;

  // Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO

 private:
  // Return a double scratch register which can be used locally
  // when generating code for a lithium instruction.
  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }

  // Deferred code support.
  void DoDeferredNumberTagD(LNumberTagD* instr);
  void DoDeferredStackCheck(LStackCheck* instr);
  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
                               Label* exit,
                               Label* allocation_entry);

  void DoDeferredNumberTagU(LInstruction* instr,
                            LOperand* value,
                            LOperand* temp1,
                            LOperand* temp2);
  void DoDeferredTaggedToI(LTaggedToI* instr,
                           LOperand* value,
                           LOperand* temp1,
                           LOperand* temp2);
  void DoDeferredAllocate(LAllocate* instr);
  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                   Register result,
                                   Register object,
                                   Register index);

  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);

  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
  void EmitGoto(int block);
  void DoGap(LGap* instr);

  // Generic version of EmitBranch. It contains some code to avoid emitting a
  // branch to the next emitted basic block when we could simply fall through.
  // Don't use it directly; prefer one of the helpers such as
  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
  template<class InstrType>
  void EmitBranchGeneric(InstrType instr,
                         const BranchGenerator& branch);

  template<class InstrType>
  void EmitBranch(InstrType instr, Condition condition);

  template<class InstrType>
  void EmitCompareAndBranch(InstrType instr,
                            Condition condition,
                            const Register& lhs,
                            const Operand& rhs);

  template<class InstrType>
  void EmitTestAndBranch(InstrType instr,
                         Condition condition,
                         const Register& value,
                         uint64_t mask);

  template<class InstrType>
  void EmitBranchIfNonZeroNumber(InstrType instr,
                                 const FPRegister& value,
                                 const FPRegister& scratch);

  template<class InstrType>
  void EmitBranchIfHeapNumber(InstrType instr,
                              const Register& value);

  template<class InstrType>
  void EmitBranchIfRoot(InstrType instr,
                        const Register& value,
                        Heap::RootListIndex index);

  // Emits optimized code to deep-copy the contents of statically known object
  // graphs (e.g. object literal boilerplate). Expects a pointer to the
  // allocated destination object in the result register, and a pointer to the
  // source object in the source register.
  void EmitDeepCopy(Handle<JSObject> object,
                    Register result,
                    Register source,
                    Register scratch,
                    int* offset,
                    AllocationSiteMode mode);

  // Emits optimized code for %_IsString(x).  Preserves the input register.
  // Returns the condition on which a final split into the true and false
  // labels should be made, to optimize fallthrough.
  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
                         SmiCheck check_needed);

  int DefineDeoptimizationLiteral(Handle<Object> literal);
  void PopulateDeoptimizationData(Handle<Code> code);
  void PopulateDeoptimizationLiteralsWithInlinedFunctions();

  MemOperand BuildSeqStringOperand(Register string,
                                   Register temp,
                                   LOperand* index,
                                   String::Encoding encoding);
  void DeoptimizeBranch(
      LEnvironment* environment,
      BranchType branch_type, Register reg = NoReg, int bit = -1,
      Deoptimizer::BailoutType* override_bailout_type = NULL);
  void Deoptimize(LEnvironment* environment,
                  Deoptimizer::BailoutType* override_bailout_type = NULL);
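  // The DeoptimizeIf* helpers below emit a check on their arguments and, when
  // the described condition holds, deoptimize using the given environment.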
  void DeoptimizeIf(Condition cond, LEnvironment* environment);
  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
  void DeoptimizeIfRoot(Register rt,
                        Heap::RootListIndex index,
                        LEnvironment* environment);
  void DeoptimizeIfNotRoot(Register rt,
                           Heap::RootListIndex index,
                           LEnvironment* environment);
  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);

  MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                              Register base,
                                              Register scratch,
                                              bool key_is_smi,
                                              bool key_is_constant,
                                              int constant_key,
                                              ElementsKind elements_kind,
                                              int base_offset);
  MemOperand PrepareKeyedArrayOperand(Register base,
                                      Register elements,
                                      Register key,
                                      bool key_is_tagged,
                                      ElementsKind elements_kind,
                                      Representation representation,
                                      int base_offset);

  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                            Safepoint::DeoptMode mode);

  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }

  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

  // Emit frame translation commands for an environment.
  void WriteTranslation(LEnvironment* environment, Translation* translation);

  void AddToTranslation(LEnvironment* environment,
                        Translation* translation,
                        LOperand* op,
                        bool is_tagged,
                        bool is_uint32,
                        int* object_index_pointer,
                        int* dematerialized_index_pointer);

  void SaveCallerDoubles();
  void RestoreCallerDoubles();

  // Code generation steps.  The Generate* passes below return true if code
  // generation should continue.
  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
  bool GeneratePrologue();
  bool GenerateDeferredCode();
  bool GenerateDeoptJumpTable();
  bool GenerateSafepointTable();

  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
  void GenerateOsrPrologue();

  enum SafepointMode {
    RECORD_SIMPLE_SAFEPOINT,
    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
  };

  void CallCode(Handle<Code> code,
                RelocInfo::Mode mode,
                LInstruction* instr);

  void CallCodeGeneric(Handle<Code> code,
                       RelocInfo::Mode mode,
                       LInstruction* instr,
                       SafepointMode safepoint_mode);

  void CallRuntime(const Runtime::Function* function,
                   int num_arguments,
                   LInstruction* instr,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  void CallRuntime(Runtime::FunctionId id,
                   int num_arguments,
                   LInstruction* instr) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, num_arguments, instr);
  }

  void LoadContextFromDeferred(LOperand* context);
  void CallRuntimeFromDeferred(Runtime::FunctionId id,
                               int argc,
                               LInstruction* instr,
                               LOperand* context);

  // Generate a direct call to a known function.
  // If the function is already loaded into x1 by the caller, function_reg may
  // be set to x1. Otherwise, it must be NoReg, and CallKnownFunction will
  // automatically load it.
  void CallKnownFunction(Handle<JSFunction> function,
                         int formal_parameter_count,
                         int arity,
                         LInstruction* instr,
                         Register function_reg = NoReg);

  // Support for recording safepoint and position information.
  void RecordAndWritePosition(int position) V8_OVERRIDE;
  void RecordSafepoint(LPointerMap* pointers,
                       Safepoint::Kind kind,
                       int arguments,
                       Safepoint::DeoptMode mode);
  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
  void RecordSafepoint(Safepoint::DeoptMode mode);
  void RecordSafepointWithRegisters(LPointerMap* pointers,
                                    int arguments,
                                    Safepoint::DeoptMode mode);
  void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                    SafepointMode safepoint_mode);

  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;

  ZoneList<LEnvironment*> deoptimizations_;
  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
  ZoneList<Handle<Object> > deoptimization_literals_;
  int inlined_function_count_;
  Scope* const scope_;
  TranslationBuffer translations_;
  ZoneList<LDeferredCode*> deferred_;
  int osr_pc_offset_;
  bool frame_is_built_;

  // Builder that keeps track of safepoints in the code. The table itself is
  // emitted at the end of the generated code.
  SafepointTableBuilder safepoints_;

  // Compiles a set of parallel moves into a sequential list of moves.
  LGapResolver resolver_;

  Safepoint::Kind expected_safepoint_kind_;

  // This flag is true when we are after a push (but before a call).
  // In this situation, jssp no longer references the end of the stack slots,
  // so we can only reference a stack slot via fp.
  bool after_push_argument_;
  // If we have inlined arguments, we are no longer able to use jssp because
  // jssp is modified and we never know if we are in a block after or before
  // the pop of the arguments (which restores jssp).
  bool inlined_arguments_;

  int old_position_;

  class PushSafepointRegistersScope BASE_EMBEDDED {
   public:
    explicit PushSafepointRegistersScope(LCodeGen* codegen)
        : codegen_(codegen) {
      DCHECK(codegen_->info()->is_calling());
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

      UseScratchRegisterScope temps(codegen_->masm_);
      // Preserve the value of lr which must be saved on the stack (the call to
      // the stub will clobber it).
      Register to_be_pushed_lr =
          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
      codegen_->masm_->Mov(to_be_pushed_lr, lr);
      StoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
    }

    ~PushSafepointRegistersScope() {
      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
      RestoreRegistersStateStub stub(codegen_->isolate());
      codegen_->masm_->CallStub(&stub);
      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
    }

   private:
    LCodeGen* codegen_;
  };
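
  // Minimal usage sketch of the scope above (illustrative only; the runtime
  // function and surrounding deferred-code body are placeholders, not taken
  // from this file):
  //
  //   {
  //     PushSafepointRegistersScope scope(this);
  //     // ... set up arguments ...
  //     CallRuntimeFromDeferred(Runtime::kSomePlaceholder, 0, instr, context);
  //   }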

  friend class LDeferredCode;
  friend class SafepointGenerator;
  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
};


class LDeferredCode: public ZoneObject {
 public:
  explicit LDeferredCode(LCodeGen* codegen)
      : codegen_(codegen),
        external_exit_(NULL),
        instruction_index_(codegen->current_instruction_) {
    codegen->AddDeferredCode(this);
  }

  virtual ~LDeferredCode() { }
  virtual void Generate() = 0;
  virtual LInstruction* instr() = 0;

  void SetExit(Label* exit) { external_exit_ = exit; }
  Label* entry() { return &entry_; }
  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
  int instruction_index() const { return instruction_index_; }

 protected:
  LCodeGen* codegen() const { return codegen_; }
  MacroAssembler* masm() const { return codegen_->masm(); }

 private:
  LCodeGen* codegen_;
  Label entry_;
  Label exit_;
  Label* external_exit_;
  int instruction_index_;
};
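
// Illustrative sketch of how deferred code is typically defined in
// lithium-codegen-arm64.cc (the instruction type, class name and helper below
// are hypothetical and not declared in this header):
//
//   class DeferredExample V8_FINAL : public LDeferredCode {
//    public:
//     DeferredExample(LCodeGen* codegen, LExample* instr)
//         : LDeferredCode(codegen), instr_(instr) { }
//     virtual void Generate() V8_OVERRIDE {
//       codegen()->DoDeferredExample(instr_);  // hypothetical helper
//     }
//     virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
//    private:
//     LExample* instr_;
//   };
//
// The constructor registers the stub with the code generator (via
// AddDeferredCode); GenerateDeferredCode() later binds each stub's entry()
// label, emits its body, and jumps back to exit().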


// This is the abstract class used by EmitBranchGeneric.
// It is used to emit code for conditional branching. The Emit() function
// emits code to branch when the condition holds and EmitInverted() emits
// the branch when the inverted condition holds.
//
// For concrete examples of conditions, see the implementations in
// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch); an
// illustrative sketch also follows the class below.
class BranchGenerator BASE_EMBEDDED {
 public:
  explicit BranchGenerator(LCodeGen* codegen)
    : codegen_(codegen) { }

  virtual ~BranchGenerator() { }

  virtual void Emit(Label* label) const = 0;
  virtual void EmitInverted(Label* label) const = 0;

 protected:
  MacroAssembler* masm() const { return codegen_->masm(); }

  LCodeGen* codegen_;
};
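
// Illustrative sketch of a concrete BranchGenerator, loosely modelled on the
// helpers in lithium-codegen-arm64.cc (the class below is an example only and
// is not declared in this header):
//
//   class BranchOnCondition : public BranchGenerator {
//    public:
//     BranchOnCondition(LCodeGen* codegen, Condition cond)
//         : BranchGenerator(codegen), cond_(cond) { }
//     virtual void Emit(Label* label) const {
//       __ B(cond_, label);  // branch when cond_ holds
//     }
//     virtual void EmitInverted(Label* label) const {
//       if (cond_ != al) __ B(NegateCondition(cond_), label);
//     }
//    private:
//     Condition cond_;
//   };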

} }  // namespace v8::internal

#endif  // V8_ARM64_LITHIUM_CODEGEN_ARM64_H_