// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_

#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"

// -----------------------------------------------------------------------------

#define REGISTER_CODE_LIST(R)                     \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)  \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
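
// Illustrative sketch (not part of the original header): REGISTER_CODE_LIST
// applies a macro once per register code, 0 through 31. For example, a
// hypothetical COUNT_REGISTER macro would expand to 32 statements:
//
//   #define COUNT_REGISTER(N) ++register_count;
//   REGISTER_CODE_LIST(COUNT_REGISTER)  // increments register_count 32 times
//   #undef COUNT_REGISTER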

static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;


// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
struct Register;
struct FPRegister;


struct CPURegister {
  enum RegisterType {
    // The kInvalid value is used to detect uninitialized static instances,
    // which are always zero-initialized before any constructors are called.
    kInvalid = 0,
    kRegister,
    kFPRegister,
    kNoRegister
  };

  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
    CPURegister r = {code, size, type};
    return r;
  }
  unsigned code() const;
  RegisterType type() const;
  unsigned SizeInBits() const;
  int SizeInBytes() const;
  bool Is32Bits() const;
  bool Is64Bits() const;
  bool IsValidOrNone() const;
  bool IsValidRegister() const;
  bool IsValidFPRegister() const;
  bool Is(const CPURegister& other) const;
  bool Aliases(const CPURegister& other) const;
  bool IsRegister() const;
  bool IsFPRegister() const;
  bool IsSameSizeAndType(const CPURegister& other) const;

  // Start of V8 compatibility section ---------------------
  bool is(const CPURegister& other) const { return Is(other); }
  bool is_valid() const { return IsValid(); }
  // End of V8 compatibility section -----------------------

  unsigned reg_code;
  unsigned reg_size;
  RegisterType reg_type;
};

struct Register : public CPURegister {
  static Register Create(unsigned code, unsigned size) {
    return Register(CPURegister::Create(code, size, CPURegister::kRegister));
  }

  Register() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit Register(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  Register(const Register& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsRegister() || IsNone());
    return IsValidRegister();
  }

  static Register XRegFromCode(unsigned code);
  static Register WRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  // These members are necessary for compilation.
  // A few of them may be unused for now.
  static const int kNumRegisters = kNumberOfRegisters;
  static int NumRegisters() { return kNumRegisters; }

  // We allow crankshaft to use the following registers:
  // - x0 to x15
  // - x18 to x24
  // - x27 (also context)
  //
  // TODO(all): Register x25 is currently free and could be available for
  // crankshaft, but we don't use it as we might use it as a per function
  // literal pool pointer in the future.
  //
  // TODO(all): Consider storing cp in x25 to have only two ranges.
  // We split allocatable registers into three ranges called
  // - low range: x0 to x15
  // - high range: x18 to x24
  // - context: x27
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 15;
  static const unsigned kAllocatableHighRangeBegin = 18;
  static const unsigned kAllocatableHighRangeEnd = 24;
  static const unsigned kAllocatableContext = 27;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1;  // cp
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return ((reg_code == kAllocatableContext) ||
            (reg_code <= kAllocatableLowRangeEnd) ||
            ((reg_code >= kAllocatableHighRangeBegin) &&
             (reg_code <= kAllocatableHighRangeEnd)));
  }

  static Register FromAllocationIndex(unsigned index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
    // cp is the last allocatable register.
    if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
      return from_code(kAllocatableContext);
    }

    // Handle low and high ranges.
    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }
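
  // Illustrative sketch (not in the original header): with the ranges above,
  // allocation indices 0-15 map to x0-x15, 16-22 map to x18-x24 (skipping the
  // gap of 2), and the last index maps to cp:
  //
  //   Register::FromAllocationIndex(0);   // x0
  //   Register::FromAllocationIndex(16);  // x18
  //   Register::FromAllocationIndex(23);  // x27 (cp)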

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 15) &&
           (kAllocatableHighRangeBegin == 18) &&
           (kAllocatableHighRangeEnd == 24) &&
           (kAllocatableContext == 27));
    const char* const names[] = {
      "x0", "x1", "x2", "x3", "x4",
      "x5", "x6", "x7", "x8", "x9",
      "x10", "x11", "x12", "x13", "x14",
      "x15", "x18", "x19", "x20", "x21",
      "x22", "x23", "x24", "x27",
    };
    return names[index];
  }

  static int ToAllocationIndex(Register reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();
    if (code == kAllocatableContext) {
      return NumAllocatableRegisters() - 1;
    }

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }
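
  // Illustrative sketch (not in the original header): ToAllocationIndex is the
  // inverse mapping of FromAllocationIndex:
  //
  //   Register::ToAllocationIndex(x7);   // 7  (low range, unchanged)
  //   Register::ToAllocationIndex(x20);  // 18 (20 minus the gap of 2)
  //   Register::ToAllocationIndex(cp);   // 23 (always the last index)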

  static Register from_code(int code) {
    // Always return an X register.
    return Register::Create(code, kXRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};

struct FPRegister : public CPURegister {
  static FPRegister Create(unsigned code, unsigned size) {
    return FPRegister(
        CPURegister::Create(code, size, CPURegister::kFPRegister));
  }

  FPRegister() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit FPRegister(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsFPRegister() || IsNone());
    return IsValidFPRegister();
  }

  static FPRegister SRegFromCode(unsigned code);
  static FPRegister DRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  static const int kMaxNumRegisters = kNumberOfFPRegisters;

  // Crankshaft can use all the FP registers except:
  // - d15 which is used to keep the 0 double value
  // - d29 which is used in crankshaft as a double scratch register
  // - d30 and d31 which are used in the MacroAssembler as double scratch
  //   registers
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 14;
  static const unsigned kAllocatableHighRangeBegin = 16;
  static const unsigned kAllocatableHighRangeEnd = 28;

  static const RegList kAllocatableFPRegisters = 0x1fff7fff;
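
  // Illustrative note (not in the original header): the mask above is the
  // union of the two allocatable ranges:
  //
  //   d0-d14  -> 0x00007fff
  //   d16-d28 -> 0x1fff0000
  //   union   -> 0x1fff7fff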

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return (Bit() & kAllocatableFPRegisters) != 0;
  }

  static FPRegister FromAllocationIndex(unsigned int index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));

    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 14) &&
           (kAllocatableHighRangeBegin == 16) &&
           (kAllocatableHighRangeEnd == 28));
    const char* const names[] = {
      "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11", "d12", "d13", "d14",
      "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27", "d28"
    };
    return names[index];
  }

  static int ToAllocationIndex(FPRegister reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static FPRegister from_code(int code) {
    // Always return a D register.
    return FPRegister::Create(code, kDRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};

struct SIMD128Register {
  static const int kMaxNumRegisters = 0;

  static int ToAllocationIndex(SIMD128Register reg) {

  static const char* AllocationIndexToString(int index) {

STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));

#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#else
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)

// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

#define DEFINE_REGISTERS(N)                                     \
  INITIALIZE_REGISTER(Register, w##N, N,                        \
                      kWRegSizeInBits, CPURegister::kRegister); \
  INITIALIZE_REGISTER(Register, x##N, N,                        \
                      kXRegSizeInBits, CPURegister::kRegister);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS

INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
                    CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
                    CPURegister::kRegister);

#define DEFINE_FPREGISTERS(N)                                     \
  INITIALIZE_REGISTER(FPRegister, s##N, N,                        \
                      kSRegSizeInBits, CPURegister::kFPRegister); \
  INITIALIZE_REGISTER(FPRegister, d##N, N,                        \
                      kDRegSizeInBits, CPURegister::kFPRegister);
REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS

#undef INITIALIZE_REGISTER

// Register aliases.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
ALIAS_REGISTER(Register, wip1, w17);
ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
// We use a register as a JS stack pointer to overcome the restriction on the
// architectural SP alignment.
// We chose x28 because it is contiguous with the other specific purpose
// registers.
STATIC_ASSERT(kJSSPCode == 28);
ALIAS_REGISTER(Register, jssp, x28);
ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);

// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
// Crankshaft double scratch register.
ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
// MacroAssembler double scratch registers.
ALIAS_REGISTER(FPRegister, fp_scratch, d30);
ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
ALIAS_REGISTER(FPRegister, fp_scratch2, d31);

#undef ALIAS_REGISTER

Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
                                              Register reg2 = NoReg,
                                              Register reg3 = NoReg,
                                              Register reg4 = NoReg);

// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3 = NoReg,
                const CPURegister& reg4 = NoReg,
                const CPURegister& reg5 = NoReg,
                const CPURegister& reg6 = NoReg,
                const CPURegister& reg7 = NoReg,
                const CPURegister& reg8 = NoReg);

// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3 = NoCPUReg,
                        const CPURegister& reg4 = NoCPUReg,
                        const CPURegister& reg5 = NoCPUReg,
                        const CPURegister& reg6 = NoCPUReg,
                        const CPURegister& reg7 = NoCPUReg,
                        const CPURegister& reg8 = NoCPUReg);
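
// Illustrative sketch (not in the original header):
//
//   AreAliased(x0, x1, w0);          // true: x0 and w0 share register code 0
//   AreSameSizeAndType(x0, x1, x2);  // true: all 64-bit X registers
//   AreSameSizeAndType(x0, w1);      // false: 64-bit and 32-bit mixed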

typedef FPRegister DoubleRegister;


// -----------------------------------------------------------------------------
// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
        size_(reg1.SizeInBits()), type_(reg1.type()) {
    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
  }

  CPURegList(CPURegister::RegisterType type, unsigned size,
             unsigned first_reg, unsigned last_reg)
      : size_(size), type_(type) {
    DCHECK(((type == CPURegister::kRegister) &&
            (last_reg < kNumberOfRegisters)) ||
           ((type == CPURegister::kFPRegister) &&
            (last_reg < kNumberOfFPRegisters)));
    DCHECK(last_reg >= first_reg);
    list_ = (1UL << (last_reg + 1)) - 1;
    list_ &= ~((1UL << first_reg) - 1);
  }
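
  // Illustrative sketch (not in the original header): three equivalent ways to
  // build the X-register list {x0, x1, x2, x3}:
  //
  //   CPURegList a(x0, x1, x2, x3);
  //   CPURegList b(CPURegister::kRegister, kXRegSizeInBits, 0xf);
  //   CPURegList c(CPURegister::kRegister, kXRegSizeInBits, 0, 3);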

  CPURegister::RegisterType type() const {
    return type_;
  }

  RegList list() const {
    return list_;
  }

  inline void set_list(RegList new_list) {
    list_ = new_list;
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other);

  // Remove every register in the other CPURegList from this one. Registers
  // that do not exist in this list are ignored. The type of the registers in
  // the 'other' list must match those in this list.
  void Remove(const CPURegList& other);

  // Variants of Combine and Remove which take CPURegisters.
  void Combine(const CPURegister& other);
  void Remove(const CPURegister& other1,
              const CPURegister& other2 = NoCPUReg,
              const CPURegister& other3 = NoCPUReg,
              const CPURegister& other4 = NoCPUReg);

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code);
  void Remove(int code);

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  CPURegister PopLowestIndex();
  CPURegister PopHighestIndex();
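
  // Illustrative sketch (not in the original header): draining a list, lowest
  // code first:
  //
  //   CPURegList list(x0, x1, x2);
  //   while (!list.IsEmpty()) {
  //     CPURegister reg = list.PopLowestIndex();  // x0, then x1, then x2
  //   }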

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);

  // Registers saved as safepoints.
  static CPURegList GetSafepointSavedRegisters();

  bool IsEmpty() const {
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other1,
                       const CPURegister& other2 = NoCPUReg,
                       const CPURegister& other3 = NoCPUReg,
                       const CPURegister& other4 = NoCPUReg) const {
    RegList list = 0;
    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
    return (list_ & list) != 0;
  }

  int Count() const {
    return CountSetBits(list_, kRegListSizeInBits);
  }

  unsigned RegisterSizeInBits() const {

  unsigned RegisterSizeInBytes() const {
    int size_in_bits = RegisterSizeInBits();
    DCHECK((size_in_bits % kBitsPerByte) == 0);
    return size_in_bits / kBitsPerByte;
  }

  unsigned TotalSizeInBytes() const {
    return RegisterSizeInBytes() * Count();
  }

 private:
  RegList list_;
  unsigned size_;
  CPURegister::RegisterType type_;

  bool IsValid() const {
    const RegList kValidRegisters = 0x80000000ffffffff;
    const RegList kValidFPRegisters = 0x00000000ffffffff;
    switch (type_) {
      case CPURegister::kRegister:
        return (list_ & kValidRegisters) == list_;
      case CPURegister::kFPRegister:
        return (list_ & kValidFPRegisters) == list_;
      case CPURegister::kNoRegister:

// AAPCS64 callee-saved registers.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()

// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()

// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
 public:
  template<typename T>
  inline explicit Immediate(Handle<T> handle);

  // This is allowed to be an implicit constructor because Immediate is
  // a wrapper class that doesn't normally perform any type conversion.
  template<typename T>
  inline Immediate(T value);  // NOLINT(runtime/explicit)

  template<typename T>
  inline Immediate(T value, RelocInfo::Mode rmode);

  int64_t value() const { return value_; }
  RelocInfo::Mode rmode() const { return rmode_; }

 private:
  void InitializeHandle(Handle<Object> value);

  int64_t value_;
  RelocInfo::Mode rmode_;
};


// -----------------------------------------------------------------------------
// Operands.
const int kSmiShift = kSmiTagSize + kSmiShiftSize;
const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;

// Represents an operand in a machine instruction.
class Operand {
  // TODO(all): If necessary, study in more detail which methods
  // should be inlined or not.

 public:
  // rm, {<shift> {#<shift_amount>}}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  // <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  inline Operand(Register reg,
                 Shift shift = LSL,
                 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, <extend> {#<shift_amount>}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  // <shift_amount> is uint2_t.
  inline Operand(Register reg,
                 Extend extend,
                 unsigned shift_amount = 0);
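
  // Illustrative sketch (not in the original header): the main Operand forms
  // as they might appear at a call site:
  //
  //   Operand(x1);           // plain register
  //   Operand(x1, LSL, 4);   // shifted register: x1 << 4
  //   Operand(w1, UXTW, 2);  // extended register: zero-extend w1, then << 2
  //   Operand(42);           // immediate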

  template<typename T>
  inline explicit Operand(Handle<T> handle);

  // Implicit constructor for all int types, ExternalReference, and Smi.
  template<typename T>
  inline Operand(T t);  // NOLINT(runtime/explicit)

  // Implicit constructor for int types.
  template<typename T>
  inline Operand(T t, RelocInfo::Mode rmode);

  inline bool IsImmediate() const;
  inline bool IsShiftedRegister() const;
  inline bool IsExtendedRegister() const;
  inline bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  inline Operand ToExtendedRegister() const;

  inline Immediate immediate() const;
  inline int64_t ImmediateValue() const;
  inline Register reg() const;
  inline Shift shift() const;
  inline Extend extend() const;
  inline unsigned shift_amount() const;

  // Relocation information.
  bool NeedsRelocation(const Assembler* assembler) const;

  inline static Operand UntagSmi(Register smi);
  inline static Operand UntagSmiAndScale(Register smi, int scale);
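
  // Illustrative sketch (not in the original header): on arm64, a smi lives in
  // the upper 32 bits of a tagged value, so untagging is an arithmetic shift
  // right by kSmiShift that these helpers fold into another instruction:
  //
  //   add(x0, x1, Operand::UntagSmi(x2));  // x0 = x1 + untagged value of x2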

 private:
  Immediate immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};

// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
 public:
  inline explicit MemOperand(Register base,
                             int64_t offset = 0,
                             AddrMode addrmode = Offset);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Shift shift = LSL,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Extend extend,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             const Operand& offset,
                             AddrMode addrmode = Offset);

  const Register& base() const { return base_; }
  const Register& regoffset() const { return regoffset_; }
  int64_t offset() const { return offset_; }
  AddrMode addrmode() const { return addrmode_; }
  Shift shift() const { return shift_; }
  Extend extend() const { return extend_; }
  unsigned shift_amount() const { return shift_amount_; }
  inline bool IsImmediateOffset() const;
  inline bool IsRegisterOffset() const;
  inline bool IsPreIndex() const;
  inline bool IsPostIndex() const;

  // For offset modes, return the offset as an Operand. This helper cannot
  // handle indexed modes.
  inline Operand OffsetAsOperand() const;

  enum PairResult {
    kNotPair,   // Can't use a pair instruction.
    kPairAB,    // Can use a pair instruction (operandA has lower address).
    kPairBA     // Can use a pair instruction (operandB has lower address).
  };
  // Check if two MemOperands are consistent for stp/ldp use.
  static PairResult AreConsistentForPair(const MemOperand& operandA,
                                         const MemOperand& operandB,
                                         int access_size_log2 = kXRegSizeLog2);
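
  // Illustrative sketch (not in the original header): two adjacent X-sized
  // slots can be merged into a single ldp/stp:
  //
  //   MemOperand a(jssp, 0), b(jssp, 8);
  //   MemOperand::AreConsistentForPair(a, b);  // kPairAB (a at lower address)
  //   MemOperand::AreConsistentForPair(b, a);  // kPairBA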

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


class ConstPool {
 public:
  explicit ConstPool(Assembler* assm)
      : assm_(assm),
        first_use_(-1),
        shared_entries_count(0) {}
  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
  int EntryCount() const {
    return shared_entries_count + unique_entries_.size();
  }
  bool IsEmpty() const {
    return shared_entries_.empty() && unique_entries_.empty();
  }
  // Distance in bytes between the current pc and the first instruction
  // using the pool. If there are no pending entries return kMaxInt.
  int DistanceToFirstUse();
  // Offset after which instructions using the pool will be out of range.

  // Maximum size the constant pool can be with current entries. It always
  // includes alignment padding and branch over.

  // Size in bytes of the literal pool *if* it is emitted at the current
  // pc. The size will include the branch over the pool if it was requested.
  int SizeIfEmittedAtCurrentPc(bool require_jump);
  // Emit the literal pool at the current pc with a branch over the pool if
  // requested.
  void Emit(bool require_jump);
  // Discard any pending pool entries.

  bool CanBeShared(RelocInfo::Mode mode);

  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.

  // values, pc offset(s) of entries which can be shared.
  std::multimap<uint64_t, int> shared_entries_;
  // Number of distinct literals in shared entries.
  int shared_entries_count;
  // values, pc offset of entries which cannot be shared.
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};

// -----------------------------------------------------------------------------
// Assembler.

class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);

  virtual ~Assembler();

  virtual void AbortedCodeGeneration() {
    constpool_.Clear();
  }

  // System functions ---------------------------------------------------------
  // Start generating code from the beginning of the buffer, discarding any
  // code and data that has already been emitted into the buffer.
  //
  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
  // constant pool is not blocked.
  void Reset();

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  //
  // The descriptor (desc) can be NULL. In that case, the code is finalized as
  // usual, but the descriptor is not populated.
  void GetCode(CodeDesc* desc);

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);

  inline void Unreachable();

  // Label --------------------------------------------------------------------
  // Bind a label to the current pc. Note that labels can only be bound once,
  // and if labels are linked to other instructions, they _must_ be bound
  // before they go out of scope.
  void bind(Label* label);

  // RelocInfo and pools ------------------------------------------------------

  // Record relocation information for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc.
  inline static Address target_pointer_address_at(Address pc);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Address target_address_at(Address pc,
                                          ConstantPoolArray* constant_pool);
  inline static void set_target_address_at(Address pc,
                                           ConstantPoolArray* constant_pool,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);
  static inline Address target_address_at(Address pc, Code* code);
  static inline void set_target_address_at(Address pc,
                                           Code* code,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);

  // Return the code target address at a call site from the return address of
  // that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  // Given the address of the beginning of a call, return the address in the
  // instruction stream that call will return from.
  inline static Address return_address_from_call_start(Address pc);

  // Return the code target address of the patch debug break slot.
  inline static Address break_address_from_return_address(Address pc);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address constant_pool_entry, Code* code, Address target);

  // All addresses in the constant pool are the same size as pointers.
  static const int kSpecialTargetSize = kPointerSize;

  // The sizes of the call sequences emitted by MacroAssembler::Call.
  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
  // as it will choose the correct value for a given relocation mode.
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
  static const int kCallSizeWithRelocation = 2 * kInstructionSize;

  // Size of the generated code in bytes.
  uint64_t SizeOfGeneratedCode() const {
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }

  // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();
  }

  // Check the size of the code generated since the given label. This function
  // is used primarily to work around comparisons between signed and unsigned
  // quantities, since V8 uses both.
  // TODO(jbramley): Work out what sign to use for these things and if
  // possible, change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    DCHECK(size >= 0);
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }

  // Return the number of instructions generated from label to the
  // current position.
  int InstructionsGeneratedSince(const Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }

  // Number of instructions generated for the return sequence in
  // FullCodeGenerator::EmitReturnSequence.
  static const int kJSRetSequenceInstructions = 7;
  // Distance between start of patched return sequence and the emitted address
  // to jump to.
  static const int kPatchReturnSequenceAddressOffset = 0;
  static const int kPatchDebugBreakSlotAddressOffset = 0;

  // Number of instructions necessary to be able to later patch it to a call.
  // See DebugCodegen::GenerateSlot() and
  // BreakLocationIterator::SetDebugBreakAtSlot().
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstructionSize;

  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool();

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool();

  bool is_const_pool_blocked() const;
  static bool IsConstantPoolAt(Instruction* instr);
  static int ConstantPoolSizeAt(Instruction* instr);
  // See Assembler::CheckConstPool for more info.
  void EmitPoolGuard();

  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
  void StartBlockVeneerPool();

  // Resume veneer pool emission. Needs to be called as many times as
  // StartBlockVeneerPool to have an effect.
  void EndBlockVeneerPool();

  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }

  // Block/resume emission of constant pools and veneer pools.
  void StartBlockPools() {
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  void EndBlockPools() {
    EndBlockConstPool();
    EndBlockVeneerPool();
  }

  // Debugging ----------------------------------------------------------------
  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
  void RecordComment(const char* msg);
  int buffer_space() const;

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the emission of a constant pool.
  //
  // The emission of constant and veneer pools depends on the size of the code
  // generated and the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
  // and cause the version of the code with debugger support to have pools
  // generated in different places.
  // Recording the position and size of emitted pools allows us to correctly
  // compute the offset mappings between the different versions of a function
  // in all situations.
  //
  // The parameter indicates the size of the pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);

  // Instruction set functions ------------------------------------------------

  // Branch / Jump instructions.
  // For branches, offsets are scaled, i.e. they are in instructions, not bytes.
  // Branch to register.
  void br(const Register& xn);

  // Branch-link to register.
  void blr(const Register& xn);

  // Branch to register with return hint.
  void ret(const Register& xn = lr);

  // Unconditional branch to label.
  void b(Label* label);

  // Conditional branch to label.
  void b(Label* label, Condition cond);

  // Unconditional branch to PC offset.
  void b(int imm26);

  // Conditional branch to PC offset.
  void b(int imm19, Condition cond);

  // Branch-link to label / pc offset.
  void bl(Label* label);
  void bl(int imm26);

  // Compare and branch to label / pc offset if zero.
  void cbz(const Register& rt, Label* label);
  void cbz(const Register& rt, int imm19);

  // Compare and branch to label / pc offset if not zero.
  void cbnz(const Register& rt, Label* label);
  void cbnz(const Register& rt, int imm19);

  // Test bit and branch to label / pc offset if zero.
  void tbz(const Register& rt, unsigned bit_pos, Label* label);
  void tbz(const Register& rt, unsigned bit_pos, int imm14);

  // Test bit and branch to label / pc offset if not zero.
  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void tbnz(const Register& rt, unsigned bit_pos, int imm14);

  // Address calculation instructions.
  // Calculate a PC-relative address. Unlike for branches the offset in adr is
  // unscaled (i.e. the result can be unaligned).
  void adr(const Register& rd, Label* label);
  void adr(const Register& rd, int imm21);

  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add and update status flags.
  void adds(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare negative.
  void cmn(const Register& rn, const Operand& operand);

  // Subtract.
  void sub(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract and update status flags.
  void subs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare.
  void cmp(const Register& rn, const Operand& operand);

  // Negate.
  void neg(const Register& rd,
           const Operand& operand);

  // Negate and update status flags.
  void negs(const Register& rd,
            const Operand& operand);

  // Add with carry bit.
  void adc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add with carry bit and update status flags.
  void adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Subtract with carry bit.
  void sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract with carry bit and update status flags.
  void sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Negate with carry bit.
  void ngc(const Register& rd,
           const Operand& operand);

  // Negate with carry bit and update status flags.
  void ngcs(const Register& rd,
            const Operand& operand);

  // Logical instructions.
  // Bitwise and (A & B).
  void and_(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise and (A & B) and update status flags.
  void ands(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bit test, and set flags.
  void tst(const Register& rn, const Operand& operand);

  // Bit clear (A & ~B).
  void bic(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Bit clear (A & ~B) and update status flags.
  void bics(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise or (A | B).
  void orr(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise or-not (A | ~B).
  void orn(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor/xor (A ^ B).
  void eor(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor-not/xnor (A ^ ~B).
  void eon(const Register& rd, const Register& rn, const Operand& operand);

  // Logical shift left variable.
  void lslv(const Register& rd, const Register& rn, const Register& rm);

  // Logical shift right variable.
  void lsrv(const Register& rd, const Register& rn, const Register& rm);

  // Arithmetic shift right variable.
  void asrv(const Register& rd, const Register& rn, const Register& rm);

  // Rotate right variable.
  void rorv(const Register& rd, const Register& rn, const Register& rm);

  // Bitfield instructions.
  // Bitfield move.
  void bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms);

  // Signed bitfield move.
  void sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Unsigned bitfield move.
  void ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Bitfield insert.
  void bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Bitfield extract and insert low.
  void bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, lsb, lsb + width - 1);
  }

  // Arithmetic shift right.
  void asr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Signed bitfield insert in zero.
  void sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Signed bitfield extract.
  void sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, lsb, lsb + width - 1);
  }

  // Signed extend byte.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }

  // Logical shift left.
  void lsl(const Register& rd, const Register& rn, unsigned shift) {
    unsigned reg_size = rd.SizeInBits();
    DCHECK(shift < reg_size);
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
  }

  // Logical shift right.
  void lsr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Unsigned bitfield insert in zero.
  void ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Unsigned bitfield extract.
  void ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, lsb, lsb + width - 1);
  }

  // Unsigned extend byte.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }
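
  // Illustrative sketch (not in the original header): the bitfield aliases all
  // reduce to bfm/sbfm/ubfm. For example, extracting an unsigned 16-bit field
  // starting at bit 8:
  //
  //   ubfx(w0, w1, 8, 16);  // w0 = (w1 >> 8) & 0xffff, via ubfm(w0, w1, 8, 23)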

  // Extract.
  void extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb);

  // Conditional select: rd = cond ? rn : rm.
  void csel(const Register& rd,
            const Register& rn,
            const Register& rm,
            Condition cond);

  // Conditional select increment: rd = cond ? rn : rm + 1.
  void csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select inversion: rd = cond ? rn : ~rm.
  void csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select negation: rd = cond ? rn : -rm.
  void csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional set: rd = cond ? 1 : 0.
  void cset(const Register& rd, Condition cond);

  // Conditional set minus: rd = cond ? -1 : 0.
  void csetm(const Register& rd, Condition cond);

  // Conditional increment: rd = cond ? rn + 1 : rn.
  void cinc(const Register& rd, const Register& rn, Condition cond);

  // Conditional invert: rd = cond ? ~rn : rn.
  void cinv(const Register& rd, const Register& rn, Condition cond);

  // Conditional negate: rd = cond ? -rn : rn.
  void cneg(const Register& rd, const Register& rn, Condition cond);

  // Rotate right.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }

  // Conditional comparison.
  // Conditional compare negative.
  void ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Conditional compare.
  void ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
  void mul(const Register& rd, const Register& rn, const Register& rm);

  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
  void madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
  void mneg(const Register& rd, const Register& rn, const Register& rm);

  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
  void msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // 32 x 32 -> 64-bit multiply.
  void smull(const Register& rd, const Register& rn, const Register& rm);

  // Xd = bits<127:64> of Xn * Xm.
  void smulh(const Register& rd, const Register& rn, const Register& rm);

  // Signed 32 x 32 -> 64-bit multiply and accumulate.
  void smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
  void umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply and subtract.
  void smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
  void umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed integer divide.
  void sdiv(const Register& rd, const Register& rn, const Register& rm);

  // Unsigned integer divide.
  void udiv(const Register& rd, const Register& rn, const Register& rm);

  // Bit count, bit reverse and endian reverse.
  void rbit(const Register& rd, const Register& rn);
  void rev16(const Register& rd, const Register& rn);
  void rev32(const Register& rd, const Register& rn);
  void rev(const Register& rd, const Register& rn);
  void clz(const Register& rd, const Register& rn);
  void cls(const Register& rd, const Register& rn);

  // Memory instructions.

  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& src);

  // Store integer or FP register pair.
  void stp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& dst);

  // Load word pair with sign extension.
  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);

  // Load integer or FP register pair, non-temporal.
  void ldnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& src);

  // Store integer or FP register pair, non-temporal.
  void stnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& dst);

  // Load literal to register from a pc relative address.
  void ldr_pcrel(const CPURegister& rt, int imm19);

  // Load literal to register.
  void ldr(const CPURegister& rt, const Immediate& imm);

  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // so that the result equals the 64-bit immediate argument. If an explicit
  // left shift is specified (0, 16, 32 or 48), the immediate must be a 16-bit
  // value.
  //
  // For movk, an explicit shift can be used to indicate which half word should
  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
  // most-significant.

  // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVK);
  }

  // Move with NOT (the register is set to the inverse of the shifted
  // immediate).
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVN);
  }

  // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVZ);
  }
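
  // Illustrative sketch (not in the original header): materializing a constant
  // with an explicit movz/movk pair:
  //
  //   movz(x0, 0x1234);      // x0 = 0x0000000000001234
  //   movk(x0, 0x5678, 16);  // x0 = 0x0000000056781234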

  // Misc instructions.
  // Monitor debug-mode breakpoint.
  void brk(int code);

  // Halting debug-mode breakpoint.
  void hlt(int code);

  // Move register to register.
  void mov(const Register& rd, const Register& rn);

  // Move NOT(operand) to register.
  void mvn(const Register& rd, const Operand& operand);

  // System instructions.
  // Move to register from system register.
  void mrs(const Register& rt, SystemRegister sysreg);

  // Move from register to system register.
  void msr(SystemRegister sysreg, const Register& rt);

  // System hint.
  void hint(SystemHint code);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

  // Data synchronization barrier.
  void dsb(BarrierDomain domain, BarrierType type);

  // Instruction synchronization barrier.
  void isb();

  // Alias for system instructions.
  void nop() { hint(NOP); }

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    DEBUG_BREAK_NOP,
    ADR_FAR_NOP,
    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
    LAST_NOP_MARKER = ADR_FAR_NOP
  };

  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }

  // Move immediate to FP register.
  void fmov(FPRegister fd, double imm);
  void fmov(FPRegister fd, float imm);

  // Move FP register to register.
  void fmov(Register rd, FPRegister fn);

  // Move register to FP register.
  void fmov(FPRegister fd, Register rn);

  // Move FP register to FP register.
  void fmov(FPRegister fd, FPRegister fn);

  // FP add.
  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP subtract.
  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP multiply.
  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP fused multiply and add.
  void fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply and subtract.
  void fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply, add and negate.
  void fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP fused multiply, subtract and negate.
  void fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP divide.
  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum.
  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum.
  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum number.
  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum number.
  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP absolute.
  void fabs(const FPRegister& fd, const FPRegister& fn);

  // FP negate.
  void fneg(const FPRegister& fd, const FPRegister& fn);

  // FP square root.
  void fsqrt(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to away).
  void frinta(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (toward minus infinity).
  void frintm(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to even).
  void frintn(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards zero).
  void frintz(const FPRegister& fd, const FPRegister& fn);

  // FP compare registers.
  void fcmp(const FPRegister& fn, const FPRegister& fm);

  // FP compare immediate.
  void fcmp(const FPRegister& fn, double value);

  // FP conditional compare.
  void fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond);

  // FP conditional select.
  void fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond);

  // Common FP convert function.
  void FPConvertToInt(const Register& rd,
                      const FPRegister& fn,
                      FPIntegerConvertOp op);

  // FP convert between single and double precision.
  void fcvt(const FPRegister& fd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to away).
  void fcvtau(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to away).
  void fcvtas(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards -infinity).
  void fcvtmu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards -infinity).
  void fcvtms(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to even).
  void fcvtnu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to even).
  void fcvtns(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards zero).
  void fcvtzu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards zero).
  void fcvtzs(const Register& rd, const FPRegister& fn);

  // Convert signed integer or fixed point to FP.
  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Convert unsigned integer or fixed point to FP.
  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
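
  // Illustrative sketch (not in the original header): fbits selects a
  // fixed-point interpretation of the source register:
  //
  //   scvtf(d0, x0);      // d0 = (double)x0
  //   scvtf(d0, x0, 16);  // d0 = (double)x0 / 65536.0 (16 fractional bits)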

  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }

  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char* string);
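
  // Illustrative sketch (not in the original header): emitting raw data into
  // the instruction stream, e.g. for a marker or an inline table:
  //
  //   dc32(0xdeadbeef);            // 4 bytes
  //   dc64(0x0123456789abcdefUL);  // 8 bytes
  //   EmitStringData("marker");    // "marker\0", then pc_ is realigned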

  // Pseudo-instructions ------------------------------------------------------

  // Parameters are described in arm64/instructions-arm64.h.
  void debug(const char* message, uint32_t code, Instr params = BREAK);

  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }

  // Code generation helpers --------------------------------------------------

  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  Instruction* pc() const { return Instruction::Cast(pc_); }

  Instruction* InstructionAt(int offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }

  // Register encoding.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }

  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int64_t imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(uint64_t imm);
  inline static Instr ShiftMoveWide(int64_t shift);

  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);

  // Class for scoping postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
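
  // Illustrative sketch (not in the original header): keeping a sequence free
  // of constant pool emission:
  //
  //   {
  //     Assembler::BlockConstPoolScope scope(&assm);
  //     // ... emit instructions that must stay contiguous ...
  //   }  // constant pool emission is unblocked again here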

  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  // Allocate a constant pool of the correct size for the generated code.
  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);

  // Generate the constant pool for the generated code.
  void PopulateConstantPool(ConstantPoolArray* constant_pool);
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }

  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future, for example if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
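  // Illustrative sketch (not the actual implementation): the decision reduces
  // to checking whether the current position, padded by the margin and the
  // worst-case veneer size, has reached the branch's reachable limit:
  //   bool should_emit =
  //       pc_offset() + margin + kMaxVeneerCodeSize > max_reachable_pc;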
  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }
    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
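  // A usage sketch (assuming an Assembler* assm is in scope): unlike
  // BlockConstPoolScope above, this blocks veneer emission as well:
  //   {
  //     Assembler::BlockPoolsScope scope(assm);
  //     // Neither constant pools nor veneers can be emitted here.
  //   }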
 protected:
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);
  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);
  // Functions for emulating operands not directly supported by the
  // instruction set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);
  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);
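  // Illustrative note (not part of the interface): only values of the form
  // +/- (16..31)/16 * 2^e, with e in [-3, 4], fit the 8-bit FP immediate
  // field; notably 0.0 does not. For example:
  //   IsImmFP64(1.0);   // true
  //   IsImmFP64(-0.5);  // true
  //   IsImmFP64(0.0);   // false: zero must be materialized another way.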
  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used for other branches in
  // the chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
 private:
  // Instruction helpers.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void LoadStorePairNonTemporal(const CPURegister& rt,
                                const CPURegister& rt2,
                                const MemOperand& addr,
                                LoadStorePairNonTemporalOp op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);
  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);
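  // Illustrative sketch (not the actual implementation): since every A64
  // instruction is four bytes, converting a byte offset to an instruction
  // offset is just a scaling, roughly:
  //   return LinkAndGetByteOffsetTo(label) >> kInstructionSizeLog2;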
  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    CheckBuffer();
  }

  // Emit data inline in the instruction stream.
  void EmitData(void const * data, unsigned size) {
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
    CheckBuffer();
  }

  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();
  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;

  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance, or
  //  * the number of entries in the pool is above a pre-defined size, or
  //  * code generation is finished.
  // If a pool needs to be emitted before code generation is finished, a
  // branch over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;
  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with pc-relative loads, so this cannot be more than 1 * MB.
  // Since constant pool emission checks are interval-based, this value is an
  // approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval-based, this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;
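  // Illustrative arithmetic (a restatement of the constants above): a check
  // fires at most every 128 instructions, i.e. 512 bytes of code, and a pool
  // is forced at roughly 64KB from the first use, which leaves a wide slop
  // below the 1MB reach of a pc-relative literal load even when a check
  // lands late.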
  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation
  // Each relocation is encoded as a variable-size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are
  // temporarily stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need
  // one pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;
  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();

 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
 public:
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
 protected:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member of each pair gives information about the unresolved
  // branch; the first member is the maximum offset that the branch can reach
  // in the buffer. The map is sorted by this reachable offset, which makes it
  // easy to check when veneers need to be emitted.
  // Note that the maximum reachable offset (the first member of the pairs)
  // should always be positive, but it has the same type as the return value
  // of pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;
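  // Illustrative sketch (not the actual implementation): because entries are
  // keyed by maximum reachable offset, the branch that will go out of range
  // first is always at the front of the map:
  //   int first_limit = unresolved_branches_.begin()->first;
  //   if (ShouldEmitVeneer(first_limit)) { /* emit veneers now */ }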
  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a rough guess. With a default margin of 1KB, it
  // leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly
  // to the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, and for each PC-relative instruction in the chain
  // checking if pending unresolved information exists. Its complexity is
  // proportional to the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
  PositionsRecorder positions_recorder_;

  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
};
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Instruction* start, unsigned count)
      : Assembler(NULL,
                  reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }
  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }
  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    CpuFeatures::FlushICache(buffer_, length);
  }
  // See definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
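// A usage sketch ('target' is a hypothetical instruction to overwrite, and
// the emitted code must be exactly one instruction long):
//   {
//     PatchingAssembler patcher(target, 1);
//     patcher.b(branch_offset);  // e.g. replace it with a branch.
//   }  // The destructor DCHECKs the count and flushes the i-cache.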
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
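// A usage sketch (mirroring how V8 assemblers typically use this helper):
// constructing an EnsureSpace at the top of an emitting function grows the
// buffer, if needed, before any bytes are written:
//   void Assembler::EmitSomething() {  // Hypothetical emitter.
//     EnsureSpace ensure_space(this);
//     // ... emit ...
//   }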
} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_H_