1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
6 #define V8_ARM64_ASSEMBLER_ARM64_H_
12 #include "src/arm64/instructions-arm64.h"
13 #include "src/assembler.h"
14 #include "src/globals.h"
15 #include "src/serialize.h"
16 #include "src/utils.h"
23 // -----------------------------------------------------------------------------
25 #define REGISTER_CODE_LIST(R) \
26 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
27 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
28 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
29 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
32 static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
35 // Some CPURegister methods can return Register and FPRegister types, so we
36 // need to declare them in advance.
43 // The kInvalid value is used to detect uninitialized static instances,
44 // which are always zero-initialized before any constructors are called.
51 static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
52 CPURegister r = {code, size, type};
56 unsigned code() const;
57 RegisterType type() const;
59 unsigned SizeInBits() const;
60 int SizeInBytes() const;
61 bool Is32Bits() const;
62 bool Is64Bits() const;
64 bool IsValidOrNone() const;
65 bool IsValidRegister() const;
66 bool IsValidFPRegister() const;
68 bool Is(const CPURegister& other) const;
69 bool Aliases(const CPURegister& other) const;
74 bool IsRegister() const;
75 bool IsFPRegister() const;
82 bool IsSameSizeAndType(const CPURegister& other) const;
85 bool is(const CPURegister& other) const { return Is(other); }
86 bool is_valid() const { return IsValid(); }
90 RegisterType reg_type;
94 struct Register : public CPURegister {
95 static Register Create(unsigned code, unsigned size) {
96 return Register(CPURegister::Create(code, size, CPURegister::kRegister));
102 reg_type = CPURegister::kNoRegister;
105 explicit Register(const CPURegister& r) {
106 reg_code = r.reg_code;
107 reg_size = r.reg_size;
108 reg_type = r.reg_type;
109 DCHECK(IsValidOrNone());
112 Register(const Register& r) { // NOLINT(runtime/explicit)
113 reg_code = r.reg_code;
114 reg_size = r.reg_size;
115 reg_type = r.reg_type;
116 DCHECK(IsValidOrNone());
119 bool IsValid() const {
120 DCHECK(IsRegister() || IsNone());
121 return IsValidRegister();
124 static Register XRegFromCode(unsigned code);
125 static Register WRegFromCode(unsigned code);
127 // Start of V8 compatibility section ---------------------
128 // These members are necessary for compilation.
129 // A few of them may be unused for now.
131 static const int kNumRegisters = kNumberOfRegisters;
132 static int NumRegisters() { return kNumRegisters; }
134 // We allow crankshaft to use the following registers:
137 // - x27 (also context)
139 // TODO(all): Register x25 is currently free and could be available for
140 // crankshaft, but we don't use it as we might use it as a per function
141 // literal pool pointer in the future.
143 // TODO(all): Consider storing cp in x25 to have only two ranges.
144 // We split allocatable registers into three ranges called
148 static const unsigned kAllocatableLowRangeBegin = 0;
149 static const unsigned kAllocatableLowRangeEnd = 15;
150 static const unsigned kAllocatableHighRangeBegin = 18;
151 static const unsigned kAllocatableHighRangeEnd = 24;
152 static const unsigned kAllocatableContext = 27;
154 // Gap between low and high ranges.
155 static const int kAllocatableRangeGapSize =
156 (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
158 static const int kMaxNumAllocatableRegisters =
159 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
160 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
161 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
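// Worked example (illustrative, derived from the constants above): allocation
// indices 0-15 map directly to x0-x15; indices 16-22 skip the two-register gap
// and map to x18-x24 (index + kAllocatableRangeGapSize); the last index, 23,
// maps to the context register x27 (cp). Using the functions declared below:
//   Register::FromAllocationIndex(3);   // -> x3
//   Register::FromAllocationIndex(16);  // -> x18 (16 + gap of 2)
//   Register::FromAllocationIndex(23);  // -> x27 (cp)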
163 // Return true if the register is one that crankshaft can allocate.
164 bool IsAllocatable() const {
165 return ((reg_code == kAllocatableContext) ||
166 (reg_code <= kAllocatableLowRangeEnd) ||
167 ((reg_code >= kAllocatableHighRangeBegin) &&
168 (reg_code <= kAllocatableHighRangeEnd)));
171 static Register FromAllocationIndex(unsigned index) {
172 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
173 // cp is the last allocatable register.
174 if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
175 return from_code(kAllocatableContext);
178 // Handle low and high ranges.
179 return (index <= kAllocatableLowRangeEnd)
181 : from_code(index + kAllocatableRangeGapSize);
184 static const char* AllocationIndexToString(int index) {
185 DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
186 DCHECK((kAllocatableLowRangeBegin == 0) &&
187 (kAllocatableLowRangeEnd == 15) &&
188 (kAllocatableHighRangeBegin == 18) &&
189 (kAllocatableHighRangeEnd == 24) &&
190 (kAllocatableContext == 27));
191 const char* const names[] = {
192 "x0", "x1", "x2", "x3", "x4",
193 "x5", "x6", "x7", "x8", "x9",
194 "x10", "x11", "x12", "x13", "x14",
195 "x15", "x18", "x19", "x20", "x21",
196 "x22", "x23", "x24", "x27",
201 static int ToAllocationIndex(Register reg) {
202 DCHECK(reg.IsAllocatable());
203 unsigned code = reg.code();
204 if (code == kAllocatableContext) {
205 return NumAllocatableRegisters() - 1;
208 return (code <= kAllocatableLowRangeEnd)
210 : code - kAllocatableRangeGapSize;
213 static Register from_code(int code) {
214 // Always return an X register.
215 return Register::Create(code, kXRegSizeInBits);
218 // End of V8 compatibility section -----------------------
222 struct FPRegister : public CPURegister {
223 static FPRegister Create(unsigned code, unsigned size) {
225 CPURegister::Create(code, size, CPURegister::kFPRegister));
231 reg_type = CPURegister::kNoRegister;
234 explicit FPRegister(const CPURegister& r) {
235 reg_code = r.reg_code;
236 reg_size = r.reg_size;
237 reg_type = r.reg_type;
238 DCHECK(IsValidOrNone());
241 FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
242 reg_code = r.reg_code;
243 reg_size = r.reg_size;
244 reg_type = r.reg_type;
245 DCHECK(IsValidOrNone());
248 bool IsValid() const {
249 DCHECK(IsFPRegister() || IsNone());
250 return IsValidFPRegister();
253 static FPRegister SRegFromCode(unsigned code);
254 static FPRegister DRegFromCode(unsigned code);
256 // Start of V8 compatibility section ---------------------
257 static const int kMaxNumRegisters = kNumberOfFPRegisters;
259 // Crankshaft can use all the FP registers except:
260 // - d15 which is used to keep the 0 double value
261 // - d29 which is used in crankshaft as a double scratch register
262 // - d30 and d31 which are used in the MacroAssembler as double scratch registers
263 static const unsigned kAllocatableLowRangeBegin = 0;
264 static const unsigned kAllocatableLowRangeEnd = 14;
265 static const unsigned kAllocatableHighRangeBegin = 16;
266 static const unsigned kAllocatableHighRangeEnd = 28;
268 static const RegList kAllocatableFPRegisters = 0x1fff7fff;
270 // Gap between low and high ranges.
271 static const int kAllocatableRangeGapSize =
272 (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
274 static const int kMaxNumAllocatableRegisters =
275 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
276 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
277 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
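// Worked example (illustrative, derived from the constants above): allocation
// indices 0-14 map directly to d0-d14; indices 15-27 skip d15 and map to
// d16-d28 (index + kAllocatableRangeGapSize, where the gap is 1). For example,
// FPRegister::FromAllocationIndex(15) yields d16 and
// FPRegister::FromAllocationIndex(27) yields d28.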
279 // TODO(turbofan): Proper float32 support.
280 static int NumAllocatableAliasedRegisters() {
281 return NumAllocatableRegisters();
284 // Return true if the register is one that crankshaft can allocate.
285 bool IsAllocatable() const {
286 return (Bit() & kAllocatableFPRegisters) != 0;
289 static FPRegister FromAllocationIndex(unsigned int index) {
290 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
292 return (index <= kAllocatableLowRangeEnd)
294 : from_code(index + kAllocatableRangeGapSize);
297 static const char* AllocationIndexToString(int index) {
298 DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
299 DCHECK((kAllocatableLowRangeBegin == 0) &&
300 (kAllocatableLowRangeEnd == 14) &&
301 (kAllocatableHighRangeBegin == 16) &&
302 (kAllocatableHighRangeEnd == 28));
303 const char* const names[] = {
304 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
305 "d8", "d9", "d10", "d11", "d12", "d13", "d14",
306 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
307 "d24", "d25", "d26", "d27", "d28"
312 static int ToAllocationIndex(FPRegister reg) {
313 DCHECK(reg.IsAllocatable());
314 unsigned code = reg.code();
316 return (code <= kAllocatableLowRangeEnd)
318 : code - kAllocatableRangeGapSize;
321 static FPRegister from_code(int code) {
322 // Always return a D register.
323 return FPRegister::Create(code, kDRegSizeInBits);
325 // End of V8 compatibility section -----------------------
328 struct SIMD128Register {
329 static const int kMaxNumRegisters = 0;
331 static int ToAllocationIndex(SIMD128Register reg) {
336 static const char* AllocationIndexToString(int index) {
343 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
344 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
347 #if defined(ARM64_DEFINE_REG_STATICS)
348 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
349 const CPURegister init_##register_class##_##name = {code, size, type}; \
350 const register_class& name = *reinterpret_cast<const register_class*>( \
351 &init_##register_class##_##name)
352 #define ALIAS_REGISTER(register_class, alias, name) \
353 const register_class& alias = *reinterpret_cast<const register_class*>( \
354 &init_##register_class##_##name)
356 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
357 extern const register_class& name
358 #define ALIAS_REGISTER(register_class, alias, name) \
359 extern const register_class& alias
360 #endif // defined(ARM64_DEFINE_REG_STATICS)
362 // No*Reg is used to indicate an unused argument, or an error case. Note that
363 // these all compare equal (using the Is() method). The Register and FPRegister
364 // variants are provided for convenience.
365 INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
366 INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
367 INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
370 INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
372 #define DEFINE_REGISTERS(N) \
373 INITIALIZE_REGISTER(Register, w##N, N, \
374 kWRegSizeInBits, CPURegister::kRegister); \
375 INITIALIZE_REGISTER(Register, x##N, N, \
376 kXRegSizeInBits, CPURegister::kRegister);
377 REGISTER_CODE_LIST(DEFINE_REGISTERS)
378 #undef DEFINE_REGISTERS
380 INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
381 CPURegister::kRegister);
382 INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
383 CPURegister::kRegister);
385 #define DEFINE_FPREGISTERS(N) \
386 INITIALIZE_REGISTER(FPRegister, s##N, N, \
387 kSRegSizeInBits, CPURegister::kFPRegister); \
388 INITIALIZE_REGISTER(FPRegister, d##N, N, \
389 kDRegSizeInBits, CPURegister::kFPRegister);
390 REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
391 #undef DEFINE_FPREGISTERS
393 #undef INITIALIZE_REGISTER
395 // Register aliases.
396 ALIAS_REGISTER(Register, ip0, x16);
397 ALIAS_REGISTER(Register, ip1, x17);
398 ALIAS_REGISTER(Register, wip0, w16);
399 ALIAS_REGISTER(Register, wip1, w17);
401 ALIAS_REGISTER(Register, root, x26);
402 ALIAS_REGISTER(Register, rr, x26);
403 // Context pointer register.
404 ALIAS_REGISTER(Register, cp, x27);
405 // We use a register as a JS stack pointer to overcome the restriction on the
406 // architectural SP alignment.
407 // We chose x28 because it is contiguous with the other specific purpose registers.
409 STATIC_ASSERT(kJSSPCode == 28);
410 ALIAS_REGISTER(Register, jssp, x28);
411 ALIAS_REGISTER(Register, wjssp, w28);
412 ALIAS_REGISTER(Register, fp, x29);
413 ALIAS_REGISTER(Register, lr, x30);
414 ALIAS_REGISTER(Register, xzr, x31);
415 ALIAS_REGISTER(Register, wzr, w31);
417 // Keeps the 0 double value.
418 ALIAS_REGISTER(FPRegister, fp_zero, d15);
419 // Crankshaft double scratch register.
420 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
421 // MacroAssembler double scratch registers.
422 ALIAS_REGISTER(FPRegister, fp_scratch, d30);
423 ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
424 ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
426 #undef ALIAS_REGISTER
429 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
430 Register reg2 = NoReg,
431 Register reg3 = NoReg,
432 Register reg4 = NoReg);
435 // AreAliased returns true if any of the named registers overlap. Arguments set
436 // to NoReg are ignored. The system stack pointer may be specified.
437 bool AreAliased(const CPURegister& reg1,
438 const CPURegister& reg2,
439 const CPURegister& reg3 = NoReg,
440 const CPURegister& reg4 = NoReg,
441 const CPURegister& reg5 = NoReg,
442 const CPURegister& reg6 = NoReg,
443 const CPURegister& reg7 = NoReg,
444 const CPURegister& reg8 = NoReg);
446 // AreSameSizeAndType returns true if all of the specified registers have the
447 // same size, and are of the same type. The system stack pointer may be
448 // specified. Arguments set to NoReg are ignored, as are any subsequent
449 // arguments. At least one argument (reg1) must be valid (not NoCPUReg).
450 bool AreSameSizeAndType(const CPURegister& reg1,
451 const CPURegister& reg2,
452 const CPURegister& reg3 = NoCPUReg,
453 const CPURegister& reg4 = NoCPUReg,
454 const CPURegister& reg5 = NoCPUReg,
455 const CPURegister& reg6 = NoCPUReg,
456 const CPURegister& reg7 = NoCPUReg,
457 const CPURegister& reg8 = NoCPUReg);
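// Illustrative examples based on the descriptions above (not part of the
// original header):
//   AreAliased(x0, x1, x0);           // true: x0 appears twice
//   AreAliased(x0, x1, NoReg, x2);    // false: NoReg arguments are ignored
//   AreSameSizeAndType(x0, x1, x2);   // true: all 64-bit X registers
//   AreSameSizeAndType(x0, w1);       // false: mixed 64-bit and 32-bit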
460 typedef FPRegister DoubleRegister;
463 // -----------------------------------------------------------------------------
464 // Lists of registers.
467 explicit CPURegList(CPURegister reg1,
468 CPURegister reg2 = NoCPUReg,
469 CPURegister reg3 = NoCPUReg,
470 CPURegister reg4 = NoCPUReg)
471 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
472 size_(reg1.SizeInBits()), type_(reg1.type()) {
473 DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
477 CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
478 : list_(list), size_(size), type_(type) {
482 CPURegList(CPURegister::RegisterType type, unsigned size,
483 unsigned first_reg, unsigned last_reg)
484 : size_(size), type_(type) {
485 DCHECK(((type == CPURegister::kRegister) &&
486 (last_reg < kNumberOfRegisters)) ||
487 ((type == CPURegister::kFPRegister) &&
488 (last_reg < kNumberOfFPRegisters)));
489 DCHECK(last_reg >= first_reg);
490 list_ = (1UL << (last_reg + 1)) - 1;
491 list_ &= ~((1UL << first_reg) - 1);
495 CPURegister::RegisterType type() const {
500 RegList list() const {
505 inline void set_list(RegList new_list) {
510 // Combine another CPURegList into this one. Registers that already exist in
511 // this list are left unchanged. The type and size of the registers in the
512 // 'other' list must match those in this list.
513 void Combine(const CPURegList& other);
515 // Remove every register in the other CPURegList from this one. Registers that
516 // do not exist in this list are ignored. The type of the registers in the
517 // 'other' list must match those in this list.
518 void Remove(const CPURegList& other);
520 // Variants of Combine and Remove which take CPURegisters.
521 void Combine(const CPURegister& other);
522 void Remove(const CPURegister& other1,
523 const CPURegister& other2 = NoCPUReg,
524 const CPURegister& other3 = NoCPUReg,
525 const CPURegister& other4 = NoCPUReg);
527 // Variants of Combine and Remove which take a single register by its code;
528 // the type and size of the register is inferred from this list.
529 void Combine(int code);
530 void Remove(int code);
532 // Remove all callee-saved registers from the list. This can be useful when
533 // preparing registers for an AAPCS64 function call, for example.
534 void RemoveCalleeSaved();
536 CPURegister PopLowestIndex();
537 CPURegister PopHighestIndex();
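// Illustrative usage sketch (not part of the original header): build a list
// of X registers, merge and remove entries, then pop them in index order.
//   CPURegList list(x0, x1, x2);                  // x0, x1 and x2
//   list.Combine(x10);                            // add x10
//   list.Remove(x1);                              // drop x1
//   CPURegister lowest = list.PopLowestIndex();   // -> x0
//   CPURegister highest = list.PopHighestIndex(); // -> x10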
539 // AAPCS64 callee-saved registers.
540 static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
541 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
543 // AAPCS64 caller-saved registers. Note that this includes lr.
544 static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
545 static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
547 // Registers saved at safepoints.
548 static CPURegList GetSafepointSavedRegisters();
550 bool IsEmpty() const {
555 bool IncludesAliasOf(const CPURegister& other1,
556 const CPURegister& other2 = NoCPUReg,
557 const CPURegister& other3 = NoCPUReg,
558 const CPURegister& other4 = NoCPUReg) const {
561 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
562 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
563 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
564 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
565 return (list_ & list) != 0;
570 return CountSetBits(list_, kRegListSizeInBits);
573 unsigned RegisterSizeInBits() const {
578 unsigned RegisterSizeInBytes() const {
579 int size_in_bits = RegisterSizeInBits();
580 DCHECK((size_in_bits % kBitsPerByte) == 0);
581 return size_in_bits / kBitsPerByte;
584 unsigned TotalSizeInBytes() const {
586 return RegisterSizeInBytes() * Count();
592 CPURegister::RegisterType type_;
594 bool IsValid() const {
595 const RegList kValidRegisters = 0x8000000ffffffff;
596 const RegList kValidFPRegisters = 0x0000000ffffffff;
598 case CPURegister::kRegister:
599 return (list_ & kValidRegisters) == list_;
600 case CPURegister::kFPRegister:
601 return (list_ & kValidFPRegisters) == list_;
602 case CPURegister::kNoRegister:
612 // AAPCS64 callee-saved registers.
613 #define kCalleeSaved CPURegList::GetCalleeSaved()
614 #define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
617 // AAPCS64 caller-saved registers. Note that this includes lr.
618 #define kCallerSaved CPURegList::GetCallerSaved()
619 #define kCallerSavedFP CPURegList::GetCallerSavedFP()
621 // -----------------------------------------------------------------------------
626 inline explicit Immediate(Handle<T> handle);
628 // This is allowed to be an implicit constructor because Immediate is
629 // a wrapper class that doesn't normally perform any type conversion.
631 inline Immediate(T value); // NOLINT(runtime/explicit)
634 inline Immediate(T value, RelocInfo::Mode rmode);
636 int64_t value() const { return value_; }
637 RelocInfo::Mode rmode() const { return rmode_; }
640 void InitializeHandle(Handle<Object> value);
643 RelocInfo::Mode rmode_;
647 // -----------------------------------------------------------------------------
649 const int kSmiShift = kSmiTagSize + kSmiShiftSize;
650 const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
652 // Represents an operand in a machine instruction.
654 // TODO(all): If necessary, study in more detail which methods
655 // TODO(all): should be inlined or not.
657 // rm, {<shift> {#<shift_amount>}}
658 // where <shift> is one of {LSL, LSR, ASR, ROR}.
659 // <shift_amount> is uint6_t.
660 // This is allowed to be an implicit constructor because Operand is
661 // a wrapper class that doesn't normally perform any type conversion.
662 inline Operand(Register reg,
664 unsigned shift_amount = 0); // NOLINT(runtime/explicit)
666 // rm, <extend> {#<shift_amount>}
667 // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
668 // <shift_amount> is uint2_t.
669 inline Operand(Register reg,
671 unsigned shift_amount = 0);
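// Illustrative operand forms (a sketch based on the constructors above; the
// immediate form is declared below):
//   Operand(x1)              // plain register
//   Operand(x1, LSL, 4)      // x1 logically shifted left by 4
//   Operand(w2, UXTW)        // w2 zero-extended to 64 bits
//   Operand(0x1234)          // immediate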
674 inline explicit Operand(Handle<T> handle);
676 // Implicit constructor for all int types, ExternalReference, and Smi.
678 inline Operand(T t); // NOLINT(runtime/explicit)
680 // Implicit constructor for int types.
682 inline Operand(T t, RelocInfo::Mode rmode);
684 inline bool IsImmediate() const;
685 inline bool IsShiftedRegister() const;
686 inline bool IsExtendedRegister() const;
687 inline bool IsZero() const;
689 // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
690 // which helps in the encoding of instructions that use the stack pointer.
691 inline Operand ToExtendedRegister() const;
693 inline Immediate immediate() const;
694 inline int64_t ImmediateValue() const;
695 inline Register reg() const;
696 inline Shift shift() const;
697 inline Extend extend() const;
698 inline unsigned shift_amount() const;
700 // Relocation information.
701 bool NeedsRelocation(const Assembler* assembler) const;
704 inline static Operand UntagSmi(Register smi);
705 inline static Operand UntagSmiAndScale(Register smi, int scale);
708 Immediate immediate_;
712 unsigned shift_amount_;
716 // MemOperand represents a memory operand in a load or store instruction.
720 inline explicit MemOperand(Register base,
722 AddrMode addrmode = Offset);
723 inline explicit MemOperand(Register base,
726 unsigned shift_amount = 0);
727 inline explicit MemOperand(Register base,
730 unsigned shift_amount = 0);
731 inline explicit MemOperand(Register base,
732 const Operand& offset,
733 AddrMode addrmode = Offset);
735 const Register& base() const { return base_; }
736 const Register& regoffset() const { return regoffset_; }
737 int64_t offset() const { return offset_; }
738 AddrMode addrmode() const { return addrmode_; }
739 Shift shift() const { return shift_; }
740 Extend extend() const { return extend_; }
741 unsigned shift_amount() const { return shift_amount_; }
742 inline bool IsImmediateOffset() const;
743 inline bool IsRegisterOffset() const;
744 inline bool IsPreIndex() const;
745 inline bool IsPostIndex() const;
747 // For offset modes, return the offset as an Operand. This helper cannot
748 // handle indexed modes.
749 inline Operand OffsetAsOperand() const;
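// Illustrative addressing modes (a sketch based on the constructors above;
// Offset, PreIndex and PostIndex are assumed AddrMode values):
//   MemOperand(x0)                  // [x0]
//   MemOperand(x0, 16)              // [x0, #16]
//   MemOperand(x0, x1, LSL, 3)      // [x0, x1, lsl #3]
//   MemOperand(x0, 16, PreIndex)    // [x0, #16]!  (base updated before access)
//   MemOperand(x0, 16, PostIndex)   // [x0], #16   (base updated after access)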
752 kNotPair, // Can't use a pair instruction.
753 kPairAB, // Can use a pair instruction (operandA has lower address).
754 kPairBA // Can use a pair instruction (operandB has lower address).
756 // Check whether two MemOperands are consistent for stp/ldp use.
757 static PairResult AreConsistentForPair(const MemOperand& operandA,
758 const MemOperand& operandB,
759 int access_size_log2 = kXRegSizeLog2);
768 unsigned shift_amount_;
774 explicit ConstPool(Assembler* assm)
777 shared_entries_count(0) {}
778 void RecordEntry(intptr_t data, RelocInfo::Mode mode);
779 int EntryCount() const {
780 return shared_entries_count + unique_entries_.size();
782 bool IsEmpty() const {
783 return shared_entries_.empty() && unique_entries_.empty();
785 // Distance in bytes between the current pc and the first instruction
786 // using the pool. If there are no pending entries return kMaxInt.
787 int DistanceToFirstUse();
788 // Offset after which instructions using the pool will be out of range.
790 // Maximum size the constant pool can be with current entries. It always
791 // includes alignment padding and branch over.
793 // Size in bytes of the literal pool *if* it is emitted at the current
794 // pc. The size will include the branch over the pool if it was requested.
795 int SizeIfEmittedAtCurrentPc(bool require_jump);
796 // Emit the literal pool at the current pc with a branch over the pool if
798 void Emit(bool require_jump);
799 // Discard any pending pool entries.
803 bool CanBeShared(RelocInfo::Mode mode);
809 // Keep track of the first instruction requiring a constant pool entry
810 // since the previous constant pool was emitted.
812 // values, pc offset(s) of entries which can be shared.
813 std::multimap<uint64_t, int> shared_entries_;
814 // Number of distinct literals in shared entries.
815 int shared_entries_count;
816 // values, pc offset of entries which cannot be shared.
817 std::vector<std::pair<uint64_t, int> > unique_entries_;
821 // -----------------------------------------------------------------------------
824 class Assembler : public AssemblerBase {
826 // Create an assembler. Instructions and relocation information are emitted
827 // into a buffer, with the instructions starting from the beginning and the
828 // relocation information starting from the end of the buffer. See CodeDesc
829 // for a detailed comment on the layout (globals.h).
831 // If the provided buffer is NULL, the assembler allocates and grows its own
832 // buffer, and buffer_size determines the initial buffer size. The buffer is
833 // owned by the assembler and deallocated upon destruction of the assembler.
835 // If the provided buffer is not NULL, the assembler uses the provided buffer
836 // for code generation and assumes its size to be buffer_size. If the buffer
837 // is too small, a fatal error occurs. No deallocation of the buffer is done
838 // upon destruction of the assembler.
839 Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
841 virtual ~Assembler();
843 virtual void AbortedCodeGeneration() {
847 // System functions ---------------------------------------------------------
848 // Start generating code from the beginning of the buffer, discarding any code
849 // and data that has already been emitted into the buffer.
851 // In order to avoid any accidental transfer of state, Reset DCHECKs that the
852 // constant pool is not blocked.
855 // GetCode emits any pending (non-emitted) code and fills the descriptor
856 // desc. GetCode() is idempotent; it returns the same result if no other
857 // Assembler functions are invoked in between GetCode() calls.
859 // The descriptor (desc) can be NULL. In that case, the code is finalized as
860 // usual, but the descriptor is not populated.
861 void GetCode(CodeDesc* desc);
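// Minimal usage sketch (illustrative; 'isolate' is assumed to be a valid
// Isolate*): let the assembler own a growable buffer, emit some code, then
// finalize it into a CodeDesc.
//   Assembler assm(isolate, NULL, 0);   // NULL buffer: assembler-owned
//   assm.add(x0, x0, Operand(1));       // ... emit instructions ...
//   assm.ret(lr);
//   CodeDesc desc;
//   assm.GetCode(&desc);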
863 // Insert the smallest number of nop instructions
864 // possible to align the pc offset to a multiple
865 // of m. m must be a power of 2 (>= 4).
868 inline void Unreachable();
870 // Label --------------------------------------------------------------------
871 // Bind a label to the current pc. Note that labels can only be bound once,
872 // and if labels are linked to other instructions, they _must_ be bound
873 // before they go out of scope.
874 void bind(Label* label);
877 // RelocInfo and pools ------------------------------------------------------
879 // Record relocation information for current pc_.
880 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
882 // Return the address in the constant pool of the code target address used by
883 // the branch/call instruction at pc.
884 inline static Address target_pointer_address_at(Address pc);
886 // Read/Modify the code target address in the branch/call instruction at pc.
887 inline static Address target_address_at(Address pc,
888 ConstantPoolArray* constant_pool);
889 inline static void set_target_address_at(Address pc,
890 ConstantPoolArray* constant_pool,
892 ICacheFlushMode icache_flush_mode =
893 FLUSH_ICACHE_IF_NEEDED);
894 static inline Address target_address_at(Address pc, Code* code);
895 static inline void set_target_address_at(Address pc,
898 ICacheFlushMode icache_flush_mode =
899 FLUSH_ICACHE_IF_NEEDED);
901 // Return the code target address at a call site from the return address of
902 // that call in the instruction stream.
903 inline static Address target_address_from_return_address(Address pc);
905 // Given the address of the beginning of a call, return the address in the
906 // instruction stream that call will return from.
907 inline static Address return_address_from_call_start(Address pc);
909 // Return the code target address of the patch debug break slot
910 inline static Address break_address_from_return_address(Address pc);
912 // This sets the branch destination (which is in the constant pool on ARM).
913 // This is for calls and branches within generated code.
914 inline static void deserialization_set_special_target_at(
915 Address constant_pool_entry, Code* code, Address target);
917 // All addresses in the constant pool are the same size as pointers.
918 static const int kSpecialTargetSize = kPointerSize;
920 // The sizes of the call sequences emitted by MacroAssembler::Call.
921 // Wherever possible, use MacroAssembler::CallSize instead of these constants,
922 // as it will choose the correct value for a given relocation mode.
924 // Without relocation:
925 // movz temp, #(target & 0x000000000000ffff)
926 // movk temp, #(target & 0x00000000ffff0000)
927 // movk temp, #(target & 0x0000ffff00000000)
933 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
934 static const int kCallSizeWithRelocation = 2 * kInstructionSize;
936 // Size of the generated code in bytes
937 uint64_t SizeOfGeneratedCode() const {
938 DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
939 return pc_ - buffer_;
942 // Return the code size generated from label to the current position.
943 uint64_t SizeOfCodeGeneratedSince(const Label* label) {
944 DCHECK(label->is_bound());
945 DCHECK(pc_offset() >= label->pos());
946 DCHECK(pc_offset() < buffer_size_);
947 return pc_offset() - label->pos();
950 // Check the size of the code generated since the given label. This function
951 // is used primarily to work around comparisons between signed and unsigned
952 // quantities, since V8 uses both.
953 // TODO(jbramley): Work out what sign to use for these things and if possible,
954 // change things to be consistent.
955 void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
957 DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
960 // Return the number of instructions generated from label to the
962 int InstructionsGeneratedSince(const Label* label) {
963 return SizeOfCodeGeneratedSince(label) / kInstructionSize;
966 // Number of instructions generated for the return sequence in
967 // FullCodeGenerator::EmitReturnSequence.
968 static const int kJSRetSequenceInstructions = 7;
969 // Distance between start of patched return sequence and the emitted address
971 static const int kPatchReturnSequenceAddressOffset = 0;
972 static const int kPatchDebugBreakSlotAddressOffset = 0;
974 // Number of instructions necessary to be able to later patch it to a call.
975 // See DebugCodegen::GenerateSlot() and
976 // BreakLocationIterator::SetDebugBreakAtSlot().
977 static const int kDebugBreakSlotInstructions = 4;
978 static const int kDebugBreakSlotLength =
979 kDebugBreakSlotInstructions * kInstructionSize;
981 static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
983 // Prevent constant pool emission until EndBlockConstPool is called.
984 // Calls to this function can be nested but must be followed by an equal
985 // number of calls to EndBlockConstPool.
986 void StartBlockConstPool();
988 // Resume constant pool emission. Needs to be called as many times as
989 // StartBlockConstPool to have an effect.
990 void EndBlockConstPool();
992 bool is_const_pool_blocked() const;
993 static bool IsConstantPoolAt(Instruction* instr);
994 static int ConstantPoolSizeAt(Instruction* instr);
995 // See Assembler::CheckConstPool for more info.
996 void EmitPoolGuard();
998 // Prevent veneer pool emission until EndBlockVeneerPool is called.
999 // Calls to this function can be nested but must be followed by an equal
1000 // number of calls to EndBlockVeneerPool.
1001 void StartBlockVeneerPool();
1003 // Resume veneer pool emission. Needs to be called as many times as
1004 // StartBlockVeneerPool to have an effect.
1005 void EndBlockVeneerPool();
1007 bool is_veneer_pool_blocked() const {
1008 return veneer_pool_blocked_nesting_ > 0;
1011 // Block/resume emission of constant pools and veneer pools.
1012 void StartBlockPools() {
1013 StartBlockConstPool();
1014 StartBlockVeneerPool();
1016 void EndBlockPools() {
1017 EndBlockConstPool();
1018 EndBlockVeneerPool();
1021 // Debugging ----------------------------------------------------------------
1022 PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1023 void RecordComment(const char* msg);
1024 int buffer_space() const;
1026 // Mark address of the ExitJSFrame code.
1027 void RecordJSReturn();
1029 // Mark address of a debug break slot.
1030 void RecordDebugBreakSlot();
1032 // Record the emission of a constant pool.
1034 // The emission of constant and veneer pools depends on the size of the code
1035 // generated and the number of RelocInfo recorded.
1036 // The Debug mechanism needs to map code offsets between two versions of a
1037 // function, compiled with and without debugger support (see for example
1038 // Debug::PrepareForBreakPoints()).
1039 // Compiling functions with debugger support generates additional code
1040 // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
1041 // and cause the version of the code with debugger support to have pools
1042 // generated in different places.
1043 // Recording the position and size of emitted pools makes it possible to correctly
1044 // compute the offset mappings between the different versions of a function in
1047 // The parameter indicates the size of the pool (in bytes), including
1048 // the marker and branch over the data.
1049 void RecordConstPool(int size);
1052 // Instruction set functions ------------------------------------------------
1054 // Branch / Jump instructions.
1055 // For branches, offsets are scaled, i.e. they are in instructions, not in bytes.
1056 // Branch to register.
1057 void br(const Register& xn);
1059 // Branch-link to register.
1060 void blr(const Register& xn);
1062 // Branch to register with return hint.
1063 void ret(const Register& xn = lr);
1065 // Unconditional branch to label.
1066 void b(Label* label);
1068 // Conditional branch to label.
1069 void b(Label* label, Condition cond);
1071 // Unconditional branch to PC offset.
1074 // Conditional branch to PC offset.
1075 void b(int imm19, Condition cond);
1077 // Branch-link to label / pc offset.
1078 void bl(Label* label);
1081 // Compare and branch to label / pc offset if zero.
1082 void cbz(const Register& rt, Label* label);
1083 void cbz(const Register& rt, int imm19);
1085 // Compare and branch to label / pc offset if not zero.
1086 void cbnz(const Register& rt, Label* label);
1087 void cbnz(const Register& rt, int imm19);
1089 // Test bit and branch to label / pc offset if zero.
1090 void tbz(const Register& rt, unsigned bit_pos, Label* label);
1091 void tbz(const Register& rt, unsigned bit_pos, int imm14);
1093 // Test bit and branch to label / pc offset if not zero.
1094 void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1095 void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1097 // Address calculation instructions.
1098 // Calculate a PC-relative address. Unlike for branches the offset in adr is
1099 // unscaled (i.e. the result can be unaligned).
1100 void adr(const Register& rd, Label* label);
1101 void adr(const Register& rd, int imm21);
1103 // Data Processing instructions.
1105 void add(const Register& rd,
1107 const Operand& operand);
1109 // Add and update status flags.
1110 void adds(const Register& rd,
1112 const Operand& operand);
1114 // Compare negative.
1115 void cmn(const Register& rn, const Operand& operand);
1118 void sub(const Register& rd,
1120 const Operand& operand);
1122 // Subtract and update status flags.
1123 void subs(const Register& rd,
1125 const Operand& operand);
1128 void cmp(const Register& rn, const Operand& operand);
1131 void neg(const Register& rd,
1132 const Operand& operand);
1134 // Negate and update status flags.
1135 void negs(const Register& rd,
1136 const Operand& operand);
1138 // Add with carry bit.
1139 void adc(const Register& rd,
1141 const Operand& operand);
1143 // Add with carry bit and update status flags.
1144 void adcs(const Register& rd,
1146 const Operand& operand);
1148 // Subtract with carry bit.
1149 void sbc(const Register& rd,
1151 const Operand& operand);
1153 // Subtract with carry bit and update status flags.
1154 void sbcs(const Register& rd,
1156 const Operand& operand);
1158 // Negate with carry bit.
1159 void ngc(const Register& rd,
1160 const Operand& operand);
1162 // Negate with carry bit and update status flags.
1163 void ngcs(const Register& rd,
1164 const Operand& operand);
1166 // Logical instructions.
1167 // Bitwise and (A & B).
1168 void and_(const Register& rd,
1170 const Operand& operand);
1172 // Bitwise and (A & B) and update status flags.
1173 void ands(const Register& rd,
1175 const Operand& operand);
1177 // Bit test, and set flags.
1178 void tst(const Register& rn, const Operand& operand);
1180 // Bit clear (A & ~B).
1181 void bic(const Register& rd,
1183 const Operand& operand);
1185 // Bit clear (A & ~B) and update status flags.
1186 void bics(const Register& rd,
1188 const Operand& operand);
1190 // Bitwise or (A | B).
1191 void orr(const Register& rd, const Register& rn, const Operand& operand);
1193 // Bitwise or-not (A | ~B).
1194 void orn(const Register& rd, const Register& rn, const Operand& operand);
1196 // Bitwise eor/xor (A ^ B).
1197 void eor(const Register& rd, const Register& rn, const Operand& operand);
1199 // Bitwise exclusive nor/xnor (A ^ ~B).
1200 void eon(const Register& rd, const Register& rn, const Operand& operand);
1202 // Logical shift left variable.
1203 void lslv(const Register& rd, const Register& rn, const Register& rm);
1205 // Logical shift right variable.
1206 void lsrv(const Register& rd, const Register& rn, const Register& rm);
1208 // Arithmetic shift right variable.
1209 void asrv(const Register& rd, const Register& rn, const Register& rm);
1211 // Rotate right variable.
1212 void rorv(const Register& rd, const Register& rn, const Register& rm);
1214 // Bitfield instructions.
1216 void bfm(const Register& rd,
1221 // Signed bitfield move.
1222 void sbfm(const Register& rd,
1227 // Unsigned bitfield move.
1228 void ubfm(const Register& rd,
1235 void bfi(const Register& rd,
1240 DCHECK(lsb + width <= rn.SizeInBits());
1241 bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
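// Worked example (illustrative): bfi(x0, x1, 8, 4) inserts bits <3:0> of x1
// into bits <11:8> of x0, leaving the remaining bits of x0 unchanged. With the
// formula above this expands to bfm(x0, x1, (64 - 8) & 63, 4 - 1), i.e.
// immr = 56 and imms = 3.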
1244 // Bitfield extract and insert low.
1245 void bfxil(const Register& rd,
1250 DCHECK(lsb + width <= rn.SizeInBits());
1251 bfm(rd, rn, lsb, lsb + width - 1);
1255 // Arithmetic shift right.
1256 void asr(const Register& rd, const Register& rn, unsigned shift) {
1257 DCHECK(shift < rd.SizeInBits());
1258 sbfm(rd, rn, shift, rd.SizeInBits() - 1);
1261 // Signed bitfield insert in zero.
1262 void sbfiz(const Register& rd,
1267 DCHECK(lsb + width <= rn.SizeInBits());
1268 sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1271 // Signed bitfield extract.
1272 void sbfx(const Register& rd,
1277 DCHECK(lsb + width <= rn.SizeInBits());
1278 sbfm(rd, rn, lsb, lsb + width - 1);
1281 // Signed extend byte.
1282 void sxtb(const Register& rd, const Register& rn) {
1286 // Signed extend halfword.
1287 void sxth(const Register& rd, const Register& rn) {
1288 sbfm(rd, rn, 0, 15);
1291 // Signed extend word.
1292 void sxtw(const Register& rd, const Register& rn) {
1293 sbfm(rd, rn, 0, 31);
1297 // Logical shift left.
1298 void lsl(const Register& rd, const Register& rn, unsigned shift) {
1299 unsigned reg_size = rd.SizeInBits();
1300 DCHECK(shift < reg_size);
1301 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1304 // Logical shift right.
1305 void lsr(const Register& rd, const Register& rn, unsigned shift) {
1306 DCHECK(shift < rd.SizeInBits());
1307 ubfm(rd, rn, shift, rd.SizeInBits() - 1);
1310 // Unsigned bitfield insert in zero.
1311 void ubfiz(const Register& rd,
1316 DCHECK(lsb + width <= rn.SizeInBits());
1317 ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1320 // Unsigned bitfield extract.
1321 void ubfx(const Register& rd,
1326 DCHECK(lsb + width <= rn.SizeInBits());
1327 ubfm(rd, rn, lsb, lsb + width - 1);
1330 // Unsigned extend byte.
1331 void uxtb(const Register& rd, const Register& rn) {
1335 // Unsigned extend halfword.
1336 void uxth(const Register& rd, const Register& rn) {
1337 ubfm(rd, rn, 0, 15);
1340 // Unsigned extend word.
1341 void uxtw(const Register& rd, const Register& rn) {
1342 ubfm(rd, rn, 0, 31);
1346 void extr(const Register& rd,
1351 // Conditional select: rd = cond ? rn : rm.
1352 void csel(const Register& rd,
1357 // Conditional select increment: rd = cond ? rn : rm + 1.
1358 void csinc(const Register& rd,
1363 // Conditional select inversion: rd = cond ? rn : ~rm.
1364 void csinv(const Register& rd,
1369 // Conditional select negation: rd = cond ? rn : -rm.
1370 void csneg(const Register& rd,
1375 // Conditional set: rd = cond ? 1 : 0.
1376 void cset(const Register& rd, Condition cond);
1378 // Conditional set minus: rd = cond ? -1 : 0.
1379 void csetm(const Register& rd, Condition cond);
1381 // Conditional increment: rd = cond ? rn + 1 : rn.
1382 void cinc(const Register& rd, const Register& rn, Condition cond);
1384 // Conditional invert: rd = cond ? ~rn : rn.
1385 void cinv(const Register& rd, const Register& rn, Condition cond);
1387 // Conditional negate: rd = cond ? -rn : rn.
1388 void cneg(const Register& rd, const Register& rn, Condition cond);
1391 void ror(const Register& rd, const Register& rs, unsigned shift) {
1392 extr(rd, rs, rs, shift);
1395 // Conditional comparison.
1396 // Conditional compare negative.
1397 void ccmn(const Register& rn,
1398 const Operand& operand,
1402 // Conditional compare.
1403 void ccmp(const Register& rn,
1404 const Operand& operand,
1409 // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
1410 void mul(const Register& rd, const Register& rn, const Register& rm);
1412 // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
1413 void madd(const Register& rd,
1416 const Register& ra);
1418 // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
1419 void mneg(const Register& rd, const Register& rn, const Register& rm);
1421 // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
1422 void msub(const Register& rd,
1425 const Register& ra);
1427 // Signed 32 x 32 -> 64-bit multiply.
1428 void smull(const Register& rd, const Register& rn, const Register& rm);
1430 // Xd = bits<127:64> of Xn * Xm.
1431 void smulh(const Register& rd, const Register& rn, const Register& rm);
1433 // Signed 32 x 32 -> 64-bit multiply and accumulate.
1434 void smaddl(const Register& rd,
1437 const Register& ra);
1439 // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
1440 void umaddl(const Register& rd,
1443 const Register& ra);
1445 // Signed 32 x 32 -> 64-bit multiply and subtract.
1446 void smsubl(const Register& rd,
1449 const Register& ra);
1451 // Unsigned 32 x 32 -> 64-bit multiply and subtract.
1452 void umsubl(const Register& rd,
1455 const Register& ra);
1457 // Signed integer divide.
1458 void sdiv(const Register& rd, const Register& rn, const Register& rm);
1460 // Unsigned integer divide.
1461 void udiv(const Register& rd, const Register& rn, const Register& rm);
1463 // Bit count, bit reverse and endian reverse.
1464 void rbit(const Register& rd, const Register& rn);
1465 void rev16(const Register& rd, const Register& rn);
1466 void rev32(const Register& rd, const Register& rn);
1467 void rev(const Register& rd, const Register& rn);
1468 void clz(const Register& rd, const Register& rn);
1469 void cls(const Register& rd, const Register& rn);
1471 // Memory instructions.
1473 // Load integer or FP register.
1474 void ldr(const CPURegister& rt, const MemOperand& src);
1476 // Store integer or FP register.
1477 void str(const CPURegister& rt, const MemOperand& dst);
1479 // Load word with sign extension.
1480 void ldrsw(const Register& rt, const MemOperand& src);
1483 void ldrb(const Register& rt, const MemOperand& src);
1486 void strb(const Register& rt, const MemOperand& dst);
1488 // Load byte with sign extension.
1489 void ldrsb(const Register& rt, const MemOperand& src);
1492 void ldrh(const Register& rt, const MemOperand& src);
1495 void strh(const Register& rt, const MemOperand& dst);
1497 // Load half-word with sign extension.
1498 void ldrsh(const Register& rt, const MemOperand& src);
1500 // Load integer or FP register pair.
1501 void ldp(const CPURegister& rt, const CPURegister& rt2,
1502 const MemOperand& src);
1504 // Store integer or FP register pair.
1505 void stp(const CPURegister& rt, const CPURegister& rt2,
1506 const MemOperand& dst);
1508 // Load word pair with sign extension.
1509 void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
1511 // Load integer or FP register pair, non-temporal.
1512 void ldnp(const CPURegister& rt, const CPURegister& rt2,
1513 const MemOperand& src);
1515 // Store integer or FP register pair, non-temporal.
1516 void stnp(const CPURegister& rt, const CPURegister& rt2,
1517 const MemOperand& dst);
1519 // Load literal to register from a pc relative address.
1520 void ldr_pcrel(const CPURegister& rt, int imm19);
1522 // Load literal to register.
1523 void ldr(const CPURegister& rt, const Immediate& imm);
1525 // Move instructions. The default shift of -1 indicates that the move
1526 // instruction will calculate an appropriate 16-bit immediate and left shift
1527 // that is equal to the 64-bit immediate argument. If an explicit left shift
1528 // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
1530 // For movk, an explicit shift can be used to indicate which half word should
1531 // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
1532 // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1533 // most-significant.
1536 void movk(const Register& rd, uint64_t imm, int shift = -1) {
1537 MoveWide(rd, imm, shift, MOVK);
1540 // Move with NOT (the destination is set to the inverse of the shifted immediate).
1541 void movn(const Register& rd, uint64_t imm, int shift = -1) {
1542 MoveWide(rd, imm, shift, MOVN);
1546 void movz(const Register& rd, uint64_t imm, int shift = -1) {
1547 MoveWide(rd, imm, shift, MOVZ);
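// Illustrative sketch: materialize a wide constant one half word at a time
// (the values are hypothetical):
//   movz(x0, 0x5678);        // x0 = 0x0000000000005678
//   movk(x0, 0x1234, 16);    // x0 = 0x0000000012345678
//   movk(x0, 0x9abc, 32);    // x0 = 0x00009abc12345678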
1550 // Misc instructions.
1551 // Monitor debug-mode breakpoint.
1554 // Halting debug-mode breakpoint.
1557 // Move register to register.
1558 void mov(const Register& rd, const Register& rn);
1560 // Move NOT(operand) to register.
1561 void mvn(const Register& rd, const Operand& operand);
1563 // System instructions.
1564 // Move to register from system register.
1565 void mrs(const Register& rt, SystemRegister sysreg);
1567 // Move from register to system register.
1568 void msr(SystemRegister sysreg, const Register& rt);
1571 void hint(SystemHint code);
1573 // Data memory barrier
1574 void dmb(BarrierDomain domain, BarrierType type);
1576 // Data synchronization barrier
1577 void dsb(BarrierDomain domain, BarrierType type);
1579 // Instruction synchronization barrier
1582 // Alias for system instructions.
1583 void nop() { hint(NOP); }
1585 // Different nop operations are used by the code generator to detect certain
1586 // states of the generated code.
1587 enum NopMarkerTypes {
1591 FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
1592 LAST_NOP_MARKER = ADR_FAR_NOP
1595 void nop(NopMarkerTypes n) {
1596 DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
1597 mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
1601 // Move immediate to FP register.
1602 void fmov(FPRegister fd, double imm);
1603 void fmov(FPRegister fd, float imm);
1605 // Move FP register to register.
1606 void fmov(Register rd, FPRegister fn);
1608 // Move register to FP register.
1609 void fmov(FPRegister fd, Register rn);
1611 // Move FP register to FP register.
1612 void fmov(FPRegister fd, FPRegister fn);
1615 void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1618 void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1621 void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1623 // FP fused multiply and add.
1624 void fmadd(const FPRegister& fd,
1625 const FPRegister& fn,
1626 const FPRegister& fm,
1627 const FPRegister& fa);
1629 // FP fused multiply and subtract.
1630 void fmsub(const FPRegister& fd,
1631 const FPRegister& fn,
1632 const FPRegister& fm,
1633 const FPRegister& fa);
1635 // FP fused multiply, add and negate.
1636 void fnmadd(const FPRegister& fd,
1637 const FPRegister& fn,
1638 const FPRegister& fm,
1639 const FPRegister& fa);
1641 // FP fused multiply, subtract and negate.
1642 void fnmsub(const FPRegister& fd,
1643 const FPRegister& fn,
1644 const FPRegister& fm,
1645 const FPRegister& fa);
1648 void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1651 void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1654 void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1657 void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1660 void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1663 void fabs(const FPRegister& fd, const FPRegister& fn);
1666 void fneg(const FPRegister& fd, const FPRegister& fn);
1669 void fsqrt(const FPRegister& fd, const FPRegister& fn);
1671 // FP round to integer (nearest with ties to away).
1672 void frinta(const FPRegister& fd, const FPRegister& fn);
1674 // FP round to integer (toward minus infinity).
1675 void frintm(const FPRegister& fd, const FPRegister& fn);
1677 // FP round to integer (nearest with ties to even).
1678 void frintn(const FPRegister& fd, const FPRegister& fn);
1680 // FP round to integer (towards plus infinity).
1681 void frintp(const FPRegister& fd, const FPRegister& fn);
1683 // FP round to integer (towards zero).
1684 void frintz(const FPRegister& fd, const FPRegister& fn);
1686 // FP compare registers.
1687 void fcmp(const FPRegister& fn, const FPRegister& fm);
1689 // FP compare immediate.
1690 void fcmp(const FPRegister& fn, double value);
1692 // FP conditional compare.
1693 void fccmp(const FPRegister& fn,
1694 const FPRegister& fm,
1698 // FP conditional select.
1699 void fcsel(const FPRegister& fd,
1700 const FPRegister& fn,
1701 const FPRegister& fm,
1704 // Common FP Convert function
1705 void FPConvertToInt(const Register& rd,
1706 const FPRegister& fn,
1707 FPIntegerConvertOp op);
1709 // FP convert between single and double precision.
1710 void fcvt(const FPRegister& fd, const FPRegister& fn);
1712 // Convert FP to unsigned integer (nearest with ties to away).
1713 void fcvtau(const Register& rd, const FPRegister& fn);
1715 // Convert FP to signed integer (nearest with ties to away).
1716 void fcvtas(const Register& rd, const FPRegister& fn);
1718 // Convert FP to unsigned integer (round towards -infinity).
1719 void fcvtmu(const Register& rd, const FPRegister& fn);
1721 // Convert FP to signed integer (round towards -infinity).
1722 void fcvtms(const Register& rd, const FPRegister& fn);
1724 // Convert FP to unsigned integer (nearest with ties to even).
1725 void fcvtnu(const Register& rd, const FPRegister& fn);
1727 // Convert FP to signed integer (nearest with ties to even).
1728 void fcvtns(const Register& rd, const FPRegister& fn);
1730 // Convert FP to unsigned integer (round towards zero).
1731 void fcvtzu(const Register& rd, const FPRegister& fn);
1733 // Convert FP to signed integer (round towards zero).
1734 void fcvtzs(const Register& rd, const FPRegister& fn);
1736 // Convert signed integer or fixed point to FP.
1737 void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1739 // Convert unsigned integer or fixed point to FP.
1740 void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1742 // Instruction functions used only for test, debug, and patching.
1743 // Emit raw instructions in the instruction stream.
1744 void dci(Instr raw_inst) { Emit(raw_inst); }
1746 // Emit 8 bits of data in the instruction stream.
1747 void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
1749 // Emit 32 bits of data in the instruction stream.
1750 void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
1752 // Emit 64 bits of data in the instruction stream.
1753 void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1755 // Copy a string into the instruction stream, including the terminating NULL
1756 // character. The instruction pointer (pc_) is then aligned correctly for
1757 // subsequent instructions.
1758 void EmitStringData(const char* string);
1760 // Pseudo-instructions ------------------------------------------------------
1762 // Parameters are described in arm64/instructions-arm64.h.
1763 void debug(const char* message, uint32_t code, Instr params = BREAK);
1766 void dd(uint32_t data) { dc32(data); }
1767 void db(uint8_t data) { dc8(data); }
1769 // Code generation helpers --------------------------------------------------
1771 bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
1773 Instruction* pc() const { return Instruction::Cast(pc_); }
1775 Instruction* InstructionAt(int offset) const {
1776 return reinterpret_cast<Instruction*>(buffer_ + offset);
1779 ptrdiff_t InstructionOffset(Instruction* instr) const {
1780 return reinterpret_cast<byte*>(instr) - buffer_;
1783 // Register encoding.
1784 static Instr Rd(CPURegister rd) {
1785 DCHECK(rd.code() != kSPRegInternalCode);
1786 return rd.code() << Rd_offset;
1789 static Instr Rn(CPURegister rn) {
1790 DCHECK(rn.code() != kSPRegInternalCode);
1791 return rn.code() << Rn_offset;
1794 static Instr Rm(CPURegister rm) {
1795 DCHECK(rm.code() != kSPRegInternalCode);
1796 return rm.code() << Rm_offset;
1799 static Instr Ra(CPURegister ra) {
1800 DCHECK(ra.code() != kSPRegInternalCode);
1801 return ra.code() << Ra_offset;
1804 static Instr Rt(CPURegister rt) {
1805 DCHECK(rt.code() != kSPRegInternalCode);
1806 return rt.code() << Rt_offset;
1809 static Instr Rt2(CPURegister rt2) {
1810 DCHECK(rt2.code() != kSPRegInternalCode);
1811 return rt2.code() << Rt2_offset;
1814 // These encoding functions allow the stack pointer to be encoded, and
1815 // disallow the zero register.
1816 static Instr RdSP(Register rd) {
1817 DCHECK(!rd.IsZero());
1818 return (rd.code() & kRegCodeMask) << Rd_offset;
1821 static Instr RnSP(Register rn) {
1822 DCHECK(!rn.IsZero());
1823 return (rn.code() & kRegCodeMask) << Rn_offset;
1827 inline static Instr Flags(FlagsUpdate S);
1828 inline static Instr Cond(Condition cond);
1830 // PC-relative address encoding.
1831 inline static Instr ImmPCRelAddress(int imm21);
1834 inline static Instr ImmUncondBranch(int imm26);
1835 inline static Instr ImmCondBranch(int imm19);
1836 inline static Instr ImmCmpBranch(int imm19);
1837 inline static Instr ImmTestBranch(int imm14);
1838 inline static Instr ImmTestBranchBit(unsigned bit_pos);
1840 // Data Processing encoding.
1841 inline static Instr SF(Register rd);
1842 inline static Instr ImmAddSub(int64_t imm);
1843 inline static Instr ImmS(unsigned imms, unsigned reg_size);
1844 inline static Instr ImmR(unsigned immr, unsigned reg_size);
1845 inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
1846 inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
1847 inline static Instr ImmLLiteral(int imm19);
1848 inline static Instr BitN(unsigned bitn, unsigned reg_size);
1849 inline static Instr ShiftDP(Shift shift);
1850 inline static Instr ImmDPShift(unsigned amount);
1851 inline static Instr ExtendMode(Extend extend);
1852 inline static Instr ImmExtendShift(unsigned left_shift);
1853 inline static Instr ImmCondCmp(unsigned imm);
1854 inline static Instr Nzcv(StatusFlags nzcv);
1856 static bool IsImmAddSub(int64_t immediate);
1857 static bool IsImmLogical(uint64_t value,
1863 // MemOperand offset encoding.
1864 inline static Instr ImmLSUnsigned(int imm12);
1865 inline static Instr ImmLS(int imm9);
1866 inline static Instr ImmLSPair(int imm7, LSDataSize size);
1867 inline static Instr ImmShiftLS(unsigned shift_amount);
1868 inline static Instr ImmException(int imm16);
1869 inline static Instr ImmSystemRegister(int imm15);
1870 inline static Instr ImmHint(int imm7);
1871 inline static Instr ImmBarrierDomain(int imm2);
1872 inline static Instr ImmBarrierType(int imm2);
1873 inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
1875 static bool IsImmLSUnscaled(int64_t offset);
1876 static bool IsImmLSScaled(int64_t offset, LSDataSize size);
1878 // Move immediates encoding.
1879 inline static Instr ImmMoveWide(uint64_t imm);
1880 inline static Instr ShiftMoveWide(int64_t shift);
1883 static Instr ImmFP32(float imm);
1884 static Instr ImmFP64(double imm);
1885 inline static Instr FPScale(unsigned scale);
1887 // FP register type.
1888 inline static Instr FPType(FPRegister fd);
1890 // Class for scoping postponing the constant pool generation.
1891 class BlockConstPoolScope {
1893 explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1894 assem_->StartBlockConstPool();
1896 ~BlockConstPoolScope() {
1897 assem_->EndBlockConstPool();
1903 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
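// Illustrative usage sketch ('assm' is an assumed Assembler instance): keep a
// sequence of instructions contiguous by blocking constant pool emission for
// the duration of the scope.
//   {
//     Assembler::BlockConstPoolScope scope(&assm);
//     // ... emit instructions that must not be interleaved with a pool ...
//   }   // constant pool emission may resume here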
1906 // Check whether it is time to emit a constant pool.
1907 void CheckConstPool(bool force_emit, bool require_jump);
1909 // Allocate a constant pool of the correct size for the generated code.
1910 Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
1912 // Generate the constant pool for the generated code.
1913 void PopulateConstantPool(ConstantPoolArray* constant_pool);
1915 // Returns true if we should emit a veneer as soon as possible for a branch
1916 // which can at most reach the specified pc.
1917 bool ShouldEmitVeneer(int max_reachable_pc,
1918 int margin = kVeneerDistanceMargin);
1919 bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
1920 return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
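// A sketch of how these helpers are typically used (illustrative only; the
// real call sites are in the .cc files): emit veneers once the current pc
// gets within 'margin' bytes of the earliest unresolved branch limit.
//   if (!unresolved_branches_.empty() && ShouldEmitVeneers()) {
//     EmitVeneers(false /* force_emit */, true /* need_protection */);
//   }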
1923 // The maximum code size generated for a veneer. Currently one branch
1924 // instruction. This is for code size checking purposes, and can be extended
1925 // in the future, for example, if we decide to add nops between the veneers.
1926 static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
1928 void RecordVeneerPool(int location_offset, int size);
1929 // Emits veneers for branches that are approaching their maximum range.
1930 // If need_protection is true, the veneers are protected by a branch jumping over them.
1932 void EmitVeneers(bool force_emit, bool need_protection,
1933 int margin = kVeneerDistanceMargin);
1934 void EmitVeneersGuard() { EmitPoolGuard(); }
1935 // Checks whether veneers need to be emitted at this point.
1936 // If force_emit is set, a veneer is generated for *all* unresolved branches.
1937 void CheckVeneerPool(bool force_emit, bool require_jump,
1938 int margin = kVeneerDistanceMargin);
1940 class BlockPoolsScope {
1942 explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
1943 assem_->StartBlockPools();
1945 ~BlockPoolsScope() {
1946 assem_->EndBlockPools();
1952 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
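// A minimal usage sketch (illustrative): BlockPoolsScope blocks both the
// constant pool and the veneer pool, e.g. around a code sequence whose exact
// layout a patcher relies on. 'assm' is a hypothetical Assembler*.
//   {
//     Assembler::BlockPoolsScope scope(assm);
//     assm->movz(x16, 0);   // these instructions stay contiguous
//     assm->br(x16);
//   }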
1956 inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
1958 void LoadStore(const CPURegister& rt,
1959 const MemOperand& addr,
1962 void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
1963 const MemOperand& addr, LoadStorePairOp op);
1964 static bool IsImmLSPair(int64_t offset, LSDataSize size);
1966 void Logical(const Register& rd,
1968 const Operand& operand,
1970 void LogicalImmediate(const Register& rd,
1977 void ConditionalCompare(const Register& rn,
1978 const Operand& operand,
1981 ConditionalCompareOp op);
1982 static bool IsImmConditionalCompare(int64_t immediate);
1984 void AddSubWithCarry(const Register& rd,
1986 const Operand& operand,
1988 AddSubWithCarryOp op);
1990 // Functions for emulating operands not directly supported by the instruction
1992 void EmitShift(const Register& rd,
1996 void EmitExtendShift(const Register& rd,
1999 unsigned left_shift);
2001 void AddSub(const Register& rd,
2003 const Operand& operand,
2007 static bool IsImmFP32(float imm);
2008 static bool IsImmFP64(double imm);
2010 // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
2011 // registers. Only simple loads are supported; sign- and zero-extension (such
2012 // as in LDPSW_x or LDRB_w) are not supported.
2013 static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
2014 static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
2015 const CPURegister& rt2);
2016 static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
2017 static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
2018 const CPURegister& rt2);
2019 static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
2020 const CPURegister& rt, const CPURegister& rt2);
2021 static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
2022 const CPURegister& rt, const CPURegister& rt2);
2023 static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
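// Illustrative examples (assumed mapping, derived from the register's size
// and type; not an exhaustive list):
//   LoadStoreOp op_w = LoadOpFor(w0);               // LDR_w
//   LoadStoreOp op_d = LoadOpFor(d0);               // LDR_d
//   LoadStorePairOp pair = StorePairOpFor(x0, x1);  // STP_x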
2025 // Remove the specified branch from the unbound label link chain.
2026 // If available, a veneer for this label can be used for other branches in the
2027 // chain if the link chain cannot be fixed up without this branch.
2028 void RemoveBranchFromLabelLinkChain(Instruction* branch,
2030 Instruction* label_veneer = NULL);
2033 // Instruction helpers.
2034 void MoveWide(const Register& rd,
2037 MoveWideImmediateOp mov_op);
2038 void DataProcShiftedRegister(const Register& rd,
2040 const Operand& operand,
2043 void DataProcExtendedRegister(const Register& rd,
2045 const Operand& operand,
2048 void LoadStorePairNonTemporal(const CPURegister& rt,
2049 const CPURegister& rt2,
2050 const MemOperand& addr,
2051 LoadStorePairNonTemporalOp op);
2052 void ConditionalSelect(const Register& rd,
2056 ConditionalSelectOp op);
2057 void DataProcessing1Source(const Register& rd,
2059 DataProcessing1SourceOp op);
2060 void DataProcessing3Source(const Register& rd,
2064 DataProcessing3SourceOp op);
2065 void FPDataProcessing1Source(const FPRegister& fd,
2066 const FPRegister& fn,
2067 FPDataProcessing1SourceOp op);
2068 void FPDataProcessing2Source(const FPRegister& fd,
2069 const FPRegister& fn,
2070 const FPRegister& fm,
2071 FPDataProcessing2SourceOp op);
2072 void FPDataProcessing3Source(const FPRegister& fd,
2073 const FPRegister& fn,
2074 const FPRegister& fm,
2075 const FPRegister& fa,
2076 FPDataProcessing3SourceOp op);
2080 // Return an offset for a label-referencing instruction, typically a branch.
2081 int LinkAndGetByteOffsetTo(Label* label);
2083 // This is the same as LinkAndGetByteOffsetTo, but returns an offset
2084 // suitable for fields that take instruction offsets.
2085 inline int LinkAndGetInstructionOffsetTo(Label* label);
2087 static const int kStartOfLabelLinkChain = 0;
2089 // Verify that a label's link chain is intact.
2090 void CheckLabelLinkChain(Label const * label);
2092 void RecordLiteral(int64_t imm, unsigned size);
2094 // Postpone the generation of the constant pool for the specified number of instructions.
2096 void BlockConstPoolFor(int instructions);
2098 // Set how far from the current pc the next constant pool check will be.
2099 void SetNextConstPoolCheckIn(int instructions) {
2100 next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
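// For example (added note): with kInstructionSize == 4 on arm64,
// SetNextConstPoolCheckIn(128) schedules the next check 512 bytes past the
// current pc_offset().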
2103 // Emit the instruction at pc_.
2104 void Emit(Instr instruction) {
2105 STATIC_ASSERT(sizeof(*pc_) == 1);
2106 STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
2107 DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
2109 memcpy(pc_, &instruction, sizeof(instruction));
2110 pc_ += sizeof(instruction);
2114 // Emit data inline in the instruction stream.
2115 void EmitData(void const * data, unsigned size) {
2116 DCHECK(sizeof(*pc_) == 1);
2117 DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
2119 // TODO(all): Somehow record that we have data here, so that it can be
2120 // disassembled correctly.
2121 memcpy(pc_, data, size);
2127 void CheckBufferSpace();
2130 // Pc offset of the next constant pool check.
2131 int next_constant_pool_check_;
2133 // Constant pool generation
2134 // Pools are emitted in the instruction stream. They are emitted when:
2135 // * the distance to the first use is above a pre-defined distance or
2136 // * the number of entries in the pool is above a pre-defined size or
2137 // * code generation is finished
2138 // If a pool needs to be emitted before code generation is finished, a branch
2139 // over the emitted pool will be inserted.
2141 // Constants in the pool may be addresses of functions that get relocated;
2142 // if so, a relocation info entry is associated with the constant pool entry.
2144 // Repeated checking whether the constant pool should be emitted is rather
2145 // expensive. By default we only check again once a number of instructions
2146 // has been generated. That also means that the sizing of the buffers is not
2147 // an exact science, and that we rely on some slop to not overrun buffers.
2148 static const int kCheckConstPoolInterval = 128;
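// A sketch of the resulting pattern (illustrative; the exact check sites are
// in the .cc file): a cheap comparison runs on the fast path, and the full
// CheckConstPool() work happens at most once per kCheckConstPoolInterval
// instructions.
//   if (pc_offset() >= next_constant_pool_check_) {
//     CheckConstPool(false /* force_emit */, true /* require_jump */);
//   }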
2150 // Distance to first use after which a pool will be emitted. Pool entries
2151 // are accessed with a pc-relative load, therefore this cannot be more than
2152 // 1 * MB. Since constant pool emission checks are interval based, this value
2153 // is an approximation.
2154 static const int kApproxMaxDistToConstPool = 64 * KB;
2156 // Number of pool entries after which a pool will be emitted. Since constant
2157 // pool emission checks are interval based, this value is an approximation.
2158 static const int kApproxMaxPoolEntryCount = 512;
2160 // Emission of the constant pool may be blocked in some code sequences.
2161 int const_pool_blocked_nesting_; // Block emission if this is not zero.
2162 int no_const_pool_before_; // Block emission before this pc offset.
2164 // Emission of the veneer pools may be blocked in some code sequences.
2165 int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
2167 // Relocation info generation
2168 // Each relocation is encoded as a variable size value
2169 static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
2170 RelocInfoWriter reloc_info_writer;
2172 // Relocation info records are also used during code generation as temporary
2173 // containers for constants and code target addresses until they are emitted
2174 // to the constant pool. These pending relocation info records are temporarily
2175 // stored in a separate buffer until a constant pool is emitted.
2176 // If every instruction in a long sequence is accessing the pool, we need one
2177 // pending relocation entry per instruction.
2179 // The pending constant pool.
2180 ConstPool constpool_;
2182 // Relocation for a type-recording IC has the AST id added to it. This
2183 // member variable is a way to pass the information from the call site to
2184 // the relocation info.
2185 TypeFeedbackId recorded_ast_id_;
2187 inline TypeFeedbackId RecordedAstId();
2188 inline void ClearRecordedAstId();
2191 // Record the AST id of the CallIC being compiled, so that it can be placed
2192 // in the relocation information.
2193 void SetRecordedAstId(TypeFeedbackId ast_id) {
2194 DCHECK(recorded_ast_id_.IsNone());
2195 recorded_ast_id_ = ast_id;
2199 // The relocation writer's position is at least kGap bytes below the end of
2200 // the generated instructions. This is so that multi-instruction sequences do
2201 // not have to check for overflow. The same is true for writes of large
2202 // relocation info entries and debug strings encoded in the instruction stream.
2204 static const int kGap = 128;
2207 class FarBranchInfo {
2209 FarBranchInfo(int offset, Label* label)
2210 : pc_offset_(offset), label_(label) {}
2211 // Offset of the branch in the code generation buffer.
2213 // The label branched to.
2218 // Information about unresolved (forward) branches.
2219 // The Assembler is only allowed to delete out-of-date information from here
2220 // after a label is bound. The MacroAssembler uses this information to
2221 // generate veneers.
2223 // The second member gives information about the unresolved branch. The first
2224 // member of the pair is the maximum offset that the branch can reach in the
2225 // buffer. The map is sorted according to this reachable offset, making it
2226 // easy to check when veneers need to be emitted.
2227 // Note that the maximum reachable offset (first member of the pairs) should
2228 // always be positive but has the same type as the return value for
2229 // pc_offset() for convenience.
2230 std::multimap<int, FarBranchInfo> unresolved_branches_;
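// A sketch of how an entry is recorded (illustrative; the actual call sites
// are in the MacroAssembler): when a forward branch to an unbound label is
// emitted, the farthest pc it can reach becomes the map key, so the smallest
// key is always the first limit that could be exceeded. 'label' stands for
// the branch target.
//   int max_reachable_pc =
//       pc_offset() + Instruction::ImmBranchRange(CondBranchType);
//   unresolved_branches_.insert(
//       std::make_pair(max_reachable_pc, FarBranchInfo(pc_offset(), label)));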
2232 // We generate a veneer for a branch if we reach within this distance of the
2233 // limit of the range.
2234 static const int kVeneerDistanceMargin = 1 * KB;
2235 // The factor of 2 is a finger in the air guess. With a default margin of
2236 // 1KB, that leaves us an additional 256 instructions to avoid generating a
2237 // protective branch.
2238 static const int kVeneerNoProtectionFactor = 2;
2239 static const int kVeneerDistanceCheckMargin =
2240 kVeneerNoProtectionFactor * kVeneerDistanceMargin;
2241 int unresolved_branches_first_limit() const {
2242 DCHECK(!unresolved_branches_.empty());
2243 return unresolved_branches_.begin()->first;
2245 // This is similar to next_constant_pool_check_ and helps reduce the overhead
2246 // of checking for veneer pools.
2247 // It is maintained to the closest unresolved branch limit minus the maximum
2248 // veneer margin (or kMaxInt if there are no unresolved branches).
2249 int next_veneer_pool_check_;
2252 // If a veneer is emitted for a branch instruction, that instruction must be
2253 // removed from the associated label's link chain so that the assembler does
2254 // not later attempt (likely unsuccessfully) to patch it to branch directly to the label.
2256 void DeleteUnresolvedBranchInfoForLabel(Label* label);
2257 // This function deletes the information related to the label by traversing
2258 // the label chain, checking for each PC-relative instruction in the chain
2259 // whether pending unresolved information exists. Its complexity is proportional to
2260 // the length of the label chain.
2261 void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
2264 PositionsRecorder positions_recorder_;
2265 friend class PositionsRecorder;
2266 friend class EnsureSpace;
2267 friend class ConstPool;
2270 class PatchingAssembler : public Assembler {
2272 // Create an Assembler with a buffer starting at 'start'.
2273 // The buffer size is
2274 // size of instructions to patch + kGap
2275 // Where kGap is the distance from which the Assembler tries to grow the buffer.
2277 // If more or fewer instructions than expected are generated or if some
2278 // relocation information takes space in the buffer, the PatchingAssembler
2279 // will crash trying to grow the buffer.
2280 PatchingAssembler(Instruction* start, unsigned count)
2282 reinterpret_cast<byte*>(start),
2283 count * kInstructionSize + kGap) {
2287 PatchingAssembler(byte* start, unsigned count)
2288 : Assembler(NULL, start, count * kInstructionSize + kGap) {
2289 // Block constant pool emission.
2293 ~PatchingAssembler() {
2294 // Const pool should still be blocked.
2295 DCHECK(is_const_pool_blocked());
2297 // Verify we have generated the number of instructions we expected.
2298 DCHECK((pc_offset() + kGap) == buffer_size_);
2299 // Verify no relocation information has been emitted.
2300 DCHECK(IsConstPoolEmpty());
2301 // Flush the Instruction cache.
2302 size_t length = buffer_size_ - kGap;
2303 CpuFeatures::FlushICache(buffer_, length);
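// A minimal usage sketch (illustrative): patch exactly two instructions in
// place; the destructor then verifies the instruction count and flushes the
// instruction cache over the patched region. 'patch_address' is a
// hypothetical Instruction* pointing at the code to overwrite.
//   PatchingAssembler patcher(patch_address, 2);
//   patcher.brk(0);
//   patcher.brk(0);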
2306 // See definition of PatchAdrFar() for details.
2307 static const int kAdrFarPatchableNNops = 2;
2308 static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
2309 void PatchAdrFar(int64_t target_offset);
2313 class EnsureSpace BASE_EMBEDDED {
2315 explicit EnsureSpace(Assembler* assembler) {
2316 assembler->CheckBufferSpace();
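// A minimal usage sketch (illustrative, mirroring how other V8 back ends use
// this helper): construct an EnsureSpace before emitting so the buffer is
// grown up front rather than mid-sequence. 'assm' is a hypothetical
// Assembler*.
//   EnsureSpace ensure_space(assm);
//   assm->nop();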
2320 } } // namespace v8::internal
2322 #endif // V8_ARM64_ASSEMBLER_ARM64_H_