1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_ARM64_ASSEMBLER_ARM64_H_
6 #define V8_ARM64_ASSEMBLER_ARM64_H_
12 #include "src/arm64/instructions-arm64.h"
13 #include "src/assembler.h"
14 #include "src/globals.h"
15 #include "src/serialize.h"
16 #include "src/utils.h"
18 namespace v8 {
19 namespace internal {
23 // -----------------------------------------------------------------------------
25 #define REGISTER_CODE_LIST(R) \
26 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
27 R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
28 R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
29 R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
32 static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
35 // Some CPURegister methods can return Register and FPRegister types, so we
36 // need to declare them in advance.
37 struct Register;
38 struct FPRegister;
40 struct CPURegister {
41 enum RegisterType {
43 // The kInvalid value is used to detect uninitialized static instances,
44 // which are always zero-initialized before any constructors are called.
45 kInvalid = 0,
46 kRegister,
47 kFPRegister,
48 kNoRegister
49 };
51 static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
52 CPURegister r = {code, size, type};
53 return r;
54 }
56 unsigned code() const;
57 RegisterType type() const;
59 unsigned SizeInBits() const;
60 int SizeInBytes() const;
61 bool Is32Bits() const;
62 bool Is64Bits() const;
64 bool IsValidOrNone() const;
65 bool IsValidRegister() const;
66 bool IsValidFPRegister() const;
68 bool Is(const CPURegister& other) const;
69 bool Aliases(const CPURegister& other) const;
74 bool IsRegister() const;
75 bool IsFPRegister() const;
82 bool IsSameSizeAndType(const CPURegister& other) const;
85 bool is(const CPURegister& other) const { return Is(other); }
86 bool is_valid() const { return IsValid(); }
88 unsigned reg_code;
89 unsigned reg_size;
90 RegisterType reg_type;
91 };
94 struct Register : public CPURegister {
95 static Register Create(unsigned code, unsigned size) {
96 return Register(CPURegister::Create(code, size, CPURegister::kRegister));
97 }
99 Register() {  // NOLINT(runtime/explicit)
100 reg_code = 0;
101 reg_size = 0;
102 reg_type = CPURegister::kNoRegister;
103 }
105 explicit Register(const CPURegister& r) {
106 reg_code = r.reg_code;
107 reg_size = r.reg_size;
108 reg_type = r.reg_type;
109 DCHECK(IsValidOrNone());
112 Register(const Register& r) { // NOLINT(runtime/explicit)
113 reg_code = r.reg_code;
114 reg_size = r.reg_size;
115 reg_type = r.reg_type;
116 DCHECK(IsValidOrNone());
119 bool IsValid() const {
120 DCHECK(IsRegister() || IsNone());
121 return IsValidRegister();
124 static Register XRegFromCode(unsigned code);
125 static Register WRegFromCode(unsigned code);
127 // Start of V8 compatibility section ---------------------
128 // These members are necessary for compilation.
129 // A few of them may be unused for now.
131 static const int kNumRegisters = kNumberOfRegisters;
132 static int NumRegisters() { return kNumRegisters; }
134 // We allow crankshaft to use the following registers:
135 //   - x0 to x15
136 //   - x18 to x24
137 //   - x27 (also context)
139 // TODO(all): Register x25 is currently free and could be available for
140 // crankshaft, but we don't use it as we might use it as a per function
141 // literal pool pointer in the future.
143 // TODO(all): Consider storing cp in x25 to have only two ranges.
144 // We split allocatable registers in three ranges called
145 //   - the low range, x0 to x15,
146 //   - the high range, x18 to x24,
147 //   - the context, x27.
148 static const unsigned kAllocatableLowRangeBegin = 0;
149 static const unsigned kAllocatableLowRangeEnd = 15;
150 static const unsigned kAllocatableHighRangeBegin = 18;
151 static const unsigned kAllocatableHighRangeEnd = 24;
152 static const unsigned kAllocatableContext = 27;
154 // Gap between low and high ranges.
155 static const int kAllocatableRangeGapSize =
156 (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
158 static const int kMaxNumAllocatableRegisters =
159 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
160 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1; // cp
161 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
163 // Return true if the register is one that crankshaft can allocate.
164 bool IsAllocatable() const {
165 return ((reg_code == kAllocatableContext) ||
166 (reg_code <= kAllocatableLowRangeEnd) ||
167 ((reg_code >= kAllocatableHighRangeBegin) &&
168 (reg_code <= kAllocatableHighRangeEnd)));
171 static Register FromAllocationIndex(unsigned index) {
172 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
173 // cp is the last allocatable register.
174 if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
175 return from_code(kAllocatableContext);
176 }
178 // Handle low and high ranges.
179 return (index <= kAllocatableLowRangeEnd)
180 ? from_code(index)
181 : from_code(index + kAllocatableRangeGapSize);
182 }
184 static const char* AllocationIndexToString(int index) {
185 DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
186 DCHECK((kAllocatableLowRangeBegin == 0) &&
187 (kAllocatableLowRangeEnd == 15) &&
188 (kAllocatableHighRangeBegin == 18) &&
189 (kAllocatableHighRangeEnd == 24) &&
190 (kAllocatableContext == 27));
191 const char* const names[] = {
192 "x0", "x1", "x2", "x3", "x4",
193 "x5", "x6", "x7", "x8", "x9",
194 "x10", "x11", "x12", "x13", "x14",
195 "x15", "x18", "x19", "x20", "x21",
196 "x22", "x23", "x24", "x27",
201 static int ToAllocationIndex(Register reg) {
202 DCHECK(reg.IsAllocatable());
203 unsigned code = reg.code();
204 if (code == kAllocatableContext) {
205 return NumAllocatableRegisters() - 1;
206 }
208 return (code <= kAllocatableLowRangeEnd)
209 ? code
210 : code - kAllocatableRangeGapSize;
211 }
213 static Register from_code(int code) {
214 // Always return an X register.
215 return Register::Create(code, kXRegSizeInBits);
218 // End of V8 compatibility section -----------------------
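// Illustrative sketch (not part of the original header): the index <->
// register mapping implied by the ranges above. Indices 0-15 map to x0-x15,
// indices 16-22 skip the two-register gap (x16, x17) and map to x18-x24, and
// the last index maps to the context register.
//
//   Register::FromAllocationIndex(0);   // -> x0  (low range)
//   Register::FromAllocationIndex(16);  // -> x18 (high range, gap of 2)
//   Register::FromAllocationIndex(23);  // -> x27 (cp, the last index)
//   Register::ToAllocationIndex(x20);   // -> 18  (20 - kAllocatableRangeGapSize)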
222 struct FPRegister : public CPURegister {
223 static FPRegister Create(unsigned code, unsigned size) {
224 return FPRegister(
225 CPURegister::Create(code, size, CPURegister::kFPRegister));
226 }
228 FPRegister() {  // NOLINT(runtime/explicit)
229 reg_code = 0;
230 reg_size = 0;
231 reg_type = CPURegister::kNoRegister;
232 }
234 explicit FPRegister(const CPURegister& r) {
235 reg_code = r.reg_code;
236 reg_size = r.reg_size;
237 reg_type = r.reg_type;
238 DCHECK(IsValidOrNone());
241 FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
242 reg_code = r.reg_code;
243 reg_size = r.reg_size;
244 reg_type = r.reg_type;
245 DCHECK(IsValidOrNone());
248 bool IsValid() const {
249 DCHECK(IsFPRegister() || IsNone());
250 return IsValidFPRegister();
253 static FPRegister SRegFromCode(unsigned code);
254 static FPRegister DRegFromCode(unsigned code);
256 // Start of V8 compatibility section ---------------------
257 static const int kMaxNumRegisters = kNumberOfFPRegisters;
259 // Crankshaft can use all the FP registers except:
260 // - d15 which is used to keep the 0 double value
261 // - d30 which is used in crankshaft as a double scratch register
262 // - d31 which is used in the MacroAssembler as a double scratch register
263 static const unsigned kAllocatableLowRangeBegin = 0;
264 static const unsigned kAllocatableLowRangeEnd = 14;
265 static const unsigned kAllocatableHighRangeBegin = 16;
266 static const unsigned kAllocatableHighRangeEnd = 28;
268 static const RegList kAllocatableFPRegisters = 0x1fff7fff;
270 // Gap between low and high ranges.
271 static const int kAllocatableRangeGapSize =
272 (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;
274 static const int kMaxNumAllocatableRegisters =
275 (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
276 (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
277 static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
279 // TODO(turbofan): Proper float32 support.
280 static int NumAllocatableAliasedRegisters() {
281 return NumAllocatableRegisters();
284 // Return true if the register is one that crankshaft can allocate.
285 bool IsAllocatable() const {
286 return (Bit() & kAllocatableFPRegisters) != 0;
289 static FPRegister FromAllocationIndex(unsigned int index) {
290 DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
292 return (index <= kAllocatableLowRangeEnd)
293 ? from_code(index)
294 : from_code(index + kAllocatableRangeGapSize);
295 }
297 static const char* AllocationIndexToString(int index) {
298 DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
299 DCHECK((kAllocatableLowRangeBegin == 0) &&
300 (kAllocatableLowRangeEnd == 14) &&
301 (kAllocatableHighRangeBegin == 16) &&
302 (kAllocatableHighRangeEnd == 28));
303 const char* const names[] = {
304 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
305 "d8", "d9", "d10", "d11", "d12", "d13", "d14",
306 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
307 "d24", "d25", "d26", "d27", "d28"
312 static int ToAllocationIndex(FPRegister reg) {
313 DCHECK(reg.IsAllocatable());
314 unsigned code = reg.code();
316 return (code <= kAllocatableLowRangeEnd)
317 ? code
318 : code - kAllocatableRangeGapSize;
319 }
321 static FPRegister from_code(int code) {
322 // Always return a D register.
323 return FPRegister::Create(code, kDRegSizeInBits);
325 // End of V8 compatibility section -----------------------
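// Illustrative sketch (not part of the original header): the FP mapping skips
// only d15, so the gap size is 1.
//
//   FPRegister::FromAllocationIndex(14);  // -> d14 (low range)
//   FPRegister::FromAllocationIndex(15);  // -> d16 (high range, skips d15)
//   FPRegister::ToAllocationIndex(d20);   // -> 19
//   d15.IsAllocatable();                  // -> false: bit 15 is clear in
//                                         //    kAllocatableFPRegisters.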
329 STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
330 STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));
333 #if defined(ARM64_DEFINE_REG_STATICS)
334 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
335 const CPURegister init_##register_class##_##name = {code, size, type}; \
336 const register_class& name = *reinterpret_cast<const register_class*>( \
337 &init_##register_class##_##name)
338 #define ALIAS_REGISTER(register_class, alias, name) \
339 const register_class& alias = *reinterpret_cast<const register_class*>( \
340 &init_##register_class##_##name)
341 #else
342 #define INITIALIZE_REGISTER(register_class, name, code, size, type) \
343 extern const register_class& name
344 #define ALIAS_REGISTER(register_class, alias, name) \
345 extern const register_class& alias
346 #endif // defined(ARM64_DEFINE_REG_STATICS)
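// A sketch of how the macros above expand (illustrative only). With
// ARM64_DEFINE_REG_STATICS defined, INITIALIZE_REGISTER(Register, x0, 0,
// kXRegSizeInBits, CPURegister::kRegister) emits the definitions
//
//   const CPURegister init_Register_x0 = {0, kXRegSizeInBits,
//                                         CPURegister::kRegister};
//   const Register& x0 = *reinterpret_cast<const Register*>(&init_Register_x0);
//
// while every other translation unit only sees the declaration
//
//   extern const Register& x0;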
348 // No*Reg is used to indicate an unused argument, or an error case. Note that
349 // these all compare equal (using the Is() method). The Register and FPRegister
350 // variants are provided for convenience.
351 INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
352 INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
353 INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);
356 INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);
358 #define DEFINE_REGISTERS(N) \
359 INITIALIZE_REGISTER(Register, w##N, N, \
360 kWRegSizeInBits, CPURegister::kRegister); \
361 INITIALIZE_REGISTER(Register, x##N, N, \
362 kXRegSizeInBits, CPURegister::kRegister);
363 REGISTER_CODE_LIST(DEFINE_REGISTERS)
364 #undef DEFINE_REGISTERS
366 INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
367 CPURegister::kRegister);
368 INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
369 CPURegister::kRegister);
371 #define DEFINE_FPREGISTERS(N) \
372 INITIALIZE_REGISTER(FPRegister, s##N, N, \
373 kSRegSizeInBits, CPURegister::kFPRegister); \
374 INITIALIZE_REGISTER(FPRegister, d##N, N, \
375 kDRegSizeInBits, CPURegister::kFPRegister);
376 REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
377 #undef DEFINE_FPREGISTERS
379 #undef INITIALIZE_REGISTER
381 // Register aliases.
382 ALIAS_REGISTER(Register, ip0, x16);
383 ALIAS_REGISTER(Register, ip1, x17);
384 ALIAS_REGISTER(Register, wip0, w16);
385 ALIAS_REGISTER(Register, wip1, w17);
387 ALIAS_REGISTER(Register, root, x26);
388 ALIAS_REGISTER(Register, rr, x26);
389 // Context pointer register.
390 ALIAS_REGISTER(Register, cp, x27);
391 // We use a register as a JS stack pointer to overcome the restriction on the
392 // architectural SP alignment.
393 // We chose x28 because it is contiguous with the other specific purpose
394 // registers.
395 STATIC_ASSERT(kJSSPCode == 28);
396 ALIAS_REGISTER(Register, jssp, x28);
397 ALIAS_REGISTER(Register, wjssp, w28);
398 ALIAS_REGISTER(Register, fp, x29);
399 ALIAS_REGISTER(Register, lr, x30);
400 ALIAS_REGISTER(Register, xzr, x31);
401 ALIAS_REGISTER(Register, wzr, w31);
403 // Keeps the 0 double value.
404 ALIAS_REGISTER(FPRegister, fp_zero, d15);
405 // Crankshaft double scratch register.
406 ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
407 // MacroAssembler double scratch registers.
408 ALIAS_REGISTER(FPRegister, fp_scratch, d30);
409 ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
410 ALIAS_REGISTER(FPRegister, fp_scratch2, d31);
412 #undef ALIAS_REGISTER
415 Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
416 Register reg2 = NoReg,
417 Register reg3 = NoReg,
418 Register reg4 = NoReg);
421 // AreAliased returns true if any of the named registers overlap. Arguments set
422 // to NoReg are ignored. The system stack pointer may be specified.
423 bool AreAliased(const CPURegister& reg1,
424 const CPURegister& reg2,
425 const CPURegister& reg3 = NoReg,
426 const CPURegister& reg4 = NoReg,
427 const CPURegister& reg5 = NoReg,
428 const CPURegister& reg6 = NoReg,
429 const CPURegister& reg7 = NoReg,
430 const CPURegister& reg8 = NoReg);
432 // AreSameSizeAndType returns true if all of the specified registers have the
433 // same size, and are of the same type. The system stack pointer may be
434 // specified. Arguments set to NoReg are ignored, as are any subsequent
435 // arguments. At least one argument (reg1) must be valid (not NoCPUReg).
436 bool AreSameSizeAndType(const CPURegister& reg1,
437 const CPURegister& reg2,
438 const CPURegister& reg3 = NoCPUReg,
439 const CPURegister& reg4 = NoCPUReg,
440 const CPURegister& reg5 = NoCPUReg,
441 const CPURegister& reg6 = NoCPUReg,
442 const CPURegister& reg7 = NoCPUReg,
443 const CPURegister& reg8 = NoCPUReg);
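// Illustrative examples (not part of the original header):
//
//   AreAliased(x0, x1);          // false: distinct register codes.
//   AreAliased(x0, w0);          // true: w0 is the 32-bit view of x0.
//   AreSameSizeAndType(x0, x1);  // true: both 64-bit general registers.
//   AreSameSizeAndType(x0, w1);  // false: mixed 64-bit and 32-bit.
//   AreSameSizeAndType(x0, d0);  // false: mixed register and FP register.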
446 typedef FPRegister DoubleRegister;
449 // -----------------------------------------------------------------------------
450 // Lists of registers.
451 class CPURegList {
452 public:
453 explicit CPURegList(CPURegister reg1,
454 CPURegister reg2 = NoCPUReg,
455 CPURegister reg3 = NoCPUReg,
456 CPURegister reg4 = NoCPUReg)
457 : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
458 size_(reg1.SizeInBits()), type_(reg1.type()) {
459 DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
463 CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
464 : list_(list), size_(size), type_(type) {
468 CPURegList(CPURegister::RegisterType type, unsigned size,
469 unsigned first_reg, unsigned last_reg)
470 : size_(size), type_(type) {
471 DCHECK(((type == CPURegister::kRegister) &&
472 (last_reg < kNumberOfRegisters)) ||
473 ((type == CPURegister::kFPRegister) &&
474 (last_reg < kNumberOfFPRegisters)));
475 DCHECK(last_reg >= first_reg);
476 list_ = (1UL << (last_reg + 1)) - 1;
477 list_ &= ~((1UL << first_reg) - 1);
481 CPURegister::RegisterType type() const {
482 DCHECK(IsValid());
483 return type_;
484 }
486 RegList list() const {
487 DCHECK(IsValid());
488 return list_;
489 }
491 inline void set_list(RegList new_list) {
492 DCHECK(IsValid());
493 list_ = new_list;
494 }
496 // Combine another CPURegList into this one. Registers that already exist in
497 // this list are left unchanged. The type and size of the registers in the
498 // 'other' list must match those in this list.
499 void Combine(const CPURegList& other);
501 // Remove every register in the other CPURegList from this one. Registers that
502 // do not exist in this list are ignored. The type of the registers in the
503 // 'other' list must match those in this list.
504 void Remove(const CPURegList& other);
506 // Variants of Combine and Remove which take CPURegisters.
507 void Combine(const CPURegister& other);
508 void Remove(const CPURegister& other1,
509 const CPURegister& other2 = NoCPUReg,
510 const CPURegister& other3 = NoCPUReg,
511 const CPURegister& other4 = NoCPUReg);
513 // Variants of Combine and Remove which take a single register by its code;
514 // the type and size of the register is inferred from this list.
515 void Combine(int code);
516 void Remove(int code);
518 // Remove all callee-saved registers from the list. This can be useful when
519 // preparing registers for an AAPCS64 function call, for example.
520 void RemoveCalleeSaved();
522 CPURegister PopLowestIndex();
523 CPURegister PopHighestIndex();
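// Illustrative sketch (not part of the original header): building and
// consuming a register list.
//
//   CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 3);  // x0-x3.
//   list.Remove(x2);                               // Now {x0, x1, x3}.
//   list.Combine(x4);                              // Now {x0, x1, x3, x4}.
//   CPURegister lowest = list.PopLowestIndex();    // Returns x0.
//   CPURegister highest = list.PopHighestIndex();  // Returns x4.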
525 // AAPCS64 callee-saved registers.
526 static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
527 static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);
529 // AAPCS64 caller-saved registers. Note that this includes lr.
530 static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
531 static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);
533 // Registers saved as safepoints.
534 static CPURegList GetSafepointSavedRegisters();
536 bool IsEmpty() const {
537 DCHECK(IsValid());
538 return list_ == 0;
539 }
541 bool IncludesAliasOf(const CPURegister& other1,
542 const CPURegister& other2 = NoCPUReg,
543 const CPURegister& other3 = NoCPUReg,
544 const CPURegister& other4 = NoCPUReg) const {
545 DCHECK(IsValid());
546 RegList list = 0;
547 if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
548 if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
549 if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
550 if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
551 return (list_ & list) != 0;
554 int Count() const {
555 DCHECK(IsValid());
556 return CountSetBits(list_, kRegListSizeInBits);
557 }
559 unsigned RegisterSizeInBits() const {
560 DCHECK(IsValid());
561 return size_;
562 }
564 unsigned RegisterSizeInBytes() const {
565 int size_in_bits = RegisterSizeInBits();
566 DCHECK((size_in_bits % kBitsPerByte) == 0);
567 return size_in_bits / kBitsPerByte;
570 unsigned TotalSizeInBytes() const {
572 return RegisterSizeInBytes() * Count();
573 }
575 private:
576 RegList list_;
577 unsigned size_;
578 CPURegister::RegisterType type_;
580 bool IsValid() const {
581 const RegList kValidRegisters = 0x80000000ffffffff;
582 const RegList kValidFPRegisters = 0x00000000ffffffff;
583 switch (type_) {
584 case CPURegister::kRegister:
585 return (list_ & kValidRegisters) == list_;
586 case CPURegister::kFPRegister:
587 return (list_ & kValidFPRegisters) == list_;
588 case CPURegister::kNoRegister:
589 return list_ == 0;
590 default:
591 UNREACHABLE();
592 return false;
593 }
594 }
596 };
598 // AAPCS64 callee-saved registers.
599 #define kCalleeSaved CPURegList::GetCalleeSaved()
600 #define kCalleeSavedFP CPURegList::GetCalleeSavedFP()
603 // AAPCS64 caller-saved registers. Note that this includes lr.
604 #define kCallerSaved CPURegList::GetCallerSaved()
605 #define kCallerSavedFP CPURegList::GetCallerSavedFP()
607 // -----------------------------------------------------------------------------
609 class Immediate {
610 public:
611 template<typename T>
612 inline explicit Immediate(Handle<T> handle);
614 // This is allowed to be an implicit constructor because Immediate is
615 // a wrapper class that doesn't normally perform any type conversion.
616 template<typename T>
617 inline Immediate(T value);  // NOLINT(runtime/explicit)
619 template<typename T>
620 inline Immediate(T value, RelocInfo::Mode rmode);
622 int64_t value() const { return value_; }
623 RelocInfo::Mode rmode() const { return rmode_; }
625 private:
626 void InitializeHandle(Handle<Object> value);
628 int64_t value_;
629 RelocInfo::Mode rmode_;
630 };
633 // -----------------------------------------------------------------------------
635 const int kSmiShift = kSmiTagSize + kSmiShiftSize;
636 const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
638 // Represents an operand in a machine instruction.
639 class Operand {
640 // TODO(all): If necessary, study in more detail which methods
641 // TODO(all): should be inlined or not.
642 public:
643 // rm, {<shift> {#<shift_amount>}}
644 // where <shift> is one of {LSL, LSR, ASR, ROR}.
645 // <shift_amount> is uint6_t.
646 // This is allowed to be an implicit constructor because Operand is
647 // a wrapper class that doesn't normally perform any type conversion.
648 inline Operand(Register reg,
649 Shift shift = LSL,
650 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)
652 // rm, <extend> {#<shift_amount>}
653 // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
654 // <shift_amount> is uint2_t.
655 inline Operand(Register reg,
656 Extend extend,
657 unsigned shift_amount = 0);
659 template<typename T>
660 inline explicit Operand(Handle<T> handle);
662 // Implicit constructor for all int types, ExternalReference, and Smi.
663 template<typename T>
664 inline Operand(T t);  // NOLINT(runtime/explicit)
666 // Implicit constructor for int types.
667 template<typename T>
668 inline Operand(T t, RelocInfo::Mode rmode);
670 inline bool IsImmediate() const;
671 inline bool IsShiftedRegister() const;
672 inline bool IsExtendedRegister() const;
673 inline bool IsZero() const;
675 // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
676 // which helps in the encoding of instructions that use the stack pointer.
677 inline Operand ToExtendedRegister() const;
679 inline Immediate immediate() const;
680 inline int64_t ImmediateValue() const;
681 inline Register reg() const;
682 inline Shift shift() const;
683 inline Extend extend() const;
684 inline unsigned shift_amount() const;
686 // Relocation information.
687 bool NeedsRelocation(const Assembler* assembler) const;
690 inline static Operand UntagSmi(Register smi);
691 inline static Operand UntagSmiAndScale(Register smi, int scale);
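// Illustrative examples of the Operand forms above (not part of the original
// header):
//
//   Operand(x1);            // Plain register.
//   Operand(x1, LSL, 4);    // x1 shifted left by 4.
//   Operand(w1, UXTW, 2);   // w1 zero-extended to 64 bits, then shifted by 2.
//   Operand(256);           // Immediate.
//   Operand::UntagSmi(x2);  // x2 shifted right by kSmiShift.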
693 private:
694 Immediate immediate_;
695 Register reg_;
696 Shift shift_;
697 Extend extend_;
698 unsigned shift_amount_;
699 };
702 // MemOperand represents a memory operand in a load or store instruction.
703 class MemOperand {
704 public:
706 inline explicit MemOperand(Register base,
707 int64_t offset = 0,
708 AddrMode addrmode = Offset);
709 inline explicit MemOperand(Register base,
710 Register regoffset,
711 Shift shift = LSL,
712 unsigned shift_amount = 0);
713 inline explicit MemOperand(Register base,
714 Register regoffset,
715 Extend extend,
716 unsigned shift_amount = 0);
717 inline explicit MemOperand(Register base,
718 const Operand& offset,
719 AddrMode addrmode = Offset);
721 const Register& base() const { return base_; }
722 const Register& regoffset() const { return regoffset_; }
723 int64_t offset() const { return offset_; }
724 AddrMode addrmode() const { return addrmode_; }
725 Shift shift() const { return shift_; }
726 Extend extend() const { return extend_; }
727 unsigned shift_amount() const { return shift_amount_; }
728 inline bool IsImmediateOffset() const;
729 inline bool IsRegisterOffset() const;
730 inline bool IsPreIndex() const;
731 inline bool IsPostIndex() const;
733 // For offset modes, return the offset as an Operand. This helper cannot
734 // handle indexed modes.
735 inline Operand OffsetAsOperand() const;
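// Illustrative examples of the addressing modes above (not part of the
// original header):
//
//   MemOperand(x0, 8);              // [x0 + 8], immediate offset.
//   MemOperand(x0, x1, LSL, 3);     // [x0 + (x1 << 3)], register offset.
//   MemOperand(x0, 16, PreIndex);   // [x0 + 16]!, base updated before access.
//   MemOperand(x0, 16, PostIndex);  // [x0], base updated after access.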
737 enum PairResult {
738 kNotPair,  // Can't use a pair instruction.
739 kPairAB, // Can use a pair instruction (operandA has lower address).
740 kPairBA    // Can use a pair instruction (operandB has lower address).
741 };
742 // Check if two MemOperand are consistent for stp/ldp use.
743 static PairResult AreConsistentForPair(const MemOperand& operandA,
744 const MemOperand& operandB,
745 int access_size_log2 = kXRegSizeLog2);
747 private:
748 Register base_;
749 Register regoffset_;
750 int64_t offset_;
751 AddrMode addrmode_;
752 Shift shift_;
753 Extend extend_;
754 unsigned shift_amount_;
755 };
757 class ConstPool {
758 public:
760 explicit ConstPool(Assembler* assm)
761 : assm_(assm),
762 first_use_(-1),
763 shared_entries_count(0) {}
764 void RecordEntry(intptr_t data, RelocInfo::Mode mode);
765 int EntryCount() const {
766 return shared_entries_count + unique_entries_.size();
768 bool IsEmpty() const {
769 return shared_entries_.empty() && unique_entries_.empty();
771 // Distance in bytes between the current pc and the first instruction
772 // using the pool. If there are no pending entries return kMaxInt.
773 int DistanceToFirstUse();
774 // Offset after which instructions using the pool will be out of range.
776 // Maximum size the constant pool can be with current entries. It always
777 // includes the alignment padding and the branch over the pool.
779 // Size in bytes of the literal pool *if* it is emitted at the current
780 // pc. The size will include the branch over the pool if it was requested.
781 int SizeIfEmittedAtCurrentPc(bool require_jump);
782 // Emit the literal pool at the current pc with a branch over the pool if
783 // requested.
784 void Emit(bool require_jump);
785 // Discard any pending pool entries.
786 void Clear();
788 private:
789 bool CanBeShared(RelocInfo::Mode mode);
795 // Keep track of the first instruction requiring a constant pool entry
796 // since the previous constant pool was emitted.
797 int first_use_;
798 // Values, with the pc offset(s) of entries which can be shared.
799 std::multimap<uint64_t, int> shared_entries_;
800 // Number of distinct literals in shared entries.
801 int shared_entries_count;
802 // Values, with the pc offset of entries which cannot be shared.
803 std::vector<std::pair<uint64_t, int> > unique_entries_;
805 Assembler* assm_;
806 };
807 // -----------------------------------------------------------------------------
810 class Assembler : public AssemblerBase {
812 // Create an assembler. Instructions and relocation information are emitted
813 // into a buffer, with the instructions starting from the beginning and the
814 // relocation information starting from the end of the buffer. See CodeDesc
815 // for a detailed comment on the layout (globals.h).
817 // If the provided buffer is NULL, the assembler allocates and grows its own
818 // buffer, and buffer_size determines the initial buffer size. The buffer is
819 // owned by the assembler and deallocated upon destruction of the assembler.
821 // If the provided buffer is not NULL, the assembler uses the provided buffer
822 // for code generation and assumes its size to be buffer_size. If the buffer
823 // is too small, a fatal error occurs. No deallocation of the buffer is done
824 // upon destruction of the assembler.
825 Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
827 virtual ~Assembler();
829 virtual void AbortedCodeGeneration() {
830 constpool_.Clear();
831 }
833 // System functions ---------------------------------------------------------
834 // Start generating code from the beginning of the buffer, discarding any code
835 // and data that has already been emitted into the buffer.
837 // In order to avoid any accidental transfer of state, Reset DCHECKs that the
838 // constant pool is not blocked.
841 // GetCode emits any pending (non-emitted) code and fills the descriptor
842 // desc. GetCode() is idempotent; it returns the same result if no other
843 // Assembler functions are invoked in between GetCode() calls.
845 // The descriptor (desc) can be NULL. In that case, the code is finalized as
846 // usual, but the descriptor is not populated.
847 void GetCode(CodeDesc* desc);
849 // Insert the smallest number of nop instructions
850 // possible to align the pc offset to a multiple
851 // of m. m must be a power of 2 (>= 4).
852 void Align(int m);
854 inline void Unreachable();
856 // Label --------------------------------------------------------------------
857 // Bind a label to the current pc. Note that labels can only be bound once,
858 // and if labels are linked to other instructions, they _must_ be bound
859 // before they go out of scope.
860 void bind(Label* label);
863 // RelocInfo and pools ------------------------------------------------------
865 // Record relocation information for current pc_.
866 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
868 // Return the address in the constant pool of the code target address used by
869 // the branch/call instruction at pc.
870 inline static Address target_pointer_address_at(Address pc);
872 // Read/Modify the code target address in the branch/call instruction at pc.
873 inline static Address target_address_at(Address pc,
874 ConstantPoolArray* constant_pool);
875 inline static void set_target_address_at(Address pc,
876 ConstantPoolArray* constant_pool,
877 Address target,
878 ICacheFlushMode icache_flush_mode =
879 FLUSH_ICACHE_IF_NEEDED);
880 static inline Address target_address_at(Address pc, Code* code);
881 static inline void set_target_address_at(Address pc,
882 Code* code,
883 Address target,
884 ICacheFlushMode icache_flush_mode =
885 FLUSH_ICACHE_IF_NEEDED);
887 // Return the code target address at a call site from the return address of
888 // that call in the instruction stream.
889 inline static Address target_address_from_return_address(Address pc);
891 // Given the address of the beginning of a call, return the address in the
892 // instruction stream that call will return from.
893 inline static Address return_address_from_call_start(Address pc);
895 // Return the code target address of the patch debug break slot
896 inline static Address break_address_from_return_address(Address pc);
898 // This sets the branch destination (which is in the constant pool on ARM).
899 // This is for calls and branches within generated code.
900 inline static void deserialization_set_special_target_at(
901 Address constant_pool_entry, Code* code, Address target);
903 // All addresses in the constant pool are the same size as pointers.
904 static const int kSpecialTargetSize = kPointerSize;
906 // The sizes of the call sequences emitted by MacroAssembler::Call.
907 // Wherever possible, use MacroAssembler::CallSize instead of these constants,
908 // as it will choose the correct value for a given relocation mode.
910 // Without relocation:
911 // movz temp, #(target & 0x000000000000ffff)
912 // movk temp, #(target & 0x00000000ffff0000)
913 //  movk temp, #(target & 0x0000ffff00000000)
914 //  blr temp
916 // With relocation:
917 //  ldr temp, =target
918 //  blr temp
919 static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
920 static const int kCallSizeWithRelocation = 2 * kInstructionSize;
922 // Size of the generated code in bytes
923 uint64_t SizeOfGeneratedCode() const {
924 DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
925 return pc_ - buffer_;
928 // Return the code size generated from label to the current position.
929 uint64_t SizeOfCodeGeneratedSince(const Label* label) {
930 DCHECK(label->is_bound());
931 DCHECK(pc_offset() >= label->pos());
932 DCHECK(pc_offset() < buffer_size_);
933 return pc_offset() - label->pos();
936 // Check the size of the code generated since the given label. This function
937 // is used primarily to work around comparisons between signed and unsigned
938 // quantities, since V8 uses both.
939 // TODO(jbramley): Work out what sign to use for these things and if possible,
940 // change things to be consistent.
941 void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
943 DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
946 // Return the number of instructions generated from label to the
948 int InstructionsGeneratedSince(const Label* label) {
949 return SizeOfCodeGeneratedSince(label) / kInstructionSize;
950 }
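// Illustrative sketch (not part of the original header; assumes the usual
// '__' shorthand for an Assembler instance):
//
//   Label start;
//   __ bind(&start);
//   __ nop();
//   // InstructionsGeneratedSince(&start) now returns 1.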
952 // Number of instructions generated for the return sequence in
953 // FullCodeGenerator::EmitReturnSequence.
954 static const int kJSRetSequenceInstructions = 7;
955 // Distance between start of patched return sequence and the emitted address
956 // to jump to.
957 static const int kPatchReturnSequenceAddressOffset = 0;
958 static const int kPatchDebugBreakSlotAddressOffset = 0;
960 // Number of instructions necessary to be able to later patch it to a call.
961 // See DebugCodegen::GenerateSlot() and
962 // BreakLocationIterator::SetDebugBreakAtSlot().
963 static const int kDebugBreakSlotInstructions = 4;
964 static const int kDebugBreakSlotLength =
965 kDebugBreakSlotInstructions * kInstructionSize;
967 static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
969 // Prevent constant pool emission until EndBlockConstPool is called.
970 // Calls to this function can be nested but must be followed by an equal
971 // number of calls to EndBlockConstPool.
972 void StartBlockConstPool();
974 // Resume constant pool emission. Needs to be called as many times as
975 // StartBlockConstPool to have an effect.
976 void EndBlockConstPool();
978 bool is_const_pool_blocked() const;
979 static bool IsConstantPoolAt(Instruction* instr);
980 static int ConstantPoolSizeAt(Instruction* instr);
981 // See Assembler::CheckConstPool for more info.
982 void EmitPoolGuard();
984 // Prevent veneer pool emission until EndBlockVeneerPool is called.
985 // Calls to this function can be nested but must be followed by an equal
986 // number of calls to EndBlockVeneerPool.
987 void StartBlockVeneerPool();
989 // Resume veneer pool emission. Needs to be called as many times as
990 // StartBlockVeneerPool to have an effect.
991 void EndBlockVeneerPool();
993 bool is_veneer_pool_blocked() const {
994 return veneer_pool_blocked_nesting_ > 0;
997 // Block/resume emission of constant pools and veneer pools.
998 void StartBlockPools() {
999 StartBlockConstPool();
1000 StartBlockVeneerPool();
1002 void EndBlockPools() {
1003 EndBlockConstPool();
1004 EndBlockVeneerPool();
1007 // Debugging ----------------------------------------------------------------
1008 PositionsRecorder* positions_recorder() { return &positions_recorder_; }
1009 void RecordComment(const char* msg);
1011 // Record a deoptimization reason that can be used by a log or cpu profiler.
1012 // Use --trace-deopt to enable.
1013 void RecordDeoptReason(const int reason, const int raw_position);
1015 int buffer_space() const;
1017 // Mark address of the ExitJSFrame code.
1018 void RecordJSReturn();
1020 // Mark address of a debug break slot.
1021 void RecordDebugBreakSlot();
1023 // Record the emission of a constant pool.
1025 // The emission of constant and veneer pools depends on the size of the code
1026 // generated and the number of RelocInfo recorded.
1027 // The Debug mechanism needs to map code offsets between two versions of a
1028 // function, compiled with and without debugger support (see for example
1029 // Debug::PrepareForBreakPoints()).
1030 // Compiling functions with debugger support generates additional code
1031 // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
1032 // and cause the version of the code with debugger support to have pools
1033 // generated in different places.
1034 // Recording the position and size of emitted pools allows us to correctly
1035 // compute the offset mappings between the different versions of a function
1036 // in all situations.
1038 // The parameter indicates the size of the pool (in bytes), including
1039 // the marker and branch over the data.
1040 void RecordConstPool(int size);
1043 // Instruction set functions ------------------------------------------------
1045 // Branch / Jump instructions.
1046 // For branches, offsets are scaled, i.e. they are in instructions, not bytes.
1047 // Branch to register.
1048 void br(const Register& xn);
1050 // Branch-link to register.
1051 void blr(const Register& xn);
1053 // Branch to register with return hint.
1054 void ret(const Register& xn = lr);
1056 // Unconditional branch to label.
1057 void b(Label* label);
1059 // Conditional branch to label.
1060 void b(Label* label, Condition cond);
1062 // Unconditional branch to PC offset.
1063 void b(int imm26);
1065 // Conditional branch to PC offset.
1066 void b(int imm19, Condition cond);
1068 // Branch-link to label / pc offset.
1069 void bl(Label* label);
1070 void bl(int imm26);
1072 // Compare and branch to label / pc offset if zero.
1073 void cbz(const Register& rt, Label* label);
1074 void cbz(const Register& rt, int imm19);
1076 // Compare and branch to label / pc offset if not zero.
1077 void cbnz(const Register& rt, Label* label);
1078 void cbnz(const Register& rt, int imm19);
1080 // Test bit and branch to label / pc offset if zero.
1081 void tbz(const Register& rt, unsigned bit_pos, Label* label);
1082 void tbz(const Register& rt, unsigned bit_pos, int imm14);
1084 // Test bit and branch to label / pc offset if not zero.
1085 void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1086 void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1088 // Address calculation instructions.
1089 // Calculate a PC-relative address. Unlike branches, the offset in adr is
1090 // unscaled (i.e. the result can be unaligned).
1091 void adr(const Register& rd, Label* label);
1092 void adr(const Register& rd, int imm21);
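// Illustrative sketch of the branch forms above (not part of the original
// header; assumes the usual '__' shorthand for an Assembler instance):
//
//   Label done;
//   __ cbz(x0, &done);      // Branch to 'done' if x0 == 0.
//   __ tbnz(x1, 3, &done);  // Branch to 'done' if bit 3 of x1 is set.
//   __ bind(&done);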
1094 // Data Processing instructions.
1095 // Add.
1096 void add(const Register& rd,
1097 const Register& rn,
1098 const Operand& operand);
1100 // Add and update status flags.
1101 void adds(const Register& rd,
1102 const Register& rn,
1103 const Operand& operand);
1105 // Compare negative.
1106 void cmn(const Register& rn, const Operand& operand);
1108 // Subtract.
1109 void sub(const Register& rd,
1110 const Register& rn,
1111 const Operand& operand);
1113 // Subtract and update status flags.
1114 void subs(const Register& rd,
1115 const Register& rn,
1116 const Operand& operand);
1119 void cmp(const Register& rn, const Operand& operand);
1122 void neg(const Register& rd,
1123 const Operand& operand);
1125 // Negate and update status flags.
1126 void negs(const Register& rd,
1127 const Operand& operand);
1129 // Add with carry bit.
1130 void adc(const Register& rd,
1131 const Register& rn,
1132 const Operand& operand);
1134 // Add with carry bit and update status flags.
1135 void adcs(const Register& rd,
1136 const Register& rn,
1137 const Operand& operand);
1139 // Subtract with carry bit.
1140 void sbc(const Register& rd,
1141 const Register& rn,
1142 const Operand& operand);
1144 // Subtract with carry bit and update status flags.
1145 void sbcs(const Register& rd,
1146 const Register& rn,
1147 const Operand& operand);
1149 // Negate with carry bit.
1150 void ngc(const Register& rd,
1151 const Operand& operand);
1153 // Negate with carry bit and update status flags.
1154 void ngcs(const Register& rd,
1155 const Operand& operand);
1157 // Logical instructions.
1158 // Bitwise and (A & B).
1159 void and_(const Register& rd,
1160 const Register& rn,
1161 const Operand& operand);
1163 // Bitwise and (A & B) and update status flags.
1164 void ands(const Register& rd,
1165 const Register& rn,
1166 const Operand& operand);
1168 // Bit test, and set flags.
1169 void tst(const Register& rn, const Operand& operand);
1171 // Bit clear (A & ~B).
1172 void bic(const Register& rd,
1173 const Register& rn,
1174 const Operand& operand);
1176 // Bit clear (A & ~B) and update status flags.
1177 void bics(const Register& rd,
1178 const Register& rn,
1179 const Operand& operand);
1181 // Bitwise or (A | B).
1182 void orr(const Register& rd, const Register& rn, const Operand& operand);
1184 // Bitwise or-not (A | ~B).
1185 void orn(const Register& rd, const Register& rn, const Operand& operand);
1187 // Bitwise eor/xor (A ^ B).
1188 void eor(const Register& rd, const Register& rn, const Operand& operand);
1191 // Bitwise exclusive-or-not / xnor (A ^ ~B).
1191 void eon(const Register& rd, const Register& rn, const Operand& operand);
1193 // Logical shift left variable.
1194 void lslv(const Register& rd, const Register& rn, const Register& rm);
1196 // Logical shift right variable.
1197 void lsrv(const Register& rd, const Register& rn, const Register& rm);
1199 // Arithmetic shift right variable.
1200 void asrv(const Register& rd, const Register& rn, const Register& rm);
1202 // Rotate right variable.
1203 void rorv(const Register& rd, const Register& rn, const Register& rm);
1205 // Bitfield instructions.
1206 // Bitfield move.
1207 void bfm(const Register& rd,
1208 const Register& rn,
1209 unsigned immr,
1210 unsigned imms);
1212 // Signed bitfield move.
1213 void sbfm(const Register& rd,
1214 const Register& rn,
1215 unsigned immr,
1216 unsigned imms);
1218 // Unsigned bitfield move.
1219 void ubfm(const Register& rd,
1220 const Register& rn,
1221 unsigned immr,
1222 unsigned imms);
1225 // Bitfield insert.
1226 void bfi(const Register& rd,
1227 const Register& rn,
1228 unsigned lsb,
1229 unsigned width) {
1230 DCHECK(width >= 1);
1231 DCHECK(lsb + width <= rn.SizeInBits());
1232 bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1235 // Bitfield extract and insert low.
1236 void bfxil(const Register& rd,
1237 const Register& rn,
1238 unsigned lsb,
1239 unsigned width) {
1240 DCHECK(width >= 1);
1241 DCHECK(lsb + width <= rn.SizeInBits());
1242 bfm(rd, rn, lsb, lsb + width - 1);
1246 // Arithmetic shift right.
1247 void asr(const Register& rd, const Register& rn, unsigned shift) {
1248 DCHECK(shift < rd.SizeInBits());
1249 sbfm(rd, rn, shift, rd.SizeInBits() - 1);
1252 // Signed bitfield insert in zero.
1253 void sbfiz(const Register& rd,
1254 const Register& rn,
1255 unsigned lsb,
1256 unsigned width) {
1257 DCHECK(width >= 1);
1258 DCHECK(lsb + width <= rn.SizeInBits());
1259 sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1262 // Signed bitfield extract.
1263 void sbfx(const Register& rd,
1264 const Register& rn,
1265 unsigned lsb,
1266 unsigned width) {
1267 DCHECK(width >= 1);
1268 DCHECK(lsb + width <= rn.SizeInBits());
1269 sbfm(rd, rn, lsb, lsb + width - 1);
1272 // Signed extend byte.
1273 void sxtb(const Register& rd, const Register& rn) {
1274 sbfm(rd, rn, 0, 7);
1275 }
1277 // Signed extend halfword.
1278 void sxth(const Register& rd, const Register& rn) {
1279 sbfm(rd, rn, 0, 15);
1282 // Signed extend word.
1283 void sxtw(const Register& rd, const Register& rn) {
1284 sbfm(rd, rn, 0, 31);
1288 // Logical shift left.
1289 void lsl(const Register& rd, const Register& rn, unsigned shift) {
1290 unsigned reg_size = rd.SizeInBits();
1291 DCHECK(shift < reg_size);
1292 ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
1295 // Logical shift right.
1296 void lsr(const Register& rd, const Register& rn, unsigned shift) {
1297 DCHECK(shift < rd.SizeInBits());
1298 ubfm(rd, rn, shift, rd.SizeInBits() - 1);
1301 // Unsigned bitfield insert in zero.
1302 void ubfiz(const Register& rd,
1303 const Register& rn,
1304 unsigned lsb,
1305 unsigned width) {
1306 DCHECK(width >= 1);
1307 DCHECK(lsb + width <= rn.SizeInBits());
1308 ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
1311 // Unsigned bitfield extract.
1312 void ubfx(const Register& rd,
1313 const Register& rn,
1314 unsigned lsb,
1315 unsigned width) {
1316 DCHECK(width >= 1);
1317 DCHECK(lsb + width <= rn.SizeInBits());
1318 ubfm(rd, rn, lsb, lsb + width - 1);
1321 // Unsigned extend byte.
1322 void uxtb(const Register& rd, const Register& rn) {
1323 ubfm(rd, rn, 0, 7);
1324 }
1326 // Unsigned extend halfword.
1327 void uxth(const Register& rd, const Register& rn) {
1328 ubfm(rd, rn, 0, 15);
1331 // Unsigned extend word.
1332 void uxtw(const Register& rd, const Register& rn) {
1333 ubfm(rd, rn, 0, 31);
1334 }
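// Illustrative examples of how the aliases above map onto sbfm/ubfm (not part
// of the original header):
//
//   lsl(x0, x1, 3);       // Encoded as ubfm(x0, x1, 61, 60): x0 = x1 << 3.
//   ubfx(x0, x1, 8, 16);  // Encoded as ubfm(x0, x1, 8, 23): bits 8-23 of x1.
//   sxtb(x0, x1);         // Encoded as sbfm(x0, x1, 0, 7): sign-extend byte.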
1337 void extr(const Register& rd,
1338 const Register& rn,
1339 const Register& rm,
1340 unsigned lsb);
1342 // Conditional select: rd = cond ? rn : rm.
1343 void csel(const Register& rd,
1344 const Register& rn,
1345 const Register& rm,
1346 Condition cond);
1348 // Conditional select increment: rd = cond ? rn : rm + 1.
1349 void csinc(const Register& rd,
1350 const Register& rn,
1351 const Register& rm,
1352 Condition cond);
1354 // Conditional select inversion: rd = cond ? rn : ~rm.
1355 void csinv(const Register& rd,
1356 const Register& rn,
1357 const Register& rm,
1358 Condition cond);
1360 // Conditional select negation: rd = cond ? rn : -rm.
1361 void csneg(const Register& rd,
1362 const Register& rn,
1363 const Register& rm,
1364 Condition cond);
1366 // Conditional set: rd = cond ? 1 : 0.
1367 void cset(const Register& rd, Condition cond);
1369 // Conditional set minus: rd = cond ? -1 : 0.
1370 void csetm(const Register& rd, Condition cond);
1372 // Conditional increment: rd = cond ? rn + 1 : rn.
1373 void cinc(const Register& rd, const Register& rn, Condition cond);
1375 // Conditional invert: rd = cond ? ~rn : rn.
1376 void cinv(const Register& rd, const Register& rn, Condition cond);
1378 // Conditional negate: rd = cond ? -rn : rn.
1379 void cneg(const Register& rd, const Register& rn, Condition cond);
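// Illustrative sketch (not part of the original header; assumes the usual
// '__' shorthand for an Assembler instance):
//
//   __ cmp(x0, 0);
//   __ csel(x1, x2, x3, eq);  // x1 = (x0 == 0) ? x2 : x3.
//   __ cset(x4, ne);          // x4 = (x0 != 0) ? 1 : 0.
//   __ cinc(x5, x5, lt);      // x5 = (x0 < 0) ? x5 + 1 : x5.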
1382 void ror(const Register& rd, const Register& rs, unsigned shift) {
1383 extr(rd, rs, rs, shift);
1386 // Conditional comparison.
1387 // Conditional compare negative.
1388 void ccmn(const Register& rn,
1389 const Operand& operand,
1390 StatusFlags nzcv,
1391 Condition cond);
1393 // Conditional compare.
1394 void ccmp(const Register& rn,
1395 const Operand& operand,
1396 StatusFlags nzcv,
1397 Condition cond);
1400 // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
1401 void mul(const Register& rd, const Register& rn, const Register& rm);
1403 // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
1404 void madd(const Register& rd,
1405 const Register& rn,
1406 const Register& rm,
1407 const Register& ra);
1409 // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
1410 void mneg(const Register& rd, const Register& rn, const Register& rm);
1412 // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
1413 void msub(const Register& rd,
1414 const Register& rn,
1415 const Register& rm,
1416 const Register& ra);
1418 // Signed 32 x 32 -> 64-bit multiply.
1419 void smull(const Register& rd, const Register& rn, const Register& rm);
1421 // Xd = bits<127:64> of Xn * Xm.
1422 void smulh(const Register& rd, const Register& rn, const Register& rm);
1424 // Signed 32 x 32 -> 64-bit multiply and accumulate.
1425 void smaddl(const Register& rd,
1426 const Register& rn,
1427 const Register& rm,
1428 const Register& ra);
1430 // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
1431 void umaddl(const Register& rd,
1432 const Register& rn,
1433 const Register& rm,
1434 const Register& ra);
1436 // Signed 32 x 32 -> 64-bit multiply and subtract.
1437 void smsubl(const Register& rd,
1438 const Register& rn,
1439 const Register& rm,
1440 const Register& ra);
1442 // Unsigned 32 x 32 -> 64-bit multiply and subtract.
1443 void umsubl(const Register& rd,
1444 const Register& rn,
1445 const Register& rm,
1446 const Register& ra);
1448 // Signed integer divide.
1449 void sdiv(const Register& rd, const Register& rn, const Register& rm);
1451 // Unsigned integer divide.
1452 void udiv(const Register& rd, const Register& rn, const Register& rm);
1454 // Bit count, bit reverse and endian reverse.
1455 void rbit(const Register& rd, const Register& rn);
1456 void rev16(const Register& rd, const Register& rn);
1457 void rev32(const Register& rd, const Register& rn);
1458 void rev(const Register& rd, const Register& rn);
1459 void clz(const Register& rd, const Register& rn);
1460 void cls(const Register& rd, const Register& rn);
1462 // Memory instructions.
1464 // Load integer or FP register.
1465 void ldr(const CPURegister& rt, const MemOperand& src);
1467 // Store integer or FP register.
1468 void str(const CPURegister& rt, const MemOperand& dst);
1470 // Load word with sign extension.
1471 void ldrsw(const Register& rt, const MemOperand& src);
1474 void ldrb(const Register& rt, const MemOperand& src);
1477 void strb(const Register& rt, const MemOperand& dst);
1479 // Load byte with sign extension.
1480 void ldrsb(const Register& rt, const MemOperand& src);
1483 void ldrh(const Register& rt, const MemOperand& src);
1486 void strh(const Register& rt, const MemOperand& dst);
1488 // Load half-word with sign extension.
1489 void ldrsh(const Register& rt, const MemOperand& src);
1491 // Load integer or FP register pair.
1492 void ldp(const CPURegister& rt, const CPURegister& rt2,
1493 const MemOperand& src);
1495 // Store integer or FP register pair.
1496 void stp(const CPURegister& rt, const CPURegister& rt2,
1497 const MemOperand& dst);
1499 // Load word pair with sign extension.
1500 void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
1502 // Load integer or FP register pair, non-temporal.
1503 void ldnp(const CPURegister& rt, const CPURegister& rt2,
1504 const MemOperand& src);
1506 // Store integer or FP register pair, non-temporal.
1507 void stnp(const CPURegister& rt, const CPURegister& rt2,
1508 const MemOperand& dst);
1510 // Load literal to register from a pc relative address.
1511 void ldr_pcrel(const CPURegister& rt, int imm19);
1513 // Load literal to register.
1514 void ldr(const CPURegister& rt, const Immediate& imm);
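// Illustrative sketch of the load/store forms above (not part of the original
// header; assumes the usual '__' shorthand for an Assembler instance):
//
//   __ ldr(x0, MemOperand(x1, 8));                  // x0 = [x1 + 8].
//   __ str(w0, MemOperand(x1, x2, LSL, 2));         // [x1 + (x2 << 2)] = w0.
//   __ stp(x0, x1, MemOperand(x3, -16, PreIndex));  // Push-style pair store.
//   __ ldp(d0, d1, MemOperand(x3, 16, PostIndex));  // Pair load, then x3 += 16.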
1516 // Move instructions. The default shift of -1 indicates that the move
1517 // instruction will calculate an appropriate 16-bit immediate and left shift
1518 // such that (immediate << shift) equals the 64-bit immediate argument. If an
1519 // explicit left shift is specified (0, 16, 32 or 48), the immediate must be
1520 // a 16-bit value.
1521 // For movk, an explicit shift can be used to indicate which half word should
1522 // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
1523 // half word with zero, whereas movk(x0, 0, 48) will overwrite the
1524 // most-significant.
1526 // Move with keep.
1527 void movk(const Register& rd, uint64_t imm, int shift = -1) {
1528 MoveWide(rd, imm, shift, MOVK);
1529 }
1531 // Move with NOT (the shifted immediate is inverted).
1532 void movn(const Register& rd, uint64_t imm, int shift = -1) {
1533 MoveWide(rd, imm, shift, MOVN);
1534 }
1536 // Move with zero.
1537 void movz(const Register& rd, uint64_t imm, int shift = -1) {
1538 MoveWide(rd, imm, shift, MOVZ);
1539 }
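// Illustrative examples (not part of the original header; assumes the usual
// '__' shorthand for an Assembler instance):
//
//   __ movz(x0, 0x1234, 16);  // x0 = 0x12340000.
//   __ movk(x0, 0xabcd);      // Overwrite bits 0-15: x0 = 0x1234abcd.
//   __ movn(x1, 0);           // x1 = ~0 = 0xffffffffffffffff.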
1541 // Misc instructions.
1542 // Monitor debug-mode breakpoint.
1543 void brk(int code);
1545 // Halting debug-mode breakpoint.
1546 void hlt(int code);
1548 // Move register to register.
1549 void mov(const Register& rd, const Register& rn);
1551 // Move NOT(operand) to register.
1552 void mvn(const Register& rd, const Operand& operand);
1554 // System instructions.
1555 // Move to register from system register.
1556 void mrs(const Register& rt, SystemRegister sysreg);
1558 // Move from register to system register.
1559 void msr(SystemRegister sysreg, const Register& rt);
1562 void hint(SystemHint code);
1564 // Data memory barrier
1565 void dmb(BarrierDomain domain, BarrierType type);
1567 // Data synchronization barrier
1568 void dsb(BarrierDomain domain, BarrierType type);
1570 // Instruction synchronization barrier
1571 void isb();
1573 // Alias for system instructions.
1574 void nop() { hint(NOP); }
1576 // Different nop operations are used by the code generator to detect certain
1577 // states of the generated code.
1578 enum NopMarkerTypes {
1579 DEBUG_BREAK_NOP,
1580 INTERRUPT_CODE_NOP,
1581 ADR_FAR_NOP,
1582 FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
1583 LAST_NOP_MARKER = ADR_FAR_NOP
1586 void nop(NopMarkerTypes n) {
1587 DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
1588 mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
1592 // Move immediate to FP register.
1593 void fmov(FPRegister fd, double imm);
1594 void fmov(FPRegister fd, float imm);
1596 // Move FP register to register.
1597 void fmov(Register rd, FPRegister fn);
1599 // Move register to FP register.
1600 void fmov(FPRegister fd, Register rn);
1602 // Move FP register to FP register.
1603 void fmov(FPRegister fd, FPRegister fn);
1606 void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1609 void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1612 void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1614 // FP fused multiply and add.
1615 void fmadd(const FPRegister& fd,
1616 const FPRegister& fn,
1617 const FPRegister& fm,
1618 const FPRegister& fa);
1620 // FP fused multiply and subtract.
1621 void fmsub(const FPRegister& fd,
1622 const FPRegister& fn,
1623 const FPRegister& fm,
1624 const FPRegister& fa);
1626 // FP fused multiply, add and negate.
1627 void fnmadd(const FPRegister& fd,
1628 const FPRegister& fn,
1629 const FPRegister& fm,
1630 const FPRegister& fa);
1632 // FP fused multiply, subtract and negate.
1633 void fnmsub(const FPRegister& fd,
1634 const FPRegister& fn,
1635 const FPRegister& fm,
1636 const FPRegister& fa);
1639 void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1642 void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1645 void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1648 void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1651 void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);
1654 void fabs(const FPRegister& fd, const FPRegister& fn);
1657 void fneg(const FPRegister& fd, const FPRegister& fn);
1660 void fsqrt(const FPRegister& fd, const FPRegister& fn);
1662 // FP round to integer (nearest with ties to away).
1663 void frinta(const FPRegister& fd, const FPRegister& fn);
1665 // FP round to integer (toward minus infinity).
1666 void frintm(const FPRegister& fd, const FPRegister& fn);
1668 // FP round to integer (nearest with ties to even).
1669 void frintn(const FPRegister& fd, const FPRegister& fn);
1671 // FP round to integer (towards plus infinity).
1672 void frintp(const FPRegister& fd, const FPRegister& fn);
1674 // FP round to integer (towards zero).
1675 void frintz(const FPRegister& fd, const FPRegister& fn);
1677 // FP compare registers.
1678 void fcmp(const FPRegister& fn, const FPRegister& fm);
1680 // FP compare immediate.
1681 void fcmp(const FPRegister& fn, double value);
1683 // FP conditional compare.
1684 void fccmp(const FPRegister& fn,
1685 const FPRegister& fm,
1686 StatusFlags nzcv,
1687 Condition cond);
1689 // FP conditional select.
1690 void fcsel(const FPRegister& fd,
1691 const FPRegister& fn,
1692 const FPRegister& fm,
1693 Condition cond);
1695 // Common FP Convert function
1696 void FPConvertToInt(const Register& rd,
1697 const FPRegister& fn,
1698 FPIntegerConvertOp op);
1700 // FP convert between single and double precision.
1701 void fcvt(const FPRegister& fd, const FPRegister& fn);
1703 // Convert FP to unsigned integer (nearest with ties to away).
1704 void fcvtau(const Register& rd, const FPRegister& fn);
1706 // Convert FP to signed integer (nearest with ties to away).
1707 void fcvtas(const Register& rd, const FPRegister& fn);
1709 // Convert FP to unsigned integer (round towards -infinity).
1710 void fcvtmu(const Register& rd, const FPRegister& fn);
1712 // Convert FP to signed integer (round towards -infinity).
1713 void fcvtms(const Register& rd, const FPRegister& fn);
1715 // Convert FP to unsigned integer (nearest with ties to even).
1716 void fcvtnu(const Register& rd, const FPRegister& fn);
1718 // Convert FP to signed integer (nearest with ties to even).
1719 void fcvtns(const Register& rd, const FPRegister& fn);
1721 // Convert FP to unsigned integer (round towards zero).
1722 void fcvtzu(const Register& rd, const FPRegister& fn);
1724 // Convert FP to signed integer (round towards zero).
1725 void fcvtzs(const Register& rd, const FPRegister& fn);
1727 // Convert signed integer or fixed point to FP.
1728 void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1730 // Convert unsigned integer or fixed point to FP.
1731 void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
1733 // Instruction functions used only for test, debug, and patching.
1734 // Emit raw instructions in the instruction stream.
1735 void dci(Instr raw_inst) { Emit(raw_inst); }
1737 // Emit 8 bits of data in the instruction stream.
1738 void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }
1740 // Emit 32 bits of data in the instruction stream.
1741 void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }
1743 // Emit 64 bits of data in the instruction stream.
1744 void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
1746 // Copy a string into the instruction stream, including the terminating NULL
1747 // character. The instruction pointer (pc_) is then aligned correctly for
1748 // subsequent instructions.
1749 void EmitStringData(const char* string);
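// Illustrative sketch (not part of the original header; assumes the usual
// '__' shorthand for an Assembler instance):
//
//   __ dc32(0xdeadbeef);          // Emit a raw 32-bit word.
//   __ EmitStringData("marker");  // Emit "marker\0", then realign pc_.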
1751 // Pseudo-instructions ------------------------------------------------------
1753 // Parameters are described in arm64/instructions-arm64.h.
1754 void debug(const char* message, uint32_t code, Instr params = BREAK);
1757 void dd(uint32_t data) { dc32(data); }
1758 void db(uint8_t data) { dc8(data); }
1760 // Code generation helpers --------------------------------------------------
1762 bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
1764 Instruction* pc() const { return Instruction::Cast(pc_); }
1766 Instruction* InstructionAt(int offset) const {
1767 return reinterpret_cast<Instruction*>(buffer_ + offset);
1770 ptrdiff_t InstructionOffset(Instruction* instr) const {
1771 return reinterpret_cast<byte*>(instr) - buffer_;
1774 // Register encoding.
1775 static Instr Rd(CPURegister rd) {
1776 DCHECK(rd.code() != kSPRegInternalCode);
1777 return rd.code() << Rd_offset;
1780 static Instr Rn(CPURegister rn) {
1781 DCHECK(rn.code() != kSPRegInternalCode);
1782 return rn.code() << Rn_offset;
1785 static Instr Rm(CPURegister rm) {
1786 DCHECK(rm.code() != kSPRegInternalCode);
1787 return rm.code() << Rm_offset;
1790 static Instr Ra(CPURegister ra) {
1791 DCHECK(ra.code() != kSPRegInternalCode);
1792 return ra.code() << Ra_offset;
1795 static Instr Rt(CPURegister rt) {
1796 DCHECK(rt.code() != kSPRegInternalCode);
1797 return rt.code() << Rt_offset;
1800 static Instr Rt2(CPURegister rt2) {
1801 DCHECK(rt2.code() != kSPRegInternalCode);
1802 return rt2.code() << Rt2_offset;
1805 // These encoding functions allow the stack pointer to be encoded, and
1806 // disallow the zero register.
1807 static Instr RdSP(Register rd) {
1808 DCHECK(!rd.IsZero());
1809 return (rd.code() & kRegCodeMask) << Rd_offset;
1812 static Instr RnSP(Register rn) {
1813 DCHECK(!rn.IsZero());
1814 return (rn.code() & kRegCodeMask) << Rn_offset;
1818 inline static Instr Flags(FlagsUpdate S);
1819 inline static Instr Cond(Condition cond);
1821 // PC-relative address encoding.
1822 inline static Instr ImmPCRelAddress(int imm21);
1825 inline static Instr ImmUncondBranch(int imm26);
1826 inline static Instr ImmCondBranch(int imm19);
1827 inline static Instr ImmCmpBranch(int imm19);
1828 inline static Instr ImmTestBranch(int imm14);
1829 inline static Instr ImmTestBranchBit(unsigned bit_pos);
1831 // Data Processing encoding.
1832 inline static Instr SF(Register rd);
1833 inline static Instr ImmAddSub(int64_t imm);
1834 inline static Instr ImmS(unsigned imms, unsigned reg_size);
1835 inline static Instr ImmR(unsigned immr, unsigned reg_size);
1836 inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
1837 inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
1838 inline static Instr ImmLLiteral(int imm19);
1839 inline static Instr BitN(unsigned bitn, unsigned reg_size);
1840 inline static Instr ShiftDP(Shift shift);
1841 inline static Instr ImmDPShift(unsigned amount);
1842 inline static Instr ExtendMode(Extend extend);
1843 inline static Instr ImmExtendShift(unsigned left_shift);
1844 inline static Instr ImmCondCmp(unsigned imm);
1845 inline static Instr Nzcv(StatusFlags nzcv);
1847 static bool IsImmAddSub(int64_t immediate);
1848 static bool IsImmLogical(uint64_t value,
1849 unsigned width,
1850 unsigned* n,
1851 unsigned* imm_s,
1852 unsigned* imm_r);
1854 // MemOperand offset encoding.
1855 inline static Instr ImmLSUnsigned(int imm12);
1856 inline static Instr ImmLS(int imm9);
1857 inline static Instr ImmLSPair(int imm7, LSDataSize size);
1858 inline static Instr ImmShiftLS(unsigned shift_amount);
1859 inline static Instr ImmException(int imm16);
1860 inline static Instr ImmSystemRegister(int imm15);
1861 inline static Instr ImmHint(int imm7);
1862 inline static Instr ImmBarrierDomain(int imm2);
1863 inline static Instr ImmBarrierType(int imm2);
1864 inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
1866 static bool IsImmLSUnscaled(int64_t offset);
1867 static bool IsImmLSScaled(int64_t offset, LSDataSize size);
1869 // Move immediates encoding.
1870 inline static Instr ImmMoveWide(uint64_t imm);
1871 inline static Instr ShiftMoveWide(int64_t shift);
1874 static Instr ImmFP32(float imm);
1875 static Instr ImmFP64(double imm);
1876 inline static Instr FPScale(unsigned scale);
1878 // FP register type.
1879 inline static Instr FPType(FPRegister fd);
1881 // Scope class for postponing constant pool generation.
1882 class BlockConstPoolScope {
1884 explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
1885 assem_->StartBlockConstPool();
1887 ~BlockConstPoolScope() {
1888 assem_->EndBlockConstPool();
1889 }
1891 private:
1892 Assembler* assem_;
1894 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
1895 };
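// Illustrative usage (not part of the original header; assumes 'masm' is an
// Assembler instance):
//
//   {
//     Assembler::BlockConstPoolScope scope(&masm);
//     // Code emitted here is guaranteed not to be interleaved with a
//     // constant pool.
//   }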
1897 // Check if it is time to emit a constant pool.
1898 void CheckConstPool(bool force_emit, bool require_jump);
1900 // Allocate a constant pool of the correct size for the generated code.
1901 Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
1903 // Generate the constant pool for the generated code.
1904 void PopulateConstantPool(ConstantPoolArray* constant_pool);
1906 // Returns true if we should emit a veneer as soon as possible for a branch
1907 // which can at most reach to specified pc.
1908 bool ShouldEmitVeneer(int max_reachable_pc,
1909 int margin = kVeneerDistanceMargin);
1910 bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
1911 return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
1914 // The maximum code size generated for a veneer. Currently one branch
1915 // instruction. This is for code size checking purposes, and can be extended
1916 // in the future for example if we decide to add nops between the veneers.
1917 static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
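
  // Hedged sketch (added, not the actual implementation): the veneer
  // decision reduces to a distance check against the current position.
  bool ShouldEmitVeneerSketch(int max_reachable_pc, int margin) {
    // Emit once we are within 'margin' bytes of the farthest pc the
    // branch can still reach.
    return (max_reachable_pc - pc_offset()) <= margin;
  }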
  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over them.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);

  // RAII scope that blocks both constant pool and veneer pool emission.
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }

    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);

  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the
  // instruction set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);

  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);
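
  // Hedged standalone sketch (added) of the FP64 immediate test: an
  // FMOV-encodable double is (-1)^s * (1 + f/16) * 2^e with -3 <= e <= 4,
  // so only the top bits of the raw representation may vary. Assumes
  // <cstring> for memcpy.
  static bool IsImmFP64Sketch(double imm) {
    uint64_t bits;
    memcpy(&bits, &imm, sizeof(bits));
    // bits[47:0] must be zero.
    if ((bits & 0x0000ffffffffffffULL) != 0) return false;
    // bits[61:54] must be all set or all clear.
    uint64_t b_pattern = (bits >> 48) & 0x3fc0;
    if ((b_pattern != 0) && (b_pattern != 0x3fc0)) return false;
    // bits[62] and bits[61] must differ.
    if (((bits ^ (bits << 1)) & 0x4000000000000000ULL) == 0) return false;
    return true;
  }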
  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension
  // (such as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);

  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used by other branches in
  // the chain when the chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
  // Instruction helpers.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void LoadStorePairNonTemporal(const CPURegister& rt,
                                const CPURegister& rt2,
                                const MemOperand& addr,
                                LoadStorePairNonTemporalOp op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);
  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);

  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from the current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
  }
  // Emit data inline in the instruction stream.
  void EmitData(void const * data, unsigned size) {
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register that we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
  }

  void CheckBufferSpace();
  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;

  // Constant pool generation.
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance, or
  //  * the number of entries in the pool is above a pre-defined size, or
  //  * code generation is finished.
  // If a pool needs to be emitted before code generation is finished, a
  // branch over the emitted pool will be inserted. A sketch of this decision
  // follows the constants below.
  //
  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.
  //
  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and we rely on some slop to avoid overrunning buffers.
  static const int kCheckConstPoolInterval = 128;

  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with a pc-relative load, so this cannot be more than 1 * MB.
  // Since constant pool emission checks are interval based, this value is an
  // approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval based, this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;
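
  // Hedged sketch (added, not the actual implementation) of the emission
  // decision described above; the parameters are illustrative.
  static bool ShouldEmitConstPoolSketch(int dist_to_first_use,
                                        int entry_count,
                                        bool generation_finished) {
    return generation_finished ||
           (dist_to_first_use > kApproxMaxDistToConstPool) ||
           (entry_count > kApproxMaxPoolEntryCount);
  }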
  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation.
  // Each relocation is encoded as a variable-size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are
  // temporarily stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need
  // one pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();

  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences
  // do not have to check for overflow. The same is true for writes of large
  // relocation info entries and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The first member of each pair is the maximum offset that the branch can
  // reach in the buffer; the second member describes the unresolved branch.
  // The map is sorted according to this reachable offset, making it easy to
  // check when veneers need to be emitted.
  // Note that the maximum reachable offset (the first member of each pair)
  // should always be positive, but it has the same type as the return value
  // of pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;

  // We generate a veneer for a branch if we reach within this distance of the
  // limit of its range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger-in-the-air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions' worth of distance
  // before a protective branch must be generated.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
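
  // Hedged sketch (added): because unresolved_branches_ is keyed by the
  // maximum reachable offset, the earliest deadline is always begin(), so a
  // single comparison tells whether any veneer is becoming urgent.
  bool AnyVeneerUrgentSketch(int margin) const {
    return !unresolved_branches_.empty() &&
           (unresolved_branches_.begin()->first - pc_offset() <= margin);
  }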
  // This is similar to next_constant_pool_check_ and helps reduce the
  // overhead of checking for veneer pools.
  // It is kept equal to the closest unresolved branch limit minus the
  // maximum veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly
  // to the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, checking for each PC-relative instruction in the chain
  // whether pending unresolved information exists. Its complexity is
  // proportional to the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);

  PositionsRecorder positions_recorder_;

  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
};
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated, or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Instruction* start, unsigned count)
      : Assembler(NULL,
                  reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());

    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the instruction cache.
    size_t length = buffer_size_ - kGap;
    CpuFeatures::FlushICache(buffer_, length);
  }
  // See the definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
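
// Hedged usage sketch (added; names and values are illustrative): patch two
// existing instructions in place. The destructor asserts that exactly
// 'count' instructions were emitted and flushes the instruction cache.
//
//   {
//     PatchingAssembler patcher(instruction_address, 2);
//     patcher.b(branch_offset_in_instructions);
//     patcher.nop();
//   }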
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
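
// Hedged usage sketch (added; the emitter name is hypothetical): construct
// an EnsureSpace at the top of a code path that is about to emit, so enough
// headroom exists before a multi-instruction sequence (cf. the kGap slop
// described above).
//
//   void Assembler::SomeEmitterSketch() {
//     EnsureSpace ensure_space(this);
//     // ... emit instructions ...
//   }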
} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_H_