// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_

#include <map>
#include <vector>

#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"


namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Registers.
#define REGISTER_CODE_LIST(R)                                                  \
R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)                                 \
R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)                                \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)                                \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
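
// Illustrative sketch (not part of the original header): REGISTER_CODE_LIST
// is an X-macro. A client supplies a macro taking a register code, and it is
// applied to every code from 0 to 31. For example, counting the expansions:
#define ILLUSTRATIVE_COUNT_ONE(N) +1
static const int kIllustrativeRegisterCodeCount =
    0 REGISTER_CODE_LIST(ILLUSTRATIVE_COUNT_ONE);  // == 32
#undef ILLUSTRATIVE_COUNT_ONE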
static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;


// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
struct Register;
struct FPRegister;


struct CPURegister {
  enum RegisterType {
    // The kInvalid value is used to detect uninitialized static instances,
    // which are always zero-initialized before any constructors are called.
    kInvalid = 0,
    kRegister,
    kFPRegister,
    kNoRegister
  };
  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
    CPURegister r = {code, size, type};
    return r;
  }

  unsigned code() const;
  RegisterType type() const;
  RegList Bit() const;
  unsigned SizeInBits() const;
  int SizeInBytes() const;
  bool Is32Bits() const;
  bool Is64Bits() const;
  bool IsValid() const;
  bool IsValidOrNone() const;
  bool IsValidRegister() const;
  bool IsValidFPRegister() const;
  bool IsNone() const;
  bool Is(const CPURegister& other) const;
  bool Aliases(const CPURegister& other) const;

  bool IsZero() const;
  bool IsSP() const;

  bool IsRegister() const;
  bool IsFPRegister() const;

  bool IsSameSizeAndType(const CPURegister& other) const;

  // V8 compatibility.
  bool is(const CPURegister& other) const { return Is(other); }
  bool is_valid() const { return IsValid(); }

  unsigned reg_code;
  unsigned reg_size;
  RegisterType reg_type;
};
struct Register : public CPURegister {
  static Register Create(unsigned code, unsigned size) {
    return Register(CPURegister::Create(code, size, CPURegister::kRegister));
  }

  Register() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit Register(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  Register(const Register& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsRegister() || IsNone());
    return IsValidRegister();
  }

  static Register XRegFromCode(unsigned code);
  static Register WRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  // These members are necessary for compilation.
  // A few of them may be unused for now.

  static const int kNumRegisters = kNumberOfRegisters;
  static int NumRegisters() { return kNumRegisters; }
  // We allow crankshaft to use the following registers:
  //   - x0 to x15
  //   - x18 to x24
  //   - x27 (also context)
  //
  // TODO(all): Register x25 is currently free and could be available for
  // crankshaft, but we don't use it as we might use it as a per function
  // literal pool pointer in the future.
  //
  // TODO(all): Consider storing cp in x25 to have only two ranges.
  // We split the allocatable registers into three ranges:
  //   - low range: from kAllocatableLowRangeBegin to kAllocatableLowRangeEnd
  //   - high range: from kAllocatableHighRangeBegin to kAllocatableHighRangeEnd
  //   - context: kAllocatableContext
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 15;
  static const unsigned kAllocatableHighRangeBegin = 18;
  static const unsigned kAllocatableHighRangeEnd = 24;
  static const unsigned kAllocatableContext = 27;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1;  // cp
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return ((reg_code == kAllocatableContext) ||
            (reg_code <= kAllocatableLowRangeEnd) ||
            ((reg_code >= kAllocatableHighRangeBegin) &&
             (reg_code <= kAllocatableHighRangeEnd)));
  }

  static Register FromAllocationIndex(unsigned index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
    // cp is the last allocatable register.
    if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
      return from_code(kAllocatableContext);
    }

    // Handle low and high ranges.
    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }
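
  // Worked example (illustrative, not part of the original header): with the
  // constants above, kAllocatableRangeGapSize == (18 - 15) - 1 == 2, so
  // allocation indices map to registers as follows:
  //   index 0..15  -> x0..x15   (low range)
  //   index 16..22 -> x18..x24  (high range, index + 2)
  //   index 23     -> x27       (cp, the last allocatable register)
  // e.g. FromAllocationIndex(16) == from_code(16 + 2) == x18.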
  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 15) &&
           (kAllocatableHighRangeBegin == 18) &&
           (kAllocatableHighRangeEnd == 24) &&
           (kAllocatableContext == 27));
    const char* const names[] = {
      "x0", "x1", "x2", "x3", "x4",
      "x5", "x6", "x7", "x8", "x9",
      "x10", "x11", "x12", "x13", "x14",
      "x15", "x18", "x19", "x20", "x21",
      "x22", "x23", "x24", "x27",
    };
    return names[index];
  }

  static int ToAllocationIndex(Register reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();
    if (code == kAllocatableContext) {
      return NumAllocatableRegisters() - 1;
    }

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static Register from_code(int code) {
    // Always return an X register.
    return Register::Create(code, kXRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};
struct FPRegister : public CPURegister {
  static FPRegister Create(unsigned code, unsigned size) {
    return FPRegister(
        CPURegister::Create(code, size, CPURegister::kFPRegister));
  }

  FPRegister() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit FPRegister(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsFPRegister() || IsNone());
    return IsValidFPRegister();
  }

  static FPRegister SRegFromCode(unsigned code);
  static FPRegister DRegFromCode(unsigned code);
  // Start of V8 compatibility section ---------------------
  static const int kMaxNumRegisters = kNumberOfFPRegisters;

  // Crankshaft can use all the FP registers except:
  //   - d15 which is used to keep the 0 double value
  //   - d29 which is used in crankshaft as a double scratch register
  //   - d30 and d31 which are used in the MacroAssembler as double scratch
  //     registers
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 14;
  static const unsigned kAllocatableHighRangeBegin = 16;
  static const unsigned kAllocatableHighRangeEnd = 28;

  static const RegList kAllocatableFPRegisters = 0x1fff7fff;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return (Bit() & kAllocatableFPRegisters) != 0;
  }
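
  // Illustrative note (not part of the original header): 0x1fff7fff is
  // 0b0001'1111'1111'1111'0111'1111'1111'1111, i.e. bits 0-14 (d0-d14) and
  // bits 16-28 (d16-d28), matching the two allocatable ranges above and
  // giving 15 + 13 == 28 == kMaxNumAllocatableRegisters registers.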
  static FPRegister FromAllocationIndex(unsigned int index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));

    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 14) &&
           (kAllocatableHighRangeBegin == 16) &&
           (kAllocatableHighRangeEnd == 28));
    const char* const names[] = {
      "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11", "d12", "d13", "d14",
      "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27", "d28"
    };
    return names[index];
  }

  static int ToAllocationIndex(FPRegister reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static FPRegister from_code(int code) {
    // Always return a D register.
    return FPRegister::Create(code, kDRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};
STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));


#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
                                   &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
                                    &init_##register_class##_##name)
#else
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)
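
// Illustrative expansion (not part of the original header): in the one
// translation unit that defines ARM64_DEFINE_REG_STATICS,
//   INITIALIZE_REGISTER(Register, x0, 0, kXRegSizeInBits,
//                       CPURegister::kRegister);
// expands to a zero-initializable backing struct and a typed reference:
//   const CPURegister init_Register_x0 = {0, kXRegSizeInBits,
//                                         CPURegister::kRegister};
//   const Register& x0 = *reinterpret_cast<const Register*>(&init_Register_x0);
// while every other translation unit merely sees:
//   extern const Register& x0;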
// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

// v8 compatibility.
INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

#define DEFINE_REGISTERS(N)                                     \
  INITIALIZE_REGISTER(Register, w##N, N,                        \
                      kWRegSizeInBits, CPURegister::kRegister); \
  INITIALIZE_REGISTER(Register, x##N, N,                        \
                      kXRegSizeInBits, CPURegister::kRegister);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS

INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
                    CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
                    CPURegister::kRegister);
#define DEFINE_FPREGISTERS(N)                                       \
  INITIALIZE_REGISTER(FPRegister, s##N, N,                          \
                      kSRegSizeInBits, CPURegister::kFPRegister);   \
  INITIALIZE_REGISTER(FPRegister, d##N, N,                          \
                      kDRegSizeInBits, CPURegister::kFPRegister);
REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS

#undef INITIALIZE_REGISTER
// Register aliases.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
ALIAS_REGISTER(Register, wip1, w17);
// Root register.
ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
// We use a register as a JS stack pointer to overcome the restriction on the
// architectural SP alignment.
// We chose x28 because it is contiguous with the other specific purpose
// registers.
STATIC_ASSERT(kJSSPCode == 28);
ALIAS_REGISTER(Register, jssp, x28);
ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);

// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
// Crankshaft double scratch register.
ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
// MacroAssembler double scratch registers.
ALIAS_REGISTER(FPRegister, fp_scratch, d30);
ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
ALIAS_REGISTER(FPRegister, fp_scratch2, d31);

#undef ALIAS_REGISTER
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
                                              Register reg2 = NoReg,
                                              Register reg3 = NoReg,
                                              Register reg4 = NoReg);
// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3 = NoReg,
                const CPURegister& reg4 = NoReg,
                const CPURegister& reg5 = NoReg,
                const CPURegister& reg6 = NoReg,
                const CPURegister& reg7 = NoReg,
                const CPURegister& reg8 = NoReg);

// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3 = NoCPUReg,
                        const CPURegister& reg4 = NoCPUReg,
                        const CPURegister& reg5 = NoCPUReg,
                        const CPURegister& reg6 = NoCPUReg,
                        const CPURegister& reg7 = NoCPUReg,
                        const CPURegister& reg8 = NoCPUReg);

typedef FPRegister DoubleRegister;
// -----------------------------------------------------------------------------
// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
        size_(reg1.SizeInBits()), type_(reg1.type()) {
    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    DCHECK(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size,
             unsigned first_reg, unsigned last_reg)
      : size_(size), type_(type) {
    DCHECK(((type == CPURegister::kRegister) &&
            (last_reg < kNumberOfRegisters)) ||
           ((type == CPURegister::kFPRegister) &&
            (last_reg < kNumberOfFPRegisters)));
    DCHECK(last_reg >= first_reg);
    list_ = (1UL << (last_reg + 1)) - 1;
    list_ &= ~((1UL << first_reg) - 1);
    DCHECK(IsValid());
  }
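
  // Worked example (illustrative, not part of the original header): for
  // first_reg = 19 and last_reg = 28,
  //   (1UL << (28 + 1)) - 1   sets bits 0-28, and
  //   ~((1UL << 19) - 1)      clears bits 0-18,
  // leaving exactly bits 19-28, i.e. registers 19 to 28 inclusive.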
  CPURegister::RegisterType type() const {
    DCHECK(IsValid());
    return type_;
  }

  RegList list() const {
    DCHECK(IsValid());
    return list_;
  }

  inline void set_list(RegList new_list) {
    DCHECK(IsValid());
    list_ = new_list;
  }
  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other);

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type of the registers in the
  // 'other' list must match those in this list.
  void Remove(const CPURegList& other);

  // Variants of Combine and Remove which take CPURegisters.
  void Combine(const CPURegister& other);
  void Remove(const CPURegister& other1,
              const CPURegister& other2 = NoCPUReg,
              const CPURegister& other3 = NoCPUReg,
              const CPURegister& other4 = NoCPUReg);

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code);
  void Remove(int code);

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  CPURegister PopLowestIndex();
  CPURegister PopHighestIndex();

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);

  // Registers saved as safepoints.
  static CPURegList GetSafepointSavedRegisters();
  bool IsEmpty() const {
    DCHECK(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other1,
                       const CPURegister& other2 = NoCPUReg,
                       const CPURegister& other3 = NoCPUReg,
                       const CPURegister& other4 = NoCPUReg) const {
    DCHECK(IsValid());
    RegList list = 0;
    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
    return (list_ & list) != 0;
  }

  int Count() const {
    DCHECK(IsValid());
    return CountSetBits(list_, kRegListSizeInBits);
  }
  unsigned RegisterSizeInBits() const {
    DCHECK(IsValid());
    return size_;
  }

  unsigned RegisterSizeInBytes() const {
    int size_in_bits = RegisterSizeInBits();
    DCHECK((size_in_bits % kBitsPerByte) == 0);
    return size_in_bits / kBitsPerByte;
  }

  unsigned TotalSizeInBytes() const {
    DCHECK(IsValid());
    return RegisterSizeInBytes() * Count();
  }

 private:
  RegList list_;
  unsigned size_;
  CPURegister::RegisterType type_;
  bool IsValid() const {
    const RegList kValidRegisters = 0x8000000ffffffff;
    const RegList kValidFPRegisters = 0x0000000ffffffff;
    switch (type_) {
      case CPURegister::kRegister:
        return (list_ & kValidRegisters) == list_;
      case CPURegister::kFPRegister:
        return (list_ & kValidFPRegisters) == list_;
      case CPURegister::kNoRegister:
        return list_ == 0;
      default:
        UNREACHABLE();
        return false;
    }
  }
};
// AAPCS64 callee-saved registers.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()


// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()
// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
 public:
  template<typename T>
  inline explicit Immediate(Handle<T> handle);

  // This is allowed to be an implicit constructor because Immediate is
  // a wrapper class that doesn't normally perform any type conversion.
  template<typename T>
  inline Immediate(T value);  // NOLINT(runtime/explicit)

  template<typename T>
  inline Immediate(T value, RelocInfo::Mode rmode);

  int64_t value() const { return value_; }
  RelocInfo::Mode rmode() const { return rmode_; }

 private:
  void InitializeHandle(Handle<Object> value);

  int64_t value_;
  RelocInfo::Mode rmode_;
};
// -----------------------------------------------------------------------------
// Operands.
const int kSmiShift = kSmiTagSize + kSmiShiftSize;
const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;

// Represents an operand in a machine instruction.
class Operand {
  // TODO(all): If necessary, study in more detail which methods
  // TODO(all): should be inlined or not.
 public:
  // rm, {<shift> {#<shift_amount>}}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  inline Operand(Register reg,
                 Shift shift = LSL,
                 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, <extend> {#<shift_amount>}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  inline Operand(Register reg,
                 Extend extend,
                 unsigned shift_amount = 0);

  template<typename T>
  inline explicit Operand(Handle<T> handle);

  // Implicit constructor for all int types, ExternalReference, and Smi.
  template<typename T>
  inline Operand(T t);  // NOLINT(runtime/explicit)

  // Implicit constructor for int types.
  template<typename T>
  inline Operand(T t, RelocInfo::Mode rmode);

  inline bool IsImmediate() const;
  inline bool IsShiftedRegister() const;
  inline bool IsExtendedRegister() const;
  inline bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  inline Operand ToExtendedRegister() const;

  inline Immediate immediate() const;
  inline int64_t ImmediateValue() const;
  inline Register reg() const;
  inline Shift shift() const;
  inline Extend extend() const;
  inline unsigned shift_amount() const;

  // Relocation information.
  bool NeedsRelocation(const Assembler* assembler) const;

  // Helpers.
  inline static Operand UntagSmi(Register smi);
  inline static Operand UntagSmiAndScale(Register smi, int scale);

 private:
  Immediate immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
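
// Illustrative usage sketch (not part of the original header); all of the
// following construct valid Operands, using the registers defined above:
//   Operand(x1)             // plain register
//   Operand(x1, LSL, 4)     // x1 shifted left by 4
//   Operand(x1, SXTW)       // x1 sign-extended from 32 bits
//   Operand(1234)           // immediate
// For example, add(x0, x0, Operand(x1, LSL, 4)) computes x0 = x0 + (x1 << 4).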
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
 public:
  inline MemOperand();
  inline explicit MemOperand(Register base,
                             int64_t offset = 0,
                             AddrMode addrmode = Offset);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Shift shift = LSL,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Extend extend,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             const Operand& offset,
                             AddrMode addrmode = Offset);

  const Register& base() const { return base_; }
  const Register& regoffset() const { return regoffset_; }
  int64_t offset() const { return offset_; }
  AddrMode addrmode() const { return addrmode_; }
  Shift shift() const { return shift_; }
  Extend extend() const { return extend_; }
  unsigned shift_amount() const { return shift_amount_; }
  inline bool IsImmediateOffset() const;
  inline bool IsRegisterOffset() const;
  inline bool IsPreIndex() const;
  inline bool IsPostIndex() const;

  // For offset modes, return the offset as an Operand. This helper cannot
  // handle indexed modes.
  inline Operand OffsetAsOperand() const;

  enum PairResult {
    kNotPair,   // Can't use a pair instruction.
    kPairAB,    // Can use a pair instruction (operandA has lower address).
    kPairBA     // Can use a pair instruction (operandB has lower address).
  };
  // Check whether two MemOperands are consistent for stp/ldp use.
  static PairResult AreConsistentForPair(const MemOperand& operandA,
                                         const MemOperand& operandB,
                                         int access_size_log2 = kXRegSizeLog2);

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
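
// Illustrative usage sketch (not part of the original header), using the
// registers defined above:
//   MemOperand(x0, 8)              // [x0 + 8], immediate offset
//   MemOperand(x0, x1, LSL, 3)     // [x0 + (x1 << 3)], register offset
//   MemOperand(x0, 16, PreIndex)   // [x0 + 16]!, base updated before access
//   MemOperand(x0, 16, PostIndex)  // [x0], then x0 += 16 after the access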
class ConstPool {
 public:
  explicit ConstPool(Assembler* assm)
      : assm_(assm),
        first_use_(-1),
        shared_entries_count(0) {}
  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
  int EntryCount() const {
    return shared_entries_count + unique_entries_.size();
  }
  bool IsEmpty() const {
    return shared_entries_.empty() && unique_entries_.empty();
  }
  // Distance in bytes between the current pc and the first instruction
  // using the pool. If there are no pending entries return kMaxInt.
  int DistanceToFirstUse();
  // Offset after which instructions using the pool will be out of range.
  int MaxPcOffset();
  // Maximum size the constant pool can be with current entries. It always
  // includes alignment padding and branch over.
  int WorstCaseSize();
  // Size in bytes of the literal pool *if* it is emitted at the current
  // pc. The size will include the branch over the pool if it was requested.
  int SizeIfEmittedAtCurrentPc(bool require_jump);
  // Emit the literal pool at the current pc with a branch over the pool if
  // requested.
  void Emit(bool require_jump);
  // Discard any pending pool entries.
  void Clear();

 private:
  bool CanBeShared(RelocInfo::Mode mode);

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_;
  // values, pc offset(s) of entries which can be shared.
  std::multimap<uint64_t, int> shared_entries_;
  // Number of distinct literals in shared entries.
  int shared_entries_count;
  // values, pc offset of entries which cannot be shared.
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};
// -----------------------------------------------------------------------------
// Assembler.

class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);

  virtual ~Assembler();

  virtual void AbortedCodeGeneration() {
    constpool_.Clear();
  }
  // System functions ---------------------------------------------------------
  // Start generating code from the beginning of the buffer, discarding any code
  // and data that has already been emitted into the buffer.
  //
  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
  // constant pool is not blocked.
  void Reset();

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  //
  // The descriptor (desc) can be NULL. In that case, the code is finalized as
  // usual, but the descriptor is not populated.
  void GetCode(CodeDesc* desc);

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  inline void Unreachable();

  // Label --------------------------------------------------------------------
  // Bind a label to the current pc. Note that labels can only be bound once,
  // and if labels are linked to other instructions, they _must_ be bound
  // before they go out of scope.
  void bind(Label* label);


  // RelocInfo and pools ------------------------------------------------------

  // Record relocation information for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc.
  inline static Address target_pointer_address_at(Address pc);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Address target_address_at(Address pc,
                                          ConstantPoolArray* constant_pool);
  inline static void set_target_address_at(Address pc,
                                           ConstantPoolArray* constant_pool,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);
  static inline Address target_address_at(Address pc, Code* code);
  static inline void set_target_address_at(Address pc,
                                           Code* code,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);

  // Return the code target address at a call site from the return address of
  // that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  // Given the address of the beginning of a call, return the address in the
  // instruction stream that call will return from.
  inline static Address return_address_from_call_start(Address pc);

  // Return the code target address of the patch debug break slot.
  inline static Address break_address_from_return_address(Address pc);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address constant_pool_entry, Code* code, Address target);

  // All addresses in the constant pool are the same size as pointers.
  static const int kSpecialTargetSize = kPointerSize;
  // The sizes of the call sequences emitted by MacroAssembler::Call.
  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
  // as it will choose the correct value for a given relocation mode.
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
  static const int kCallSizeWithRelocation = 2 * kInstructionSize;
  // Size of the generated code in bytes.
  uint64_t SizeOfGeneratedCode() const {
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }

  // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();
  }
  // Check the size of the code generated since the given label. This function
  // is used primarily to work around comparisons between signed and unsigned
  // quantities, since V8 uses both.
  // TODO(jbramley): Work out what sign to use for these things and if possible,
  // change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    DCHECK(size >= 0);
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }

  // Return the number of instructions generated from label to the
  // current position.
  int InstructionsGeneratedSince(const Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }
  // Number of instructions generated for the return sequence in
  // FullCodeGenerator::EmitReturnSequence.
  static const int kJSRetSequenceInstructions = 7;
  // Distance between start of patched return sequence and the emitted address
  // to jump to.
  static const int kPatchReturnSequenceAddressOffset = 0;
  static const int kPatchDebugBreakSlotAddressOffset = 0;

  // Number of instructions necessary to be able to later patch it to a call.
  // See DebugCodegen::GenerateSlot() and
  // BreakLocationIterator::SetDebugBreakAtSlot().
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstructionSize;

  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;
  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool();

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool();

  bool is_const_pool_blocked() const;
  static bool IsConstantPoolAt(Instruction* instr);
  static int ConstantPoolSizeAt(Instruction* instr);
  // See Assembler::CheckConstPool for more info.
  void EmitPoolGuard();

  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
  void StartBlockVeneerPool();

  // Resume veneer pool emission. Needs to be called as many times as
  // StartBlockVeneerPool to have an effect.
  void EndBlockVeneerPool();

  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }

  // Block/resume emission of constant pools and veneer pools.
  void StartBlockPools() {
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  void EndBlockPools() {
    EndBlockConstPool();
    EndBlockVeneerPool();
  }
  // Debugging ----------------------------------------------------------------
  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
  void RecordComment(const char* msg);
  int buffer_space() const;

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the emission of a constant pool.
  //
  // The emission of constant and veneer pools depends on the size of the code
  // generated and the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
  // and cause the version of the code with debugger support to have pools
  // generated in different places.
  // Recording the position and size of emitted pools allows us to correctly
  // compute the offset mappings between the different versions of a function
  // in all situations.
  //
  // The parameter indicates the size of the pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);
  // Instruction set functions ------------------------------------------------

  // Branch / Jump instructions.
  // For branches, offsets are scaled, i.e. they are in instructions, not in
  // bytes.
  // Branch to register.
  void br(const Register& xn);

  // Branch-link to register.
  void blr(const Register& xn);

  // Branch to register with return hint.
  void ret(const Register& xn = lr);

  // Unconditional branch to label.
  void b(Label* label);

  // Conditional branch to label.
  void b(Label* label, Condition cond);

  // Unconditional branch to PC offset.
  void b(int imm26);

  // Conditional branch to PC offset.
  void b(int imm19, Condition cond);

  // Branch-link to label / pc offset.
  void bl(Label* label);
  void bl(int imm26);

  // Compare and branch to label / pc offset if zero.
  void cbz(const Register& rt, Label* label);
  void cbz(const Register& rt, int imm19);

  // Compare and branch to label / pc offset if not zero.
  void cbnz(const Register& rt, Label* label);
  void cbnz(const Register& rt, int imm19);

  // Test bit and branch to label / pc offset if zero.
  void tbz(const Register& rt, unsigned bit_pos, Label* label);
  void tbz(const Register& rt, unsigned bit_pos, int imm14);

  // Test bit and branch to label / pc offset if not zero.
  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void tbnz(const Register& rt, unsigned bit_pos, int imm14);

  // Address calculation instructions.
  // Calculate a PC-relative address. Unlike for branches, the offset in adr is
  // unscaled (i.e. the result can be unaligned).
  void adr(const Register& rd, Label* label);
  void adr(const Register& rd, int imm21);
  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add and update status flags.
  void adds(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare negative.
  void cmn(const Register& rn, const Operand& operand);

  // Subtract.
  void sub(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract and update status flags.
  void subs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare.
  void cmp(const Register& rn, const Operand& operand);

  // Negate.
  void neg(const Register& rd,
           const Operand& operand);

  // Negate and update status flags.
  void negs(const Register& rd,
            const Operand& operand);

  // Add with carry bit.
  void adc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add with carry bit and update status flags.
  void adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Subtract with carry bit.
  void sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract with carry bit and update status flags.
  void sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Negate with carry bit.
  void ngc(const Register& rd,
           const Operand& operand);

  // Negate with carry bit and update status flags.
  void ngcs(const Register& rd,
            const Operand& operand);
  // Logical instructions.
  // Bitwise and (A & B).
  void and_(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise and (A & B) and update status flags.
  void ands(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bit test, and set flags.
  void tst(const Register& rn, const Operand& operand);

  // Bit clear (A & ~B).
  void bic(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Bit clear (A & ~B) and update status flags.
  void bics(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise or (A | B).
  void orr(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise or-not (A | ~B).
  void orn(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor/xor (A ^ B).
  void eor(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eon/xnor (A ^ ~B).
  void eon(const Register& rd, const Register& rn, const Operand& operand);

  // Logical shift left variable.
  void lslv(const Register& rd, const Register& rn, const Register& rm);

  // Logical shift right variable.
  void lsrv(const Register& rd, const Register& rn, const Register& rm);

  // Arithmetic shift right variable.
  void asrv(const Register& rd, const Register& rn, const Register& rm);

  // Rotate right variable.
  void rorv(const Register& rd, const Register& rn, const Register& rm);
  // Bitfield instructions.
  // Bitfield move.
  void bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms);

  // Signed bitfield move.
  void sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Unsigned bitfield move.
  void ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Bfm aliases.
  // Bitfield insert.
  void bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Bitfield extract and insert low.
  void bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, lsb, lsb + width - 1);
  }
  // Sbfm aliases.
  // Arithmetic shift right.
  void asr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Signed bitfield insert in zero.
  void sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Signed bitfield extract.
  void sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, lsb, lsb + width - 1);
  }

  // Signed extend byte.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }
  // Ubfm aliases.
  // Logical shift left.
  void lsl(const Register& rd, const Register& rn, unsigned shift) {
    unsigned reg_size = rd.SizeInBits();
    DCHECK(shift < reg_size);
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
  }

  // Logical shift right.
  void lsr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Unsigned bitfield insert in zero.
  void ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Unsigned bitfield extract.
  void ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, lsb, lsb + width - 1);
  }

  // Unsigned extend byte.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }
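
  // Worked example (illustrative, not part of the original header): for a
  // 64-bit rd, lsl(rd, rn, 8) expands to
  //   ubfm(rd, rn, (64 - 8) % 64, 64 - 8 - 1) == ubfm(rd, rn, 56, 55),
  // which copies the low 55 + 1 == 56 bits of rn into rd starting at bit
  // 64 - 56 == 8 and zeroes the rest, i.e. exactly a logical shift left by 8.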
  // Extract.
  void extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb);

  // Conditional select: rd = cond ? rn : rm.
  void csel(const Register& rd,
            const Register& rn,
            const Register& rm,
            Condition cond);

  // Conditional select increment: rd = cond ? rn : rm + 1.
  void csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select inversion: rd = cond ? rn : ~rm.
  void csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select negation: rd = cond ? rn : -rm.
  void csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional set: rd = cond ? 1 : 0.
  void cset(const Register& rd, Condition cond);

  // Conditional set minus: rd = cond ? -1 : 0.
  void csetm(const Register& rd, Condition cond);

  // Conditional increment: rd = cond ? rn + 1 : rn.
  void cinc(const Register& rd, const Register& rn, Condition cond);

  // Conditional invert: rd = cond ? ~rn : rn.
  void cinv(const Register& rd, const Register& rn, Condition cond);

  // Conditional negate: rd = cond ? -rn : rn.
  void cneg(const Register& rd, const Register& rn, Condition cond);

  // Rotate right.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }
  // Conditional comparison.
  // Conditional compare negative.
  void ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Conditional compare.
  void ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
  void mul(const Register& rd, const Register& rn, const Register& rm);

  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
  void madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
  void mneg(const Register& rd, const Register& rn, const Register& rm);

  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
  void msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply.
  void smull(const Register& rd, const Register& rn, const Register& rm);

  // Xd = bits<127:64> of Xn * Xm.
  void smulh(const Register& rd, const Register& rn, const Register& rm);

  // Signed 32 x 32 -> 64-bit multiply and accumulate.
  void smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
  void umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply and subtract.
  void smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
  void umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed integer divide.
  void sdiv(const Register& rd, const Register& rn, const Register& rm);

  // Unsigned integer divide.
  void udiv(const Register& rd, const Register& rn, const Register& rm);

  // Bit count, bit reverse and endian reverse.
  void rbit(const Register& rd, const Register& rn);
  void rev16(const Register& rd, const Register& rn);
  void rev32(const Register& rd, const Register& rn);
  void rev(const Register& rd, const Register& rn);
  void clz(const Register& rd, const Register& rn);
  void cls(const Register& rd, const Register& rn);
  // Memory instructions.

  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& src);

  // Store integer or FP register pair.
  void stp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& dst);

  // Load word pair with sign extension.
  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);

  // Load integer or FP register pair, non-temporal.
  void ldnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& src);

  // Store integer or FP register pair, non-temporal.
  void stnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& dst);

  // Load literal to register from a pc-relative address.
  void ldr_pcrel(const CPURegister& rt, int imm19);

  // Load literal to register.
  void ldr(const CPURegister& rt, const Immediate& imm);
  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // that is equal to the 64-bit immediate argument. If an explicit left shift
  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
  //
  // For movk, an explicit shift can be used to indicate which half word should
  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
  // most-significant.

  // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVK);
  }

  // Move with non-zero.
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVN);
  }

  // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVZ);
  }
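
  // Illustrative sketch (not part of the original header): a 64-bit constant
  // can be materialized with a movz followed by a movk for each remaining
  // non-zero half word, e.g. for 0x0000001234561000:
  //   movz(x0, 0x1000, 0);   // x0 = 0x0000000000001000
  //   movk(x0, 0x3456, 16);  // x0 = 0x0000000034561000
  //   movk(x0, 0x0012, 32);  // x0 = 0x0000001234561000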
  // Misc instructions.
  // Monitor debug-mode breakpoint.
  void brk(int code);

  // Halting debug-mode breakpoint.
  void hlt(int code);

  // Move register to register.
  void mov(const Register& rd, const Register& rn);

  // Move NOT(operand) to register.
  void mvn(const Register& rd, const Operand& operand);

  // System instructions.
  // Move to register from system register.
  void mrs(const Register& rt, SystemRegister sysreg);

  // Move from register to system register.
  void msr(SystemRegister sysreg, const Register& rt);

  // System hint.
  void hint(SystemHint code);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

  // Data synchronization barrier.
  void dsb(BarrierDomain domain, BarrierType type);

  // Instruction synchronization barrier.
  void isb();

  // Alias for system instructions.
  void nop() { hint(NOP); }

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    DEBUG_BREAK_NOP,
    INTERRUPT_CODE_NOP,
    ADR_FAR_NOP,
    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
    LAST_NOP_MARKER = ADR_FAR_NOP
  };

  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }
  // FP instructions.
  // Move immediate to FP register.
  void fmov(FPRegister fd, double imm);
  void fmov(FPRegister fd, float imm);

  // Move FP register to register.
  void fmov(Register rd, FPRegister fn);

  // Move register to FP register.
  void fmov(FPRegister fd, Register rn);

  // Move FP register to FP register.
  void fmov(FPRegister fd, FPRegister fn);

  // FP add.
  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP subtract.
  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP multiply.
  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP fused multiply and add.
  void fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply and subtract.
  void fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply, add and negate.
  void fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP fused multiply, subtract and negate.
  void fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP divide.
  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum.
  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum.
  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum number.
  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum number.
  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP absolute.
  void fabs(const FPRegister& fd, const FPRegister& fn);

  // FP negate.
  void fneg(const FPRegister& fd, const FPRegister& fn);

  // FP square root.
  void fsqrt(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to away).
  void frinta(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (toward minus infinity).
  void frintm(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to even).
  void frintn(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards zero).
  void frintz(const FPRegister& fd, const FPRegister& fn);

  // FP compare registers.
  void fcmp(const FPRegister& fn, const FPRegister& fm);

  // FP compare immediate.
  void fcmp(const FPRegister& fn, double value);

  // FP conditional compare.
  void fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond);

  // FP conditional select.
  void fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond);

  // Common FP Convert function.
  void FPConvertToInt(const Register& rd,
                      const FPRegister& fn,
                      FPIntegerConvertOp op);

  // FP convert between single and double precision.
  void fcvt(const FPRegister& fd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to away).
  void fcvtau(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to away).
  void fcvtas(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards -infinity).
  void fcvtmu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards -infinity).
  void fcvtms(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to even).
  void fcvtnu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to even).
  void fcvtns(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards zero).
  void fcvtzu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards zero).
  void fcvtzs(const Register& rd, const FPRegister& fn);

  // Convert signed integer or fixed point to FP.
  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Convert unsigned integer or fixed point to FP.
  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }

  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char* string);

  // Pseudo-instructions ------------------------------------------------------

  // Parameters are described in arm64/instructions-arm64.h.
  void debug(const char* message, uint32_t code, Instr params = BREAK);

  // Required by V8.
  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }
  // Code generation helpers --------------------------------------------------

  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  Instruction* pc() const { return Instruction::Cast(pc_); }

  Instruction* InstructionAt(int offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }
  // Register encoding.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }
  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding.
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int64_t imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(uint64_t imm);
  inline static Instr ShiftMoveWide(int64_t shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);
  // Class for scoping postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };
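
  // Illustrative usage sketch (not part of the original header), assuming an
  // Assembler instance named masm:
  //   {
  //     Assembler::BlockConstPoolScope scope(&masm);
  //     // Emit a fixed-length sequence that must not be interleaved with a
  //     // constant pool.
  //   }  // Constant pool emission may resume here.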
1884 // Check if is time to emit a constant pool.
1885 void CheckConstPool(bool force_emit, bool require_jump);
1887 // Allocate a constant pool of the correct size for the generated code.
1888 Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
1890 // Generate the constant pool for the generated code.
1891 void PopulateConstantPool(ConstantPoolArray* constant_pool);

  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }

  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future, for example, if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;

  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);

  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }
    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
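
  // Illustrative note (not from the original header): unlike
  // BlockConstPoolScope, this scope blocks both the constant pool and the
  // veneer pool, so it is the one to use around sequences whose exact
  // instruction count matters, e.g. patchable call sites.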

 protected:
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);

  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);

  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
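
  // Illustrative examples (not from the original header): the opcode is
  // selected from the register's size and type, e.g.
  //   LoadOpFor(w0) -> LDR_w      StoreOpFor(x0) -> STR_x
  //   LoadOpFor(d0) -> LDR_d      StoreOpFor(s0) -> STR_s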

  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used for other branches in the
  // chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);

 private:
  // Instruction helpers.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void LoadStorePairNonTemporal(const CPURegister& rt,
                                const CPURegister& rt2,
                                const MemOperand& addr,
                                LoadStorePairNonTemporalOp op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);

  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);

  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
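
  // Illustrative arithmetic (not from the original header): with
  // kInstructionSize == 4, SetNextConstPoolCheckIn(128) at pc_offset() == 0x100
  // schedules the next check at 0x100 + 128 * 4 == 0x300.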

  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    CheckBuffer();
  }
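
  // Illustrative sketch (not from the original header): emitting functions
  // OR the opcode with the field-encoding helpers declared above, roughly as
  //   Emit(SF(rd) | ADD | Rd(rd) | Rn(rn) | Rm(rm));
  // so each helper contributes one bitfield of the final 32-bit instruction.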

  // Emit data inline in the instruction stream.
  void EmitData(void const * data, unsigned size) {
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register that we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
    CheckBuffer();
  }

  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();

  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;

  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance or
  //  * the number of entries in the pool is above a pre-defined size or
  //  * code generation is finished
  // If a pool needs to be emitted before code generation is finished, a branch
  // over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated to the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;

  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with pc-relative loads, therefore this cannot be more than
  // 1 * MB. Since constant pool emission checks are interval-based, this value
  // is an approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval-based, this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation
  // Each relocation is encoded as a variable-size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;

  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are temporarily
  // stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();

 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }

  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;

 public:
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };

 private:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member gives information about the unresolved branch. The first
  // member of the pair is the maximum offset that the branch can reach in the
  // buffer. The map is sorted according to this reachable offset, making it
  // easy to check when veneers need to be emitted.
  // Note that the maximum reachable offset (first member of the pairs) should
  // always be positive but has the same type as the return value for
  // pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;
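
  // Illustrative sketch (not from the original header): when a branch to an
  // unbound label is emitted, an entry is recorded roughly as
  //   unresolved_branches_.insert(std::pair<int, FarBranchInfo>(
  //       max_reachable_pc, FarBranchInfo(pc_offset(), label)));
  // and removed again once the label is bound or a veneer is emitted.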

  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger-in-the-air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }

  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;

 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly
  // to the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, and for each PC-relative instruction in the chain checking
  // if pending unresolved information exists. Its complexity is proportional to
  // the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);

  PositionsRecorder positions_recorder_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
};

class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // Where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Instruction* start, unsigned count)
      : Assembler(NULL,
                  reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    CpuFeatures::FlushICache(buffer_, length);
  }
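
  // Illustrative usage (not from the original header): overwrite exactly one
  // instruction in already-generated code; the destructor's DCHECKs verify
  // that exactly `count` instructions were emitted:
  //
  //   PatchingAssembler patcher(instr_address, 1);  // instr_address: Instruction*
  //   patcher.brk(0);  // exactly one instruction must be emitted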

  // See definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};

class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
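
// Illustrative usage (not from the original header): emitting functions
// typically construct an EnsureSpace on entry, so the buffer is grown before
// any bytes are written:
//
//   void Assembler::SomeEmittingFunction() {  // hypothetical emitter
//     EnsureSpace ensure_space(this);
//     ...
//   }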

} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_H_