// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_

#include <map>
#include <vector>

#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/globals.h"
#include "src/serialize.h"
#include "src/utils.h"


namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Registers.
#define REGISTER_CODE_LIST(R) \
  R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)  \
  R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15) \
  R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
  R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;


// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
struct Register;
struct FPRegister;


struct CPURegister {
  enum RegisterType {
    // The kInvalid value is used to detect uninitialized static instances,
    // which are always zero-initialized before any constructors are called.
    kInvalid = 0,
    kRegister,
    kFPRegister,
    kNoRegister
  };

  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
    CPURegister r = {code, size, type};
    return r;
  }

  unsigned code() const;
  RegisterType type() const;
  unsigned SizeInBits() const;
  int SizeInBytes() const;
  bool Is32Bits() const;
  bool Is64Bits() const;
  bool IsValidOrNone() const;
  bool IsValidRegister() const;
  bool IsValidFPRegister() const;
  bool Is(const CPURegister& other) const;
  bool Aliases(const CPURegister& other) const;
  bool IsRegister() const;
  bool IsFPRegister() const;
  bool IsSameSizeAndType(const CPURegister& other) const;

  // V8 compatibility.
  bool is(const CPURegister& other) const { return Is(other); }
  bool is_valid() const { return IsValid(); }

  unsigned reg_code;
  unsigned reg_size;
  RegisterType reg_type;
};


struct Register : public CPURegister {
  static Register Create(unsigned code, unsigned size) {
    return Register(CPURegister::Create(code, size, CPURegister::kRegister));
  }

  Register() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit Register(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  Register(const Register& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsRegister() || IsNone());
    return IsValidRegister();
  }

  static Register XRegFromCode(unsigned code);
  static Register WRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  // These members are necessary for compilation.
  // A few of them may be unused for now.

  static const int kNumRegisters = kNumberOfRegisters;
  static int NumRegisters() { return kNumRegisters; }

  // We allow crankshaft to use the following registers:
  //   - x0 to x15
  //   - x18 to x24
  //   - x27 (also context)
  //
  // TODO(all): Register x25 is currently free and could be available for
  // crankshaft, but we don't use it as we might use it as a per function
  // literal pool pointer in the future.
  //
  // TODO(all): Consider storing cp in x25 to have only two ranges.
  // We split the allocatable registers into three ranges called:
  //   - low range
  //   - high range
  //   - context
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 15;
  static const unsigned kAllocatableHighRangeBegin = 18;
  static const unsigned kAllocatableHighRangeEnd = 24;
  static const unsigned kAllocatableContext = 27;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1;  // cp
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return ((reg_code == kAllocatableContext) ||
            (reg_code <= kAllocatableLowRangeEnd) ||
            ((reg_code >= kAllocatableHighRangeBegin) &&
             (reg_code <= kAllocatableHighRangeEnd)));
  }

  static Register FromAllocationIndex(unsigned index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
    // cp is the last allocatable register.
    if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
      return from_code(kAllocatableContext);
    }

    // Handle low and high ranges.
    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }
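
  // For example, given the constants above: index 0 -> x0, index 15 -> x15,
  // index 16 -> x18 (the gap skips x16 and x17), index 22 -> x24, and the
  // last index, 23, -> x27 (cp).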

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 15) &&
           (kAllocatableHighRangeBegin == 18) &&
           (kAllocatableHighRangeEnd == 24) &&
           (kAllocatableContext == 27));
    const char* const names[] = {
      "x0", "x1", "x2", "x3", "x4",
      "x5", "x6", "x7", "x8", "x9",
      "x10", "x11", "x12", "x13", "x14",
      "x15", "x18", "x19", "x20", "x21",
      "x22", "x23", "x24", "x27",
    };
    return names[index];
  }

  static int ToAllocationIndex(Register reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();
    if (code == kAllocatableContext) {
      return NumAllocatableRegisters() - 1;
    }

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static Register from_code(int code) {
    // Always return an X register.
    return Register::Create(code, kXRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};


struct FPRegister : public CPURegister {
  static FPRegister Create(unsigned code, unsigned size) {
    return FPRegister(
        CPURegister::Create(code, size, CPURegister::kFPRegister));
  }

  FPRegister() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit FPRegister(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsFPRegister() || IsNone());
    return IsValidFPRegister();
  }

  static FPRegister SRegFromCode(unsigned code);
  static FPRegister DRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  static const int kMaxNumRegisters = kNumberOfFPRegisters;

  // Crankshaft can use all the FP registers except:
  //   - d15 which is used to keep the 0 double value
  //   - d29 which is used in crankshaft as a double scratch register
  //   - d30 and d31 which are used in the MacroAssembler as double scratch
  //     registers (see the aliases below)
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 14;
  static const unsigned kAllocatableHighRangeBegin = 16;
  static const unsigned kAllocatableHighRangeEnd = 28;

  static const RegList kAllocatableFPRegisters = 0x1fff7fff;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }

  // TODO(turbofan): Proper float32 support.
  static int NumAllocatableAliasedRegisters() {
    return NumAllocatableRegisters();
  }

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return (Bit() & kAllocatableFPRegisters) != 0;
  }

  static FPRegister FromAllocationIndex(unsigned int index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));

    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }
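
  // For example, given the ranges above: index 0 -> d0, index 14 -> d14,
  // index 15 -> d16 (the gap skips d15), and index 27 -> d28.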

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 14) &&
           (kAllocatableHighRangeBegin == 16) &&
           (kAllocatableHighRangeEnd == 28));
    const char* const names[] = {
      "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11", "d12", "d13", "d14",
      "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27", "d28"
    };
    return names[index];
  }

  static int ToAllocationIndex(FPRegister reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static FPRegister from_code(int code) {
    // Always return a D register.
    return FPRegister::Create(code, kDRegSizeInBits);
  }
  // End of V8 compatibility section -----------------------
};


STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));


#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
      &init_##register_class##_##name)
#else
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)

// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

// v8 compatibility.
INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

#define DEFINE_REGISTERS(N)                                     \
  INITIALIZE_REGISTER(Register, w##N, N,                        \
                      kWRegSizeInBits, CPURegister::kRegister); \
  INITIALIZE_REGISTER(Register, x##N, N,                        \
                      kXRegSizeInBits, CPURegister::kRegister);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS

INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
                    CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
                    CPURegister::kRegister);

#define DEFINE_FPREGISTERS(N)                                     \
  INITIALIZE_REGISTER(FPRegister, s##N, N,                        \
                      kSRegSizeInBits, CPURegister::kFPRegister); \
  INITIALIZE_REGISTER(FPRegister, d##N, N,                        \
                      kDRegSizeInBits, CPURegister::kFPRegister);
REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS

#undef INITIALIZE_REGISTER

// Register aliases.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
ALIAS_REGISTER(Register, wip1, w17);
// Root register.
ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
// We use a register as a JS stack pointer to overcome the restriction on the
// architectural SP alignment.
// We chose x28 because it is contiguous with the other specific purpose
// registers.
STATIC_ASSERT(kJSSPCode == 28);
ALIAS_REGISTER(Register, jssp, x28);
ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);

// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
// Crankshaft double scratch register.
ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
// MacroAssembler double scratch registers.
ALIAS_REGISTER(FPRegister, fp_scratch, d30);
ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
ALIAS_REGISTER(FPRegister, fp_scratch2, d31);

#undef ALIAS_REGISTER


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
                                              Register reg2 = NoReg,
                                              Register reg3 = NoReg,
                                              Register reg4 = NoReg);

// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3 = NoReg,
                const CPURegister& reg4 = NoReg,
                const CPURegister& reg5 = NoReg,
                const CPURegister& reg6 = NoReg,
                const CPURegister& reg7 = NoReg,
                const CPURegister& reg8 = NoReg);

// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3 = NoCPUReg,
                        const CPURegister& reg4 = NoCPUReg,
                        const CPURegister& reg5 = NoCPUReg,
                        const CPURegister& reg6 = NoCPUReg,
                        const CPURegister& reg7 = NoCPUReg,
                        const CPURegister& reg8 = NoCPUReg);

typedef FPRegister DoubleRegister;


// -----------------------------------------------------------------------------
// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
        size_(reg1.SizeInBits()), type_(reg1.type()) {
    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    DCHECK(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    DCHECK(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size,
             unsigned first_reg, unsigned last_reg)
      : size_(size), type_(type) {
    DCHECK(((type == CPURegister::kRegister) &&
            (last_reg < kNumberOfRegisters)) ||
           ((type == CPURegister::kFPRegister) &&
            (last_reg < kNumberOfFPRegisters)));
    DCHECK(last_reg >= first_reg);
    list_ = (1UL << (last_reg + 1)) - 1;
    list_ &= ~((1UL << first_reg) - 1);
    DCHECK(IsValid());
  }
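
  // For example (illustrative):
  //   CPURegList(CPURegister::kRegister, kXRegSizeInBits, 19, 21)
  // builds the list {x19, x20, x21}, i.e. ((1UL << 22) - 1) with bits 0-18
  // cleared: 0x00380000.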

  CPURegister::RegisterType type() const {
    DCHECK(IsValid());
    return type_;
  }

  RegList list() const {
    DCHECK(IsValid());
    return list_;
  }

  inline void set_list(RegList new_list) {
    DCHECK(IsValid());
    list_ = new_list;
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other);

  // Remove every register in the other CPURegList from this one. Registers
  // that do not exist in this list are ignored. The type of the registers in
  // the 'other' list must match those in this list.
  void Remove(const CPURegList& other);

  // Variants of Combine and Remove which take CPURegisters.
  void Combine(const CPURegister& other);
  void Remove(const CPURegister& other1,
              const CPURegister& other2 = NoCPUReg,
              const CPURegister& other3 = NoCPUReg,
              const CPURegister& other4 = NoCPUReg);

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code);
  void Remove(int code);

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  CPURegister PopLowestIndex();
  CPURegister PopHighestIndex();

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);

  // Registers saved as safepoints.
  static CPURegList GetSafepointSavedRegisters();

  bool IsEmpty() const {
    DCHECK(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other1,
                       const CPURegister& other2 = NoCPUReg,
                       const CPURegister& other3 = NoCPUReg,
                       const CPURegister& other4 = NoCPUReg) const {
    DCHECK(IsValid());
    RegList list = 0;
    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
    return (list_ & list) != 0;
  }

  int Count() const {
    DCHECK(IsValid());
    return CountSetBits(list_, kRegListSizeInBits);
  }

  unsigned RegisterSizeInBits() const {
    DCHECK(IsValid());
    return size_;
  }

  unsigned RegisterSizeInBytes() const {
    int size_in_bits = RegisterSizeInBits();
    DCHECK((size_in_bits % kBitsPerByte) == 0);
    return size_in_bits / kBitsPerByte;
  }

  unsigned TotalSizeInBytes() const {
    DCHECK(IsValid());
    return RegisterSizeInBytes() * Count();
  }

 private:
  RegList list_;
  unsigned size_;
  CPURegister::RegisterType type_;

  bool IsValid() const {
    const RegList kValidRegisters = 0x8000000ffffffff;
    const RegList kValidFPRegisters = 0x0000000ffffffff;
    switch (type_) {
      case CPURegister::kRegister:
        return (list_ & kValidRegisters) == list_;
      case CPURegister::kFPRegister:
        return (list_ & kValidFPRegisters) == list_;
      case CPURegister::kNoRegister:
        return list_ == 0;
      default:
        UNREACHABLE();
        return false;
    }
  }
};


// AAPCS64 callee-saved registers.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()


// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()

// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
 public:
  template<typename T>
  inline explicit Immediate(Handle<T> handle);

  // This is allowed to be an implicit constructor because Immediate is
  // a wrapper class that doesn't normally perform any type conversion.
  template<typename T>
  inline Immediate(T value);  // NOLINT(runtime/explicit)

  template<typename T>
  inline Immediate(T value, RelocInfo::Mode rmode);

  int64_t value() const { return value_; }
  RelocInfo::Mode rmode() const { return rmode_; }

 private:
  void InitializeHandle(Handle<Object> value);

  int64_t value_;
  RelocInfo::Mode rmode_;
};


// -----------------------------------------------------------------------------
// Operands.
const int kSmiShift = kSmiTagSize + kSmiShiftSize;
const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
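
// For example: on arm64, kSmiTagSize is 1 and kSmiShiftSize is 31, so
// kSmiShift is 32; a smi keeps its 32-bit payload in the upper word and
// kSmiShiftMask covers the (tagged) low 32 bits.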

// Represents an operand in a machine instruction.
class Operand {
  // TODO(all): If necessary, study in more detail which methods
  // TODO(all): should be inlined or not.
 public:
  // rm, {<shift> {#<shift_amount>}}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  inline Operand(Register reg,
                 Shift shift = LSL,
                 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, <extend> {#<shift_amount>}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  inline Operand(Register reg,
                 Extend extend,
                 unsigned shift_amount = 0);
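
  // Usage sketch (illustrative):
  //   Operand(x1)            // plain register
  //   Operand(x1, LSL, 4)    // x1 shifted left by 4
  //   Operand(w1, UXTW, 2)   // w1 zero-extended, then shifted left by 2
  //   Operand(123)           // immediate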

  template<typename T>
  inline explicit Operand(Handle<T> handle);

  // Implicit constructor for all int types, ExternalReference, and Smi.
  template<typename T>
  inline Operand(T t);  // NOLINT(runtime/explicit)

  // Implicit constructor for int types.
  template<typename T>
  inline Operand(T t, RelocInfo::Mode rmode);

  inline bool IsImmediate() const;
  inline bool IsShiftedRegister() const;
  inline bool IsExtendedRegister() const;
  inline bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  inline Operand ToExtendedRegister() const;
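
  // For example (a sketch of the equivalence, assuming a 64-bit register): an
  // LSL #2 operand becomes a UXTX #2 extended-register operand, the form
  // accepted by add/sub when csp is involved.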

  inline Immediate immediate() const;
  inline int64_t ImmediateValue() const;
  inline Register reg() const;
  inline Shift shift() const;
  inline Extend extend() const;
  inline unsigned shift_amount() const;

  // Relocation information.
  bool NeedsRelocation(const Assembler* assembler) const;

  // Helpers.
  inline static Operand UntagSmi(Register smi);
  inline static Operand UntagSmiAndScale(Register smi, int scale);

 private:
  Immediate immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
 public:
  inline MemOperand();
  inline explicit MemOperand(Register base,
                             int64_t offset = 0,
                             AddrMode addrmode = Offset);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Shift shift = LSL,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Extend extend,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             const Operand& offset,
                             AddrMode addrmode = Offset);

  const Register& base() const { return base_; }
  const Register& regoffset() const { return regoffset_; }
  int64_t offset() const { return offset_; }
  AddrMode addrmode() const { return addrmode_; }
  Shift shift() const { return shift_; }
  Extend extend() const { return extend_; }
  unsigned shift_amount() const { return shift_amount_; }
  inline bool IsImmediateOffset() const;
  inline bool IsRegisterOffset() const;
  inline bool IsPreIndex() const;
  inline bool IsPostIndex() const;

  // For offset modes, return the offset as an Operand. This helper cannot
  // handle indexed modes.
  inline Operand OffsetAsOperand() const;
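
  // Usage sketch (illustrative): MemOperand(x0, 8) is the [x0, #8] immediate-
  // offset form, MemOperand(x0, x1, LSL, 3) is the [x0, x1, lsl #3] register-
  // offset form, and MemOperand(x0, 16, PreIndex) / MemOperand(x0, 16,
  // PostIndex) are the [x0, #16]! and [x0], #16 indexed forms.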

  enum PairResult {
    kNotPair,   // Can't use a pair instruction.
    kPairAB,    // Can use a pair instruction (operandA has lower address).
    kPairBA     // Can use a pair instruction (operandB has lower address).
  };
  // Check if two MemOperands are consistent for stp/ldp use.
  static PairResult AreConsistentForPair(const MemOperand& operandA,
                                         const MemOperand& operandB,
                                         int access_size_log2 = kXRegSizeLog2);
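
  // For example (illustrative): with 64-bit accesses, [x0, #8] followed by
  // [x0, #16] is kPairAB, the same operands in the opposite order are
  // kPairBA, and [x0, #8] with [x0, #24] is kNotPair.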

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


class ConstPool {
 public:
  explicit ConstPool(Assembler* assm)
      : assm_(assm),
        first_use_(-1),
        shared_entries_count(0) {}
  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
  int EntryCount() const {
    return shared_entries_count + unique_entries_.size();
  }
  bool IsEmpty() const {
    return shared_entries_.empty() && unique_entries_.empty();
  }
  // Distance in bytes between the current pc and the first instruction
  // using the pool. If there are no pending entries return kMaxInt.
  int DistanceToFirstUse();
  // Offset after which instructions using the pool will be out of range.
  int MaxPcOffset();
  // Maximum size the constant pool can be with current entries. It always
  // includes alignment padding and branch over.
  int WorstCaseSize();
  // Size in bytes of the literal pool *if* it is emitted at the current
  // pc. The size will include the branch over the pool if it was requested.
  int SizeIfEmittedAtCurrentPc(bool require_jump);
  // Emit the literal pool at the current pc with a branch over the pool if
  // requested.
  void Emit(bool require_jump);
  // Discard any pending pool entries.
  void Clear();

 private:
  bool CanBeShared(RelocInfo::Mode mode);

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_;
  // values, pc offset(s) of entries which can be shared.
  std::multimap<uint64_t, int> shared_entries_;
  // Number of distinct literals in shared entries.
  int shared_entries_count;
  // values, pc offset of entries which cannot be shared.
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};


// -----------------------------------------------------------------------------
// Assembler.

class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);
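
  // A minimal usage sketch (buffer size and snippet are illustrative):
  //   Assembler masm(isolate, NULL, 4 * KB);  // assembler owns its buffer
  //   masm.add(x0, x1, Operand(x2));
  //   CodeDesc desc;
  //   masm.GetCode(&desc);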

  virtual ~Assembler();

  virtual void AbortedCodeGeneration() {
    constpool_.Clear();
  }

  // System functions ---------------------------------------------------------
  // Start generating code from the beginning of the buffer, discarding any
  // code and data that has already been emitted into the buffer.
  //
  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
  // constant pool is not blocked.
  void Reset();

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  //
  // The descriptor (desc) can be NULL. In that case, the code is finalized as
  // usual, but the descriptor is not populated.
  void GetCode(CodeDesc* desc);

  // Insert the smallest number of nop instructions possible to align the pc
  // offset to a multiple of m. m must be a power of 2 (>= 4).
  void Align(int m);

  inline void Unreachable();

  // Label --------------------------------------------------------------------
  // Bind a label to the current pc. Note that labels can only be bound once,
  // and if labels are linked to other instructions, they _must_ be bound
  // before they go out of scope.
  void bind(Label* label);


  // RelocInfo and pools ------------------------------------------------------

  // Record relocation information for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc.
  inline static Address target_pointer_address_at(Address pc);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Address target_address_at(Address pc,
                                          ConstantPoolArray* constant_pool);
  inline static void set_target_address_at(Address pc,
                                           ConstantPoolArray* constant_pool,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);
  static inline Address target_address_at(Address pc, Code* code);
  static inline void set_target_address_at(Address pc,
                                           Code* code,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);

  // Return the code target address at a call site from the return address of
  // that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  // Given the address of the beginning of a call, return the address in the
  // instruction stream that the call will return from.
  inline static Address return_address_from_call_start(Address pc);

  // Return the code target address of the patch debug break slot.
  inline static Address break_address_from_return_address(Address pc);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address constant_pool_entry, Code* code, Address target);

  // All addresses in the constant pool are the same size as pointers.
  static const int kSpecialTargetSize = kPointerSize;

  // The sizes of the call sequences emitted by MacroAssembler::Call.
  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
  // as it will choose the correct value for a given relocation mode.
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
  static const int kCallSizeWithRelocation = 2 * kInstructionSize;

  // Size of the generated code in bytes.
  uint64_t SizeOfGeneratedCode() const {
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }

  // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();
  }

  // Check the size of the code generated since the given label. This function
  // is used primarily to work around comparisons between signed and unsigned
  // quantities, since V8 uses both.
  // TODO(jbramley): Work out what sign to use for these things and if
  // possible, change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    DCHECK(size >= 0);
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }

  // Return the number of instructions generated from label to the
  // current position.
  int InstructionsGeneratedSince(const Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }

  // Number of instructions generated for the return sequence in
  // FullCodeGenerator::EmitReturnSequence.
  static const int kJSRetSequenceInstructions = 7;
  // Distance between start of patched return sequence and the emitted address
  // to patch.
  static const int kPatchReturnSequenceAddressOffset = 0;
  static const int kPatchDebugBreakSlotAddressOffset = 0;

  // Number of instructions necessary to be able to later patch it to a call.
  // See DebugCodegen::GenerateSlot() and
  // BreakLocationIterator::SetDebugBreakAtSlot().
  static const int kDebugBreakSlotInstructions = 4;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstructionSize;

  static const int kPatchDebugBreakSlotReturnOffset = 2 * kInstructionSize;

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool();

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool();

  bool is_const_pool_blocked() const;
  static bool IsConstantPoolAt(Instruction* instr);
  static int ConstantPoolSizeAt(Instruction* instr);
  // See Assembler::CheckConstPool for more info.
  void EmitPoolGuard();

  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
  void StartBlockVeneerPool();

  // Resume veneer pool emission. Needs to be called as many times as
  // StartBlockVeneerPool to have an effect.
  void EndBlockVeneerPool();

  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }

  // Block/resume emission of constant pools and veneer pools.
  void StartBlockPools() {
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  void EndBlockPools() {
    EndBlockConstPool();
    EndBlockVeneerPool();
  }

  // Debugging ----------------------------------------------------------------
  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
  void RecordComment(const char* msg);
  int buffer_space() const;

  // Mark address of the ExitJSFrame code.
  void RecordJSReturn();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot();

  // Record the emission of a constant pool.
  //
  // The emission of constant and veneer pools depends on the size of the code
  // generated and the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
  // and cause the version of the code with debugger support to have pools
  // generated in different places.
  // Recording the position and size of emitted pools makes it possible to
  // correctly compute the offset mappings between the different versions of a
  // function in all situations.
  //
  // The parameter indicates the size of the pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);


  // Instruction set functions ------------------------------------------------

  // Branch / Jump instructions.
  // For branches, offsets are scaled: they are in instructions, not in bytes.
  // Branch to register.
  void br(const Register& xn);

  // Branch-link to register.
  void blr(const Register& xn);

  // Branch to register with return hint.
  void ret(const Register& xn = lr);

  // Unconditional branch to label.
  void b(Label* label);

  // Conditional branch to label.
  void b(Label* label, Condition cond);

  // Unconditional branch to PC offset.
  void b(int imm26);

  // Conditional branch to PC offset.
  void b(int imm19, Condition cond);

  // Branch-link to label / pc offset.
  void bl(Label* label);
  void bl(int imm26);

  // Compare and branch to label / pc offset if zero.
  void cbz(const Register& rt, Label* label);
  void cbz(const Register& rt, int imm19);

  // Compare and branch to label / pc offset if not zero.
  void cbnz(const Register& rt, Label* label);
  void cbnz(const Register& rt, int imm19);

  // Test bit and branch to label / pc offset if zero.
  void tbz(const Register& rt, unsigned bit_pos, Label* label);
  void tbz(const Register& rt, unsigned bit_pos, int imm14);

  // Test bit and branch to label / pc offset if not zero.
  void tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void tbnz(const Register& rt, unsigned bit_pos, int imm14);

  // Address calculation instructions.
  // Calculate a PC-relative address. Unlike for branches, the offset in adr is
  // unscaled (i.e. the result can be unaligned).
  void adr(const Register& rd, Label* label);
  void adr(const Register& rd, int imm21);

  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add and update status flags.
  void adds(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare negative.
  void cmn(const Register& rn, const Operand& operand);

  // Subtract.
  void sub(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract and update status flags.
  void subs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare.
  void cmp(const Register& rn, const Operand& operand);

  // Negate.
  void neg(const Register& rd,
           const Operand& operand);

  // Negate and update status flags.
  void negs(const Register& rd,
            const Operand& operand);

  // Add with carry bit.
  void adc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add with carry bit and update status flags.
  void adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Subtract with carry bit.
  void sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract with carry bit and update status flags.
  void sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Negate with carry bit.
  void ngc(const Register& rd,
           const Operand& operand);

  // Negate with carry bit and update status flags.
  void ngcs(const Register& rd,
            const Operand& operand);

  // Logical instructions.
  // Bitwise and (A & B).
  void and_(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise and (A & B) and update status flags.
  void ands(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bit test, and set flags.
  void tst(const Register& rn, const Operand& operand);

  // Bit clear (A & ~B).
  void bic(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Bit clear (A & ~B) and update status flags.
  void bics(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise or (A | B).
  void orr(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise or-not (A | ~B).
  void orn(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor/xor (A ^ B).
  void eor(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor-not/xnor (A ^ ~B).
  void eon(const Register& rd, const Register& rn, const Operand& operand);

  // Logical shift left variable.
  void lslv(const Register& rd, const Register& rn, const Register& rm);

  // Logical shift right variable.
  void lsrv(const Register& rd, const Register& rn, const Register& rm);

  // Arithmetic shift right variable.
  void asrv(const Register& rd, const Register& rn, const Register& rm);

  // Rotate right variable.
  void rorv(const Register& rd, const Register& rn, const Register& rm);

  // Bitfield instructions.
  // Bitfield move.
  void bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms);

  // Signed bitfield move.
  void sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Unsigned bitfield move.
  void ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Bfm aliases.
  // Bitfield insert.
  void bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }
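
  // For example: on a 64-bit register, bfi(x0, x1, 8, 4) inserts the low four
  // bits of x1 at bit position 8 of x0, which by the formula above encodes as
  // bfm(x0, x1, 56, 3).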

  // Bitfield extract and insert low.
  void bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, lsb, lsb + width - 1);
  }

  // Sbfm aliases.
  // Arithmetic shift right.
  void asr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Signed bitfield insert in zero.
  void sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Signed bitfield extract.
  void sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, lsb, lsb + width - 1);
  }

  // Signed extend byte.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }

  // Ubfm aliases.
  // Logical shift left.
  void lsl(const Register& rd, const Register& rn, unsigned shift) {
    unsigned reg_size = rd.SizeInBits();
    DCHECK(shift < reg_size);
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
  }
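
  // For example: lsl(x0, x1, 8) encodes as ubfm(x0, x1, 56, 55), since
  // immr = (64 - 8) % 64 and imms = 64 - 8 - 1.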

  // Logical shift right.
  void lsr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Unsigned bitfield insert in zero.
  void ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Unsigned bitfield extract.
  void ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, lsb, lsb + width - 1);
  }

  // Unsigned extend byte.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }

  // Extract.
  void extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb);

  // Conditional select: rd = cond ? rn : rm.
  void csel(const Register& rd,
            const Register& rn,
            const Register& rm,
            Condition cond);

  // Conditional select increment: rd = cond ? rn : rm + 1.
  void csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select inversion: rd = cond ? rn : ~rm.
  void csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select negation: rd = cond ? rn : -rm.
  void csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional set: rd = cond ? 1 : 0.
  void cset(const Register& rd, Condition cond);

  // Conditional set minus: rd = cond ? -1 : 0.
  void csetm(const Register& rd, Condition cond);

  // Conditional increment: rd = cond ? rn + 1 : rn.
  void cinc(const Register& rd, const Register& rn, Condition cond);

  // Conditional invert: rd = cond ? ~rn : rn.
  void cinv(const Register& rd, const Register& rn, Condition cond);

  // Conditional negate: rd = cond ? -rn : rn.
  void cneg(const Register& rd, const Register& rn, Condition cond);

  // Extr aliases.
  // Rotate right.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }

  // Conditional comparison.
  // Conditional compare negative.
  void ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Conditional compare.
  void ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
  void mul(const Register& rd, const Register& rn, const Register& rm);

  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
  void madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
  void mneg(const Register& rd, const Register& rn, const Register& rm);

  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
  void msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // 32 x 32 -> 64-bit multiply.
  void smull(const Register& rd, const Register& rn, const Register& rm);

  // Xd = bits<127:64> of Xn * Xm.
  void smulh(const Register& rd, const Register& rn, const Register& rm);

  // Signed 32 x 32 -> 64-bit multiply and accumulate.
  void smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
  void umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply and subtract.
  void smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
  void umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed integer divide.
  void sdiv(const Register& rd, const Register& rn, const Register& rm);

  // Unsigned integer divide.
  void udiv(const Register& rd, const Register& rn, const Register& rm);

  // Bit count, bit reverse and endian reverse.
  void rbit(const Register& rd, const Register& rn);
  void rev16(const Register& rd, const Register& rn);
  void rev32(const Register& rd, const Register& rn);
  void rev(const Register& rd, const Register& rn);
  void clz(const Register& rd, const Register& rn);
  void cls(const Register& rd, const Register& rn);

  // Memory instructions.

  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& src);

  // Store integer or FP register pair.
  void stp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& dst);

  // Load word pair with sign extension.
  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);

  // Load integer or FP register pair, non-temporal.
  void ldnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& src);

  // Store integer or FP register pair, non-temporal.
  void stnp(const CPURegister& rt, const CPURegister& rt2,
            const MemOperand& dst);

  // Load literal to register from a pc relative address.
  void ldr_pcrel(const CPURegister& rt, int imm19);

  // Load literal to register.
  void ldr(const CPURegister& rt, const Immediate& imm);

  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // such that the result equals the 64-bit immediate argument. If an explicit
  // left shift is specified (0, 16, 32 or 48), the immediate must be a 16-bit
  // value.
  //
  // For movk, an explicit shift can be used to indicate which half word should
  // be overwritten, eg. movk(x0, 0, 0) will overwrite the least-significant
  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
  // most-significant.
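
  // A sketch of loading an arbitrary 64-bit constant with this family
  // (illustrative values):
  //   movz(x0, 0xdef0);      // x0 = 0x000000000000def0
  //   movk(x0, 0x9abc, 16);  // x0 = 0x000000009abcdef0
  //   movk(x0, 0x5678, 32);  // x0 = 0x000056789abcdef0
  //   movk(x0, 0x1234, 48);  // x0 = 0x123456789abcdef0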

  // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVK);
  }

  // Move with NOT (writes the inverse of the shifted immediate).
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVN);
  }

  // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVZ);
  }

  // Misc instructions.
  // Monitor debug-mode breakpoint.
  void brk(int code);

  // Halting debug-mode breakpoint.
  void hlt(int code);

  // Move register to register.
  void mov(const Register& rd, const Register& rn);

  // Move NOT(operand) to register.
  void mvn(const Register& rd, const Operand& operand);

  // System instructions.
  // Move to register from system register.
  void mrs(const Register& rt, SystemRegister sysreg);

  // Move from register to system register.
  void msr(SystemRegister sysreg, const Register& rt);

  // System hint.
  void hint(SystemHint code);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

  // Data synchronization barrier.
  void dsb(BarrierDomain domain, BarrierType type);

  // Instruction synchronization barrier.
  void isb();

  // Alias for system instructions.
  void nop() { hint(NOP); }

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    DEBUG_BREAK_NOP,
    INTERRUPT_CODE_NOP,
    ADR_FAR_NOP,
    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
    LAST_NOP_MARKER = ADR_FAR_NOP
  };

  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }

  // FP instructions.
  // Move immediate to FP register.
  void fmov(FPRegister fd, double imm);
  void fmov(FPRegister fd, float imm);

  // Move FP register to register.
  void fmov(Register rd, FPRegister fn);

  // Move register to FP register.
  void fmov(FPRegister fd, Register rn);

  // Move FP register to FP register.
  void fmov(FPRegister fd, FPRegister fn);

  // FP add.
  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP subtract.
  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP multiply.
  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP fused multiply and add.
  void fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply and subtract.
  void fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply, add and negate.
  void fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP fused multiply, subtract and negate.
  void fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP divide.
  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum.
  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum.
  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum number.
  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum number.
  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP absolute.
  void fabs(const FPRegister& fd, const FPRegister& fn);

  // FP negate.
  void fneg(const FPRegister& fd, const FPRegister& fn);

  // FP square root.
  void fsqrt(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to away).
  void frinta(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards minus infinity).
  void frintm(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to even).
  void frintn(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards plus infinity).
  void frintp(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards zero).
  void frintz(const FPRegister& fd, const FPRegister& fn);

  // FP compare registers.
  void fcmp(const FPRegister& fn, const FPRegister& fm);

  // FP compare immediate.
  void fcmp(const FPRegister& fn, double value);

  // FP conditional compare.
  void fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond);

  // FP conditional select.
  void fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond);

  // Common FP Convert function.
  void FPConvertToInt(const Register& rd,
                      const FPRegister& fn,
                      FPIntegerConvertOp op);

  // FP convert between single and double precision.
  void fcvt(const FPRegister& fd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to away).
  void fcvtau(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to away).
  void fcvtas(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards -infinity).
  void fcvtmu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards -infinity).
  void fcvtms(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to even).
  void fcvtnu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to even).
  void fcvtns(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards zero).
  void fcvtzu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards zero).
  void fcvtzs(const Register& rd, const FPRegister& fn);

  // Convert signed integer or fixed point to FP.
  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Convert unsigned integer or fixed point to FP.
  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }

  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char* string);

  // Pseudo-instructions ------------------------------------------------------

  // Parameters are described in arm64/instructions-arm64.h.
  void debug(const char* message, uint32_t code, Instr params = BREAK);

  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }

  // Code generation helpers --------------------------------------------------

  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  Instruction* pc() const { return Instruction::Cast(pc_); }

  Instruction* InstructionAt(int offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }

  // Register encoding.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }

  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding.
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int64_t imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(uint64_t imm);
  inline static Instr ShiftMoveWide(int64_t shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);

  // Class for scoping postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };

  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  // Allocate a constant pool of the correct size for the generated code.
  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);

  // Generate the constant pool for the generated code.
  void PopulateConstantPool(ConstantPoolArray* constant_pool);
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }
  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future, for example, if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the code.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }
    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
 protected:
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);
  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);

  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);
  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
      const CPURegister& rt, const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used for other branches in the
  // chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
 private:
  // Instruction helpers.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void LoadStorePairNonTemporal(const CPURegister& rt,
                                const CPURegister& rt2,
                                const MemOperand& addr,
                                LoadStorePairNonTemporalOp op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);
  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);
  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);

  void RecordLiteral(int64_t imm, unsigned size);
  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    CheckBuffer();
  }

  // Emit data inline in the instruction stream.
  void EmitData(void const * data, unsigned size) {
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
    CheckBuffer();
  }

  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();
  // Pc offset of the next constant pool check.
  int next_constant_pool_check_;
  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance or
  //  * the number of entries in the pool is above a pre-defined size or
  //  * code generation is finished
  // If a pool needs to be emitted before code generation is finished, a branch
  // over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;
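  // At kInstructionSize == 4 bytes, an interval of 128 instructions means the
  // emission condition is re-evaluated roughly every 512 bytes of code.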
  // Distance to first use after which a pool will be emitted. Pool entries
  // are accessed with a pc-relative load, therefore this cannot be more than
  // 1 * MB. Since constant pool emission checks are interval based, this value
  // is an approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;
  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval based, this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;

  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.

  // Relocation info generation
  // Each relocation is encoded as a variable-size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are temporarily
  // stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence is accessing the pool, we need one
  // pending relocation entry per instruction.
  // The pending constant pool.
  ConstPool constpool_;

  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();
 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
 public:
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
 protected:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member gives information about the unresolved branch. The first
  // member of the pair is the maximum offset that the branch can reach in the
  // buffer. The map is sorted according to this reachable offset, making it
  // easy to check when veneers need to be emitted.
  // Note that the maximum reachable offset (first member of the pairs) should
  // always be positive but has the same type as the return value for
  // pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;
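  // Illustrative example (not from the original source): a conditional branch
  // emitted at pc offset 100, with a +/-1MB reachable range, would be recorded
  // as a {100 + 1 * MB, FarBranchInfo(100, label)} entry, and
  // unresolved_branches_first_limit() would return the smallest such limit.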
  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger in the air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
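  // Checking the arithmetic in the comment above: the check margin is
  // 2 * 1KB, and the extra 1KB of headroom corresponds to
  // 1024 / kInstructionSize == 256 instructions.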
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }
  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly to
  // the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, and for each PC-relative instruction in the chain checking
  // if pending unresolved information exists. Its complexity is proportional to
  // the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
  PositionsRecorder positions_recorder_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
};
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is
  //   size of instructions to patch + kGap
  // where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Instruction* start, unsigned count)
      : Assembler(NULL,
                  reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }
  ~PatchingAssembler() {
    // Const pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    CpuFeatures::FlushICache(buffer_, length);
  }
  // See definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
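// Usage sketch (illustrative; 'instr' is a hypothetical Instruction* into
// already generated code and 'offset' a branch offset):
//   PatchingAssembler patcher(instr, 1);  // Expect exactly one instruction.
//   patcher.b(offset);                    // Overwrite it in place.
// The destructor verifies the instruction count and flushes the I-cache.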
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
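// Usage sketch (illustrative): construct an EnsureSpace at the top of an
// emitting function so the buffer is grown, if needed, before writing:
//   EnsureSpace ensure_space(this);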
} }  // namespace v8::internal

#endif  // V8_ARM64_ASSEMBLER_ARM64_H_