// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_

#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
#include "src/compiler.h"
#include "src/globals.h"
#include "src/utils.h"

// -----------------------------------------------------------------------------

#define REGISTER_CODE_LIST(R)                                                  \
R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)                                 \
R(8)  R(9)  R(10) R(11) R(12) R(13) R(14) R(15)                                \
R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23)                                \
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)

static const int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;

// Some CPURegister methods can return Register and FPRegister types, so we
// need to declare them in advance.
struct Register;
struct FPRegister;


struct CPURegister {
  enum RegisterType {
    // The kInvalid value is used to detect uninitialized static instances,
    // which are always zero-initialized before any constructors are called.
    kInvalid = 0,
    kRegister,
    kFPRegister,
    kNoRegister
  };

  static CPURegister Create(unsigned code, unsigned size, RegisterType type) {
    CPURegister r = {code, size, type};
    return r;
  }

  unsigned code() const;
  RegisterType type() const;
  RegList Bit() const;
  unsigned SizeInBits() const;
  int SizeInBytes() const;
  bool Is32Bits() const;
  bool Is64Bits() const;
  bool IsValid() const;
  bool IsValidOrNone() const;
  bool IsValidRegister() const;
  bool IsValidFPRegister() const;
  bool IsNone() const;
  bool Is(const CPURegister& other) const;
  bool Aliases(const CPURegister& other) const;

  bool IsZero() const;

  bool IsRegister() const;
  bool IsFPRegister() const;

  bool IsSameSizeAndType(const CPURegister& other) const;

  // V8 compatibility.
  bool is(const CPURegister& other) const { return Is(other); }
  bool is_valid() const { return IsValid(); }

  unsigned reg_code;
  unsigned reg_size;
  RegisterType reg_type;
};


struct Register : public CPURegister {
  static Register Create(unsigned code, unsigned size) {
    return Register(CPURegister::Create(code, size, CPURegister::kRegister));
  }

  Register() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit Register(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  Register(const Register& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsRegister() || IsNone());
    return IsValidRegister();
  }

  static Register XRegFromCode(unsigned code);
  static Register WRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  // These members are necessary for compilation.
  // A few of them may be unused for now.

  static const int kNumRegisters = kNumberOfRegisters;
  static int NumRegisters() { return kNumRegisters; }

  // We allow crankshaft to use the following registers:
  //   - x0 to x15
  //   - x18 to x24
  //   - x27 (also context)
  //
  // TODO(all): Register x25 is currently free and could be available for
  // crankshaft, but we don't use it as we might use it as a per function
  // literal pool pointer in the future.
  //
  // TODO(all): Consider storing cp in x25 to have only two ranges.
  // We split allocatable registers into three ranges:
  //   - low range: x0 to x15
  //   - high range: x18 to x24
  //   - context: x27
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 15;
  static const unsigned kAllocatableHighRangeBegin = 18;
  static const unsigned kAllocatableHighRangeEnd = 24;
  static const unsigned kAllocatableContext = 27;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1) + 1;  // cp
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
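
  // For illustration: with the ranges above, allocation indices map to
  // registers as 0 -> x0 ... 15 -> x15, then 16 -> x18 ... 22 -> x24 (the
  // x16/x17 gap is skipped), and the final index, 23, maps to x27 (cp).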

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return ((reg_code == kAllocatableContext) ||
            (reg_code <= kAllocatableLowRangeEnd) ||
            ((reg_code >= kAllocatableHighRangeBegin) &&
             (reg_code <= kAllocatableHighRangeEnd)));
  }

  static Register FromAllocationIndex(unsigned index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
    // cp is the last allocatable register.
    if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
      return from_code(kAllocatableContext);
    }

    // Handle low and high ranges.
    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 15) &&
           (kAllocatableHighRangeBegin == 18) &&
           (kAllocatableHighRangeEnd == 24) &&
           (kAllocatableContext == 27));
    const char* const names[] = {
      "x0", "x1", "x2", "x3", "x4",
      "x5", "x6", "x7", "x8", "x9",
      "x10", "x11", "x12", "x13", "x14",
      "x15", "x18", "x19", "x20", "x21",
      "x22", "x23", "x24", "x27",
    };
    return names[index];
  }

  static int ToAllocationIndex(Register reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();
    if (code == kAllocatableContext) {
      return NumAllocatableRegisters() - 1;
    }

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static Register from_code(int code) {
    // Always return an X register.
    return Register::Create(code, kXRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};


struct FPRegister : public CPURegister {
  static FPRegister Create(unsigned code, unsigned size) {
    return FPRegister(
        CPURegister::Create(code, size, CPURegister::kFPRegister));
  }

  FPRegister() {
    reg_code = 0;
    reg_size = 0;
    reg_type = CPURegister::kNoRegister;
  }

  explicit FPRegister(const CPURegister& r) {
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
    reg_code = r.reg_code;
    reg_size = r.reg_size;
    reg_type = r.reg_type;
    DCHECK(IsValidOrNone());
  }

  bool IsValid() const {
    DCHECK(IsFPRegister() || IsNone());
    return IsValidFPRegister();
  }

  static FPRegister SRegFromCode(unsigned code);
  static FPRegister DRegFromCode(unsigned code);

  // Start of V8 compatibility section ---------------------
  static const int kMaxNumRegisters = kNumberOfFPRegisters;

  // Crankshaft can use all the FP registers except:
  //   - d15 which is used to keep the 0 double value
  //   - d29 which is used in crankshaft as a double scratch register
  //   - d30 and d31 which are used in the MacroAssembler as double scratch
  //     registers
  static const unsigned kAllocatableLowRangeBegin = 0;
  static const unsigned kAllocatableLowRangeEnd = 14;
  static const unsigned kAllocatableHighRangeBegin = 16;
  static const unsigned kAllocatableHighRangeEnd = 28;

  static const RegList kAllocatableFPRegisters = 0x1fff7fff;

  // Gap between low and high ranges.
  static const int kAllocatableRangeGapSize =
      (kAllocatableHighRangeBegin - kAllocatableLowRangeEnd) - 1;

  static const int kMaxNumAllocatableRegisters =
      (kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1) +
      (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }

  // TODO(turbofan): Proper float32 support.
  static int NumAllocatableAliasedRegisters() {
    return NumAllocatableRegisters();
  }

  // Return true if the register is one that crankshaft can allocate.
  bool IsAllocatable() const {
    return (Bit() & kAllocatableFPRegisters) != 0;
  }

  static FPRegister FromAllocationIndex(unsigned int index) {
    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));

    return (index <= kAllocatableLowRangeEnd)
        ? from_code(index)
        : from_code(index + kAllocatableRangeGapSize);
  }

  static const char* AllocationIndexToString(int index) {
    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
    DCHECK((kAllocatableLowRangeBegin == 0) &&
           (kAllocatableLowRangeEnd == 14) &&
           (kAllocatableHighRangeBegin == 16) &&
           (kAllocatableHighRangeEnd == 28));
    const char* const names[] = {
      "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
      "d8", "d9", "d10", "d11", "d12", "d13", "d14",
      "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
      "d24", "d25", "d26", "d27", "d28"
    };
    return names[index];
  }

  static int ToAllocationIndex(FPRegister reg) {
    DCHECK(reg.IsAllocatable());
    unsigned code = reg.code();

    return (code <= kAllocatableLowRangeEnd)
        ? code
        : code - kAllocatableRangeGapSize;
  }

  static FPRegister from_code(int code) {
    // Always return a D register.
    return FPRegister::Create(code, kDRegSizeInBits);
  }

  // End of V8 compatibility section -----------------------
};

STATIC_ASSERT(sizeof(CPURegister) == sizeof(Register));
STATIC_ASSERT(sizeof(CPURegister) == sizeof(FPRegister));


#if defined(ARM64_DEFINE_REG_STATICS)
#define INITIALIZE_REGISTER(register_class, name, code, size, type)      \
  const CPURegister init_##register_class##_##name = {code, size, type}; \
  const register_class& name = *reinterpret_cast<const register_class*>( \
                                    &init_##register_class##_##name)
#define ALIAS_REGISTER(register_class, alias, name)                       \
  const register_class& alias = *reinterpret_cast<const register_class*>( \
                                    &init_##register_class##_##name)
#else
#define INITIALIZE_REGISTER(register_class, name, code, size, type) \
  extern const register_class& name
#define ALIAS_REGISTER(register_class, alias, name) \
  extern const register_class& alias
#endif  // defined(ARM64_DEFINE_REG_STATICS)

// No*Reg is used to indicate an unused argument, or an error case. Note that
// these all compare equal (using the Is() method). The Register and FPRegister
// variants are provided for convenience.
INITIALIZE_REGISTER(Register, NoReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(FPRegister, NoFPReg, 0, 0, CPURegister::kNoRegister);
INITIALIZE_REGISTER(CPURegister, NoCPUReg, 0, 0, CPURegister::kNoRegister);

// v8 compatibility.
INITIALIZE_REGISTER(Register, no_reg, 0, 0, CPURegister::kNoRegister);

#define DEFINE_REGISTERS(N)                                       \
  INITIALIZE_REGISTER(Register, w##N, N,                          \
                      kWRegSizeInBits, CPURegister::kRegister);   \
  INITIALIZE_REGISTER(Register, x##N, N,                          \
                      kXRegSizeInBits, CPURegister::kRegister);
REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS

INITIALIZE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits,
                    CPURegister::kRegister);
INITIALIZE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits,
                    CPURegister::kRegister);

#define DEFINE_FPREGISTERS(N)                                       \
  INITIALIZE_REGISTER(FPRegister, s##N, N,                          \
                      kSRegSizeInBits, CPURegister::kFPRegister);   \
  INITIALIZE_REGISTER(FPRegister, d##N, N,                          \
                      kDRegSizeInBits, CPURegister::kFPRegister);
REGISTER_CODE_LIST(DEFINE_FPREGISTERS)
#undef DEFINE_FPREGISTERS

#undef INITIALIZE_REGISTER

// Register aliases.
ALIAS_REGISTER(Register, ip0, x16);
ALIAS_REGISTER(Register, ip1, x17);
ALIAS_REGISTER(Register, wip0, w16);
ALIAS_REGISTER(Register, wip1, w17);
// Root register.
ALIAS_REGISTER(Register, root, x26);
ALIAS_REGISTER(Register, rr, x26);
// Context pointer register.
ALIAS_REGISTER(Register, cp, x27);
// We use a register as a JS stack pointer to overcome the restriction on the
// architectural SP alignment.
// We chose x28 because it is contiguous with the other specific purpose
// registers.
STATIC_ASSERT(kJSSPCode == 28);
ALIAS_REGISTER(Register, jssp, x28);
ALIAS_REGISTER(Register, wjssp, w28);
ALIAS_REGISTER(Register, fp, x29);
ALIAS_REGISTER(Register, lr, x30);
ALIAS_REGISTER(Register, xzr, x31);
ALIAS_REGISTER(Register, wzr, w31);

// Keeps the 0 double value.
ALIAS_REGISTER(FPRegister, fp_zero, d15);
// Crankshaft double scratch register.
ALIAS_REGISTER(FPRegister, crankshaft_fp_scratch, d29);
// MacroAssembler double scratch registers.
ALIAS_REGISTER(FPRegister, fp_scratch, d30);
ALIAS_REGISTER(FPRegister, fp_scratch1, d30);
ALIAS_REGISTER(FPRegister, fp_scratch2, d31);

#undef ALIAS_REGISTER


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
                                              Register reg2 = NoReg,
                                              Register reg3 = NoReg,
                                              Register reg4 = NoReg);

// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
                const CPURegister& reg2,
                const CPURegister& reg3 = NoReg,
                const CPURegister& reg4 = NoReg,
                const CPURegister& reg5 = NoReg,
                const CPURegister& reg6 = NoReg,
                const CPURegister& reg7 = NoReg,
                const CPURegister& reg8 = NoReg);

// AreSameSizeAndType returns true if all of the specified registers have the
// same size, and are of the same type. The system stack pointer may be
// specified. Arguments set to NoReg are ignored, as are any subsequent
// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
bool AreSameSizeAndType(const CPURegister& reg1,
                        const CPURegister& reg2,
                        const CPURegister& reg3 = NoCPUReg,
                        const CPURegister& reg4 = NoCPUReg,
                        const CPURegister& reg5 = NoCPUReg,
                        const CPURegister& reg6 = NoCPUReg,
                        const CPURegister& reg7 = NoCPUReg,
                        const CPURegister& reg8 = NoCPUReg);


typedef FPRegister DoubleRegister;


// -----------------------------------------------------------------------------
// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
        size_(reg1.SizeInBits()), type_(reg1.type()) {
    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    DCHECK(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size,
             unsigned first_reg, unsigned last_reg)
      : size_(size), type_(type) {
    DCHECK(((type == CPURegister::kRegister) &&
            (last_reg < kNumberOfRegisters)) ||
           ((type == CPURegister::kFPRegister) &&
            (last_reg < kNumberOfFPRegisters)));
    DCHECK(last_reg >= first_reg);
    list_ = (1UL << (last_reg + 1)) - 1;
    list_ &= ~((1UL << first_reg) - 1);
    DCHECK(IsValid());
  }

  CPURegister::RegisterType type() const {
    DCHECK(IsValid());
    return type_;
  }

  RegList list() const {
    DCHECK(IsValid());
    return list_;
  }

  inline void set_list(RegList new_list) {
    DCHECK(IsValid());
    list_ = new_list;
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other);

  // Remove every register in the other CPURegList from this one. Registers
  // that do not exist in this list are ignored. The type of the registers in
  // the 'other' list must match those in this list.
  void Remove(const CPURegList& other);

  // Variants of Combine and Remove which take CPURegisters.
  void Combine(const CPURegister& other);
  void Remove(const CPURegister& other1,
              const CPURegister& other2 = NoCPUReg,
              const CPURegister& other3 = NoCPUReg,
              const CPURegister& other4 = NoCPUReg);

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code);
  void Remove(int code);
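
  // For illustration:
  //   CPURegList list(x0, x1);   // Contains x0 and x1.
  //   list.Combine(2);           // Code 2 is inferred as x2: {x0, x1, x2}.
  //   list.Remove(x0);           // {x1, x2}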

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  CPURegister PopLowestIndex();
  CPURegister PopHighestIndex();

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCalleeSavedFP(unsigned size = kDRegSizeInBits);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  static CPURegList GetCallerSaved(unsigned size = kXRegSizeInBits);
  static CPURegList GetCallerSavedFP(unsigned size = kDRegSizeInBits);

  // Registers saved as safepoints.
  static CPURegList GetSafepointSavedRegisters();

  bool IsEmpty() const {
    DCHECK(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other1,
                       const CPURegister& other2 = NoCPUReg,
                       const CPURegister& other3 = NoCPUReg,
                       const CPURegister& other4 = NoCPUReg) const {
    DCHECK(IsValid());
    RegList list = 0;
    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
    return (list_ & list) != 0;
  }

  int Count() const {
    DCHECK(IsValid());
    return CountSetBits(list_, kRegListSizeInBits);
  }

  unsigned RegisterSizeInBits() const {
    DCHECK(IsValid());
    return size_;
  }

  unsigned RegisterSizeInBytes() const {
    int size_in_bits = RegisterSizeInBits();
    DCHECK((size_in_bits % kBitsPerByte) == 0);
    return size_in_bits / kBitsPerByte;
  }

  unsigned TotalSizeInBytes() const {
    DCHECK(IsValid());
    return RegisterSizeInBytes() * Count();
  }

 private:
  RegList list_;
  unsigned size_;
  CPURegister::RegisterType type_;

  bool IsValid() const {
    const RegList kValidRegisters = 0x8000000ffffffff;
    const RegList kValidFPRegisters = 0x0000000ffffffff;
    switch (type_) {
      case CPURegister::kRegister:
        return (list_ & kValidRegisters) == list_;
      case CPURegister::kFPRegister:
        return (list_ & kValidFPRegisters) == list_;
      case CPURegister::kNoRegister:
        return list_ == 0;
      default:
        UNREACHABLE();
        return false;
    }
  }
};


// AAPCS64 callee-saved registers.
#define kCalleeSaved CPURegList::GetCalleeSaved()
#define kCalleeSavedFP CPURegList::GetCalleeSavedFP()

// AAPCS64 caller-saved registers. Note that this includes lr.
#define kCallerSaved CPURegList::GetCallerSaved()
#define kCallerSavedFP CPURegList::GetCallerSavedFP()


// -----------------------------------------------------------------------------
// Immediates.
class Immediate {
 public:
  template<typename T>
  inline explicit Immediate(Handle<T> handle);

  // This is allowed to be an implicit constructor because Immediate is
  // a wrapper class that doesn't normally perform any type conversion.
  template<typename T>
  inline Immediate(T value);  // NOLINT(runtime/explicit)

  template<typename T>
  inline Immediate(T value, RelocInfo::Mode rmode);

  int64_t value() const { return value_; }
  RelocInfo::Mode rmode() const { return rmode_; }

 private:
  void InitializeHandle(Handle<Object> value);

  int64_t value_;
  RelocInfo::Mode rmode_;
};


// -----------------------------------------------------------------------------

const int kSmiShift = kSmiTagSize + kSmiShiftSize;
const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
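
// For illustration: on arm64, kSmiTagSize is 1 and kSmiShiftSize is 31, so
// kSmiShift is 32 and a Smi keeps its 32-bit payload in the upper half of the
// 64-bit word (value << 32), with kSmiShiftMask covering the low 32 bits.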

// Represents an operand in a machine instruction.
class Operand {
  // TODO(all): If necessary, study more in details which methods
  // TODO(all): should be inlined or not.
 public:
  // rm, {<shift> {#<shift_amount>}}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  //       <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  inline Operand(Register reg,
                 Shift shift = LSL,
                 unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, <extend> {#<shift_amount>}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  //       <shift_amount> is uint2_t.
  inline Operand(Register reg,
                 Extend extend,
                 unsigned shift_amount = 0);

  template<typename T>
  inline explicit Operand(Handle<T> handle);

  // Implicit constructor for all int types, ExternalReference, and Smi.
  template<typename T>
  inline Operand(T t);  // NOLINT(runtime/explicit)

  // Implicit constructor for int types.
  template<typename T>
  inline Operand(T t, RelocInfo::Mode rmode);

  inline bool IsImmediate() const;
  inline bool IsShiftedRegister() const;
  inline bool IsExtendedRegister() const;
  inline bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  inline Operand ToExtendedRegister() const;
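
  // For illustration: Operand(x1, LSL, 2) converts to Operand(x1, UXTX, 2),
  // since add/sub with the stack pointer as a base require the
  // extended-register form rather than the shifted-register form.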

  inline Immediate immediate() const;
  inline int64_t ImmediateValue() const;
  inline Register reg() const;
  inline Shift shift() const;
  inline Extend extend() const;
  inline unsigned shift_amount() const;

  // Relocation information.
  bool NeedsRelocation(const Assembler* assembler) const;

  // Helpers
  inline static Operand UntagSmi(Register smi);
  inline static Operand UntagSmiAndScale(Register smi, int scale);

 private:
  Immediate immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
 public:
  inline MemOperand();
  inline explicit MemOperand(Register base,
                             int64_t offset = 0,
                             AddrMode addrmode = Offset);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Shift shift = LSL,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             Register regoffset,
                             Extend extend,
                             unsigned shift_amount = 0);
  inline explicit MemOperand(Register base,
                             const Operand& offset,
                             AddrMode addrmode = Offset);

  const Register& base() const { return base_; }
  const Register& regoffset() const { return regoffset_; }
  int64_t offset() const { return offset_; }
  AddrMode addrmode() const { return addrmode_; }
  Shift shift() const { return shift_; }
  Extend extend() const { return extend_; }
  unsigned shift_amount() const { return shift_amount_; }
  inline bool IsImmediateOffset() const;
  inline bool IsRegisterOffset() const;
  inline bool IsPreIndex() const;
  inline bool IsPostIndex() const;

  // For offset modes, return the offset as an Operand. This helper cannot
  // handle indexed modes.
  inline Operand OffsetAsOperand() const;

  enum PairResult {
    kNotPair,   // Can't use a pair instruction.
    kPairAB,    // Can use a pair instruction (operandA has lower address).
    kPairBA     // Can use a pair instruction (operandB has lower address).
  };
  // Check if two MemOperand objects are consistent for stp/ldp use.
  static PairResult AreConsistentForPair(const MemOperand& operandA,
                                         const MemOperand& operandB,
                                         int access_size_log2 = kXRegSizeLog2);
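
  // For illustration: with 64-bit accesses, MemOperand(x0, 8) and
  // MemOperand(x0, 16) yield kPairAB, so two adjacent ldr/str can be merged
  // into a single ldp/stp.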

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};


class ConstPool {
 public:
  explicit ConstPool(Assembler* assm)
      : assm_(assm),
        first_use_(-1),
        shared_entries_count(0) {}
  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
  int EntryCount() const {
    return shared_entries_count + static_cast<int>(unique_entries_.size());
  }
  bool IsEmpty() const {
    return shared_entries_.empty() && unique_entries_.empty();
  }
  // Distance in bytes between the current pc and the first instruction
  // using the pool. If there are no pending entries return kMaxInt.
  int DistanceToFirstUse();
  // Offset after which instructions using the pool will be out of range.
  int MaxPcOffset();
  // Maximum size the constant pool can be with current entries. It always
  // includes alignment padding and branch over.
  int WorstCaseSize();
  // Size in bytes of the literal pool *if* it is emitted at the current
  // pc. The size will include the branch over the pool if it was requested.
  int SizeIfEmittedAtCurrentPc(bool require_jump);
  // Emit the literal pool at the current pc with a branch over the pool if
  // requested.
  void Emit(bool require_jump);
  // Discard any pending pool entries.
  void Clear();

 private:
  bool CanBeShared(RelocInfo::Mode mode);

  Assembler* assm_;
  // Keep track of the first instruction requiring a constant pool entry
  // since the previous constant pool was emitted.
  int first_use_;
  // values, pc offset(s) of entries which can be shared.
  std::multimap<uint64_t, int> shared_entries_;
  // Number of distinct literals in shared entries.
  int shared_entries_count;
  // values, pc offset of entries which cannot be shared.
  std::vector<std::pair<uint64_t, int> > unique_entries_;
};


// -----------------------------------------------------------------------------
// Assembler.

class Assembler : public AssemblerBase {
 public:
  // Create an assembler. Instructions and relocation information are emitted
  // into a buffer, with the instructions starting from the beginning and the
  // relocation information starting from the end of the buffer. See CodeDesc
  // for a detailed comment on the layout (globals.h).
  //
  // If the provided buffer is NULL, the assembler allocates and grows its own
  // buffer, and buffer_size determines the initial buffer size. The buffer is
  // owned by the assembler and deallocated upon destruction of the assembler.
  //
  // If the provided buffer is not NULL, the assembler uses the provided buffer
  // for code generation and assumes its size to be buffer_size. If the buffer
  // is too small, a fatal error occurs. No deallocation of the buffer is done
  // upon destruction of the assembler.
  Assembler(Isolate* arg_isolate, void* buffer, int buffer_size);

  virtual ~Assembler();
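
  // Usage sketch (illustrative): pass NULL to let the assembler own a
  // growable buffer, or hand it caller-owned memory of a fixed size:
  //   Assembler masm(isolate, NULL, 0);
  //   Assembler masm(isolate, my_buffer, my_buffer_size);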

  virtual void AbortedCodeGeneration() {
    constpool_.Clear();
  }

  // System functions ---------------------------------------------------------
  // Start generating code from the beginning of the buffer, discarding any code
  // and data that has already been emitted into the buffer.
  //
  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
  // constant pool is not blocked.
  void Reset();

  // GetCode emits any pending (non-emitted) code and fills the descriptor
  // desc. GetCode() is idempotent; it returns the same result if no other
  // Assembler functions are invoked in between GetCode() calls.
  //
  // The descriptor (desc) can be NULL. In that case, the code is finalized as
  // usual, but the descriptor is not populated.
  void GetCode(CodeDesc* desc);

  // Insert the smallest number of nop instructions
  // possible to align the pc offset to a multiple
  // of m. m must be a power of 2 (>= 4).
  void Align(int m);
  // Insert the smallest number of zero bytes possible to align the pc offset
  // to a multiple of m. m must be a power of 2 (>= 2).
  void DataAlign(int m);

  inline void Unreachable();

  // Label --------------------------------------------------------------------
  // Bind a label to the current pc. Note that labels can only be bound once,
  // and if labels are linked to other instructions, they _must_ be bound
  // before they go out of scope.
  void bind(Label* label);


  // RelocInfo and pools ------------------------------------------------------

  // Record relocation information for current pc_.
  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);

  // Return the address in the constant pool of the code target address used by
  // the branch/call instruction at pc.
  inline static Address target_pointer_address_at(Address pc);

  // Read/Modify the code target address in the branch/call instruction at pc.
  inline static Address target_address_at(Address pc, Address constant_pool);
  inline static void set_target_address_at(
      Address pc, Address constant_pool, Address target,
      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
  static inline Address target_address_at(Address pc, Code* code);
  static inline void set_target_address_at(Address pc,
                                           Code* code,
                                           Address target,
                                           ICacheFlushMode icache_flush_mode =
                                               FLUSH_ICACHE_IF_NEEDED);

  // Return the code target address at a call site from the return address of
  // that call in the instruction stream.
  inline static Address target_address_from_return_address(Address pc);

  // Given the address of the beginning of a call, return the address in the
  // instruction stream that call will return from.
  inline static Address return_address_from_call_start(Address pc);

  // This sets the branch destination (which is in the constant pool on ARM).
  // This is for calls and branches within generated code.
  inline static void deserialization_set_special_target_at(
      Address constant_pool_entry, Code* code, Address target);

  // This sets the internal reference at the pc.
  inline static void deserialization_set_target_internal_reference_at(
      Address pc, Address target,
      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);

  // All addresses in the constant pool are the same size as pointers.
  static const int kSpecialTargetSize = kPointerSize;

  // The sizes of the call sequences emitted by MacroAssembler::Call.
  // Wherever possible, use MacroAssembler::CallSize instead of these constants,
  // as it will choose the correct value for a given relocation mode.
  //
  // Without relocation:
  //  movz  temp, #(target & 0x000000000000ffff)
  //  movk  temp, #(target & 0x00000000ffff0000)
  //  movk  temp, #(target & 0x0000ffff00000000)
  //  blr   temp
  //
  // With relocation:
  //  ldr   temp, =target
  //  blr   temp
  static const int kCallSizeWithoutRelocation = 4 * kInstructionSize;
  static const int kCallSizeWithRelocation = 2 * kInstructionSize;

  // Size of the generated code in bytes.
  uint64_t SizeOfGeneratedCode() const {
    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
    return pc_ - buffer_;
  }

  // Return the code size generated from label to the current position.
  uint64_t SizeOfCodeGeneratedSince(const Label* label) {
    DCHECK(label->is_bound());
    DCHECK(pc_offset() >= label->pos());
    DCHECK(pc_offset() < buffer_size_);
    return pc_offset() - label->pos();
  }

  // Check the size of the code generated since the given label. This function
  // is used primarily to work around comparisons between signed and unsigned
  // quantities, since V8 uses both.
  // TODO(jbramley): Work out what sign to use for these things and if possible,
  // change things to be consistent.
  void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
    DCHECK(size >= 0);
    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
  }

  // Return the number of instructions generated from label to the
  // current position.
  uint64_t InstructionsGeneratedSince(const Label* label) {
    return SizeOfCodeGeneratedSince(label) / kInstructionSize;
  }

  static const int kPatchDebugBreakSlotAddressOffset = 0;

  // Number of instructions necessary to be able to later patch it to a call.
  static const int kDebugBreakSlotInstructions = 5;
  static const int kDebugBreakSlotLength =
      kDebugBreakSlotInstructions * kInstructionSize;

  // Prevent constant pool emission until EndBlockConstPool is called.
  // Calls to this function can be nested, but must be followed by an equal
  // number of calls to EndBlockConstPool.
  void StartBlockConstPool();

  // Resume constant pool emission. Needs to be called as many times as
  // StartBlockConstPool to have an effect.
  void EndBlockConstPool();
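
  // For illustration, the RAII helper declared further down keeps these
  // calls balanced automatically:
  //   { BlockConstPoolScope scope(&masm); /* emit code */ }  // masm: an Assembler.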

  bool is_const_pool_blocked() const;
  static bool IsConstantPoolAt(Instruction* instr);
  static int ConstantPoolSizeAt(Instruction* instr);
  // See Assembler::CheckConstPool for more info.
  void EmitPoolGuard();

  // Prevent veneer pool emission until EndBlockVeneerPool is called.
  // Calls to this function can be nested, but must be followed by an equal
  // number of calls to EndBlockVeneerPool.
  void StartBlockVeneerPool();

  // Resume veneer pool emission. Needs to be called as many times as
  // StartBlockVeneerPool to have an effect.
  void EndBlockVeneerPool();

  bool is_veneer_pool_blocked() const {
    return veneer_pool_blocked_nesting_ > 0;
  }

  // Block/resume emission of constant pools and veneer pools.
  void StartBlockPools() {
    StartBlockConstPool();
    StartBlockVeneerPool();
  }
  void EndBlockPools() {
    EndBlockConstPool();
    EndBlockVeneerPool();
  }

  // Debugging ----------------------------------------------------------------
  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
  void RecordComment(const char* msg);

  // Record a deoptimization reason that can be used by a log or cpu profiler.
  // Use --trace-deopt to enable.
  void RecordDeoptReason(const int reason, const SourcePosition position);

  int buffer_space() const;

  // Mark generator continuation.
  void RecordGeneratorContinuation();

  // Mark address of a debug break slot.
  void RecordDebugBreakSlot(RelocInfo::Mode mode, int argc = 0);

  // Record the emission of a constant pool.
  //
  // The emission of constant and veneer pools depends on the size of the code
  // generated and the number of RelocInfo recorded.
  // The Debug mechanism needs to map code offsets between two versions of a
  // function, compiled with and without debugger support (see for example
  // Debug::PrepareForBreakPoints()).
  // Compiling functions with debugger support generates additional code
  // (DebugCodegen::GenerateSlot()). This may affect the emission of the pools
  // and cause the version of the code with debugger support to have pools
  // generated in different places.
  // Recording the position and size of emitted pools allows us to correctly
  // compute the offset mappings between the different versions of a function
  // in all situations.
  //
  // The parameter indicates the size of the pool (in bytes), including
  // the marker and branch over the data.
  void RecordConstPool(int size);


  // Instruction set functions ------------------------------------------------

  // Branch / Jump instructions.
  // For branches, offsets are scaled, i.e. they are in instructions, not in
  // bytes.
  // Branch to register.
  void br(const Register& xn);

  // Branch-link to register.
  void blr(const Register& xn);

  // Branch to register with return hint.
  void ret(const Register& xn = lr);

  // Unconditional branch to label.
  void b(Label* label);

  // Conditional branch to label.
  void b(Label* label, Condition cond);

  // Unconditional branch to PC offset.
  void b(int imm26);

  // Conditional branch to PC offset.
  void b(int imm19, Condition cond);

  // Branch-link to label / pc offset.
  void bl(Label* label);
  void bl(int imm26);
1065 // Compare and branch to label / pc offset if zero.
1066 void cbz(const Register& rt, Label* label);
1067 void cbz(const Register& rt, int imm19);
1069 // Compare and branch to label / pc offset if not zero.
1070 void cbnz(const Register& rt, Label* label);
1071 void cbnz(const Register& rt, int imm19);
1073 // Test bit and branch to label / pc offset if zero.
1074 void tbz(const Register& rt, unsigned bit_pos, Label* label);
1075 void tbz(const Register& rt, unsigned bit_pos, int imm14);
1077 // Test bit and branch to label / pc offset if not zero.
1078 void tbnz(const Register& rt, unsigned bit_pos, Label* label);
1079 void tbnz(const Register& rt, unsigned bit_pos, int imm14);
1081 // Address calculation instructions.
1082 // Calculate a PC-relative address. Unlike for branches the offset in adr is
1083 // unscaled (i.e. the result can be unaligned).
1084 void adr(const Register& rd, Label* label);
1085 void adr(const Register& rd, int imm21);

  // Data Processing instructions.
  // Add.
  void add(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add and update status flags.
  void adds(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare negative.
  void cmn(const Register& rn, const Operand& operand);

  // Subtract.
  void sub(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract and update status flags.
  void subs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Compare.
  void cmp(const Register& rn, const Operand& operand);

  // Negate.
  void neg(const Register& rd,
           const Operand& operand);

  // Negate and update status flags.
  void negs(const Register& rd,
            const Operand& operand);

  // Add with carry bit.
  void adc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Add with carry bit and update status flags.
  void adcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Subtract with carry bit.
  void sbc(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Subtract with carry bit and update status flags.
  void sbcs(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Negate with carry bit.
  void ngc(const Register& rd,
           const Operand& operand);

  // Negate with carry bit and update status flags.
  void ngcs(const Register& rd,
            const Operand& operand);

  // Logical instructions.
  // Bitwise and (A & B).
  void and_(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise and (A & B) and update status flags.
  void ands(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bit test, and set flags.
  void tst(const Register& rn, const Operand& operand);

  // Bit clear (A & ~B).
  void bic(const Register& rd,
           const Register& rn,
           const Operand& operand);

  // Bit clear (A & ~B) and update status flags.
  void bics(const Register& rd,
            const Register& rn,
            const Operand& operand);

  // Bitwise or (A | B).
  void orr(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise or-not (A | ~B).
  void orn(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise eor/xor (A ^ B).
  void eor(const Register& rd, const Register& rn, const Operand& operand);

  // Bitwise exclusive-nor / xnor (A ^ ~B).
  void eon(const Register& rd, const Register& rn, const Operand& operand);

  // Logical shift left variable.
  void lslv(const Register& rd, const Register& rn, const Register& rm);

  // Logical shift right variable.
  void lsrv(const Register& rd, const Register& rn, const Register& rm);

  // Arithmetic shift right variable.
  void asrv(const Register& rd, const Register& rn, const Register& rm);

  // Rotate right variable.
  void rorv(const Register& rd, const Register& rn, const Register& rm);

  // Bitfield instructions.
  // Bitfield move.
  void bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms);

  // Signed bitfield move.
  void sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Unsigned bitfield move.
  void ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms);

  // Bfm aliases.
  // Bitfield insert.
  void bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Bitfield extract and insert low.
  void bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    bfm(rd, rn, lsb, lsb + width - 1);
  }

  // Sbfm aliases.
  // Arithmetic shift right.
  void asr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    sbfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Signed bitfield insert in zero.
  void sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Signed bitfield extract.
  void sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    sbfm(rd, rn, lsb, lsb + width - 1);
  }

  // Signed extend byte.
  void sxtb(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 7);
  }

  // Signed extend halfword.
  void sxth(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 15);
  }

  // Signed extend word.
  void sxtw(const Register& rd, const Register& rn) {
    sbfm(rd, rn, 0, 31);
  }

  // Ubfm aliases.
  // Logical shift left.
  void lsl(const Register& rd, const Register& rn, unsigned shift) {
    unsigned reg_size = rd.SizeInBits();
    DCHECK(shift < reg_size);
    ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
  }

  // Logical shift right.
  void lsr(const Register& rd, const Register& rn, unsigned shift) {
    DCHECK(shift < rd.SizeInBits());
    ubfm(rd, rn, shift, rd.SizeInBits() - 1);
  }

  // Unsigned bitfield insert in zero.
  void ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
  }

  // Unsigned bitfield extract.
  void ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    DCHECK(width >= 1);
    DCHECK(lsb + width <= rn.SizeInBits());
    ubfm(rd, rn, lsb, lsb + width - 1);
  }
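
  // For illustration: ubfx(w0, w1, 8, 4) extracts bits 11:8 of w1 into the
  // low four bits of w0 and zeroes the remaining bits.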

  // Unsigned extend byte.
  void uxtb(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 7);
  }

  // Unsigned extend halfword.
  void uxth(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 15);
  }

  // Unsigned extend word.
  void uxtw(const Register& rd, const Register& rn) {
    ubfm(rd, rn, 0, 31);
  }

  // Extract.
  void extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb);

  // Conditional select: rd = cond ? rn : rm.
  void csel(const Register& rd,
            const Register& rn,
            const Register& rm,
            Condition cond);

  // Conditional select increment: rd = cond ? rn : rm + 1.
  void csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select inversion: rd = cond ? rn : ~rm.
  void csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional select negation: rd = cond ? rn : -rm.
  void csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond);

  // Conditional set: rd = cond ? 1 : 0.
  void cset(const Register& rd, Condition cond);

  // Conditional set minus: rd = cond ? -1 : 0.
  void csetm(const Register& rd, Condition cond);

  // Conditional increment: rd = cond ? rn + 1 : rn.
  void cinc(const Register& rd, const Register& rn, Condition cond);

  // Conditional invert: rd = cond ? ~rn : rn.
  void cinv(const Register& rd, const Register& rn, Condition cond);

  // Conditional negate: rd = cond ? -rn : rn.
  void cneg(const Register& rd, const Register& rn, Condition cond);

  // Rotate right.
  void ror(const Register& rd, const Register& rs, unsigned shift) {
    extr(rd, rs, rs, shift);
  }

  // Conditional comparison.
  // Conditional compare negative.
  void ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // Conditional compare.
  void ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);

  // 32 x 32 -> 32-bit and 64 x 64 -> 64-bit multiply.
  void mul(const Register& rd, const Register& rn, const Register& rm);

  // 32 + 32 x 32 -> 32-bit and 64 + 64 x 64 -> 64-bit multiply accumulate.
  void madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // -(32 x 32) -> 32-bit and -(64 x 64) -> 64-bit multiply.
  void mneg(const Register& rd, const Register& rn, const Register& rm);

  // 32 - 32 x 32 -> 32-bit and 64 - 64 x 64 -> 64-bit multiply subtract.
  void msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply.
  void smull(const Register& rd, const Register& rn, const Register& rm);

  // Xd = bits<127:64> of Xn * Xm.
  void smulh(const Register& rd, const Register& rn, const Register& rm);

  // Signed 32 x 32 -> 64-bit multiply and accumulate.
  void smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and accumulate.
  void umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed 32 x 32 -> 64-bit multiply and subtract.
  void smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Unsigned 32 x 32 -> 64-bit multiply and subtract.
  void umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra);

  // Signed integer divide.
  void sdiv(const Register& rd, const Register& rn, const Register& rm);

  // Unsigned integer divide.
  void udiv(const Register& rd, const Register& rn, const Register& rm);

  // Bit count, bit reverse and endian reverse.
  void rbit(const Register& rd, const Register& rn);
  void rev16(const Register& rd, const Register& rn);
  void rev32(const Register& rd, const Register& rn);
  void rev(const Register& rd, const Register& rn);
  void clz(const Register& rd, const Register& rn);
  void cls(const Register& rd, const Register& rn);

  // Memory instructions.

  // Load integer or FP register.
  void ldr(const CPURegister& rt, const MemOperand& src);

  // Store integer or FP register.
  void str(const CPURegister& rt, const MemOperand& dst);

  // Load word with sign extension.
  void ldrsw(const Register& rt, const MemOperand& src);

  // Load byte.
  void ldrb(const Register& rt, const MemOperand& src);

  // Store byte.
  void strb(const Register& rt, const MemOperand& dst);

  // Load byte with sign extension.
  void ldrsb(const Register& rt, const MemOperand& src);

  // Load half-word.
  void ldrh(const Register& rt, const MemOperand& src);

  // Store half-word.
  void strh(const Register& rt, const MemOperand& dst);

  // Load half-word with sign extension.
  void ldrsh(const Register& rt, const MemOperand& src);

  // Load integer or FP register pair.
  void ldp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& src);

  // Store integer or FP register pair.
  void stp(const CPURegister& rt, const CPURegister& rt2,
           const MemOperand& dst);

  // Load word pair with sign extension.
  void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);

  // Load literal to register from a pc relative address.
  void ldr_pcrel(const CPURegister& rt, int imm19);

  // Load literal to register.
  void ldr(const CPURegister& rt, const Immediate& imm);

  // Move instructions. The default shift of -1 indicates that the move
  // instruction will calculate an appropriate 16-bit immediate and left shift
  // that is equal to the 64-bit immediate argument. If an explicit left shift
  // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
  //
  // For movk, an explicit shift can be used to indicate which half word should
  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
  // half word with zero, whereas movk(x0, 0, 48) will overwrite the
  // most-significant.

  // Move and keep.
  void movk(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVK);
  }

  // Move with NOT: the register is set to ~(imm << shift).
  void movn(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVN);
  }

  // Move with zero.
  void movz(const Register& rd, uint64_t imm, int shift = -1) {
    MoveWide(rd, imm, shift, MOVZ);
  }
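
  // For illustration, materializing 0x00000000deadbeef in x0:
  //   movz(x0, 0xbeef);      // x0 = 0x000000000000beef
  //   movk(x0, 0xdead, 16);  // x0 = 0x00000000deadbeef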

  // Misc instructions.
  // Monitor debug-mode breakpoint.
  void brk(int code);

  // Halting debug-mode breakpoint.
  void hlt(int code);

  // Move register to register.
  void mov(const Register& rd, const Register& rn);

  // Move NOT(operand) to register.
  void mvn(const Register& rd, const Operand& operand);

  // System instructions.
  // Move to register from system register.
  void mrs(const Register& rt, SystemRegister sysreg);

  // Move from register to system register.
  void msr(SystemRegister sysreg, const Register& rt);

  // System hint.
  void hint(SystemHint code);

  // Data memory barrier.
  void dmb(BarrierDomain domain, BarrierType type);

  // Data synchronization barrier.
  void dsb(BarrierDomain domain, BarrierType type);

  // Instruction synchronization barrier.
  void isb();

  // Alias for system instructions.
  void nop() { hint(NOP); }

  // Different nop operations are used by the code generator to detect certain
  // states of the generated code.
  enum NopMarkerTypes {
    DEBUG_BREAK_NOP,
    ADR_FAR_NOP,
    FIRST_NOP_MARKER = DEBUG_BREAK_NOP,
    LAST_NOP_MARKER = ADR_FAR_NOP
  };

  void nop(NopMarkerTypes n) {
    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
    mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
  }

  // FP instructions.
  // Move immediate to FP register.
  void fmov(FPRegister fd, double imm);
  void fmov(FPRegister fd, float imm);

  // Move FP register to register.
  void fmov(Register rd, FPRegister fn);

  // Move register to FP register.
  void fmov(FPRegister fd, Register rn);

  // Move FP register to FP register.
  void fmov(FPRegister fd, FPRegister fn);

  // FP add.
  void fadd(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP subtract.
  void fsub(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP multiply.
  void fmul(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP fused multiply and add.
  void fmadd(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply and subtract.
  void fmsub(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             const FPRegister& fa);

  // FP fused multiply, add and negate.
  void fnmadd(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP fused multiply, subtract and negate.
  void fnmsub(const FPRegister& fd,
              const FPRegister& fn,
              const FPRegister& fm,
              const FPRegister& fa);

  // FP divide.
  void fdiv(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum.
  void fmax(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum.
  void fmin(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP maximum number.
  void fmaxnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP minimum number.
  void fminnm(const FPRegister& fd, const FPRegister& fn, const FPRegister& fm);

  // FP absolute.
  void fabs(const FPRegister& fd, const FPRegister& fn);

  // FP negate.
  void fneg(const FPRegister& fd, const FPRegister& fn);

  // FP square root.
  void fsqrt(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to away).
  void frinta(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (toward minus infinity).
  void frintm(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (nearest with ties to even).
  void frintn(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards plus infinity).
  void frintp(const FPRegister& fd, const FPRegister& fn);

  // FP round to integer (towards zero).
  void frintz(const FPRegister& fd, const FPRegister& fn);

  // FP compare registers.
  void fcmp(const FPRegister& fn, const FPRegister& fm);

  // FP compare immediate.
  void fcmp(const FPRegister& fn, double value);

  // FP conditional compare.
  void fccmp(const FPRegister& fn,
             const FPRegister& fm,
             StatusFlags nzcv,
             Condition cond);

  // FP conditional select.
  void fcsel(const FPRegister& fd,
             const FPRegister& fn,
             const FPRegister& fm,
             Condition cond);

  // Common FP Convert function.
  void FPConvertToInt(const Register& rd,
                      const FPRegister& fn,
                      FPIntegerConvertOp op);

  // FP convert between single and double precision.
  void fcvt(const FPRegister& fd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to away).
  void fcvtau(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to away).
  void fcvtas(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards -infinity).
  void fcvtmu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards -infinity).
  void fcvtms(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (nearest with ties to even).
  void fcvtnu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (nearest with ties to even).
  void fcvtns(const Register& rd, const FPRegister& fn);

  // Convert FP to unsigned integer (round towards zero).
  void fcvtzu(const Register& rd, const FPRegister& fn);

  // Convert FP to signed integer (round towards zero).
  void fcvtzs(const Register& rd, const FPRegister& fn);

  // Convert signed integer or fixed point to FP.
  void scvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);

  // Convert unsigned integer or fixed point to FP.
  void ucvtf(const FPRegister& fd, const Register& rn, unsigned fbits = 0);
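
  // For illustration: a non-zero fbits treats the source as fixed point, so
  // scvtf(d0, x1, 8) converts x1 interpreted as a signed value with 8
  // fractional bits, i.e. d0 = x1 / 256.0.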

  // Instruction functions used only for test, debug, and patching.
  // Emit raw instructions in the instruction stream.
  void dci(Instr raw_inst) { Emit(raw_inst); }

  // Emit 8 bits of data in the instruction stream.
  void dc8(uint8_t data) { EmitData(&data, sizeof(data)); }

  // Emit 32 bits of data in the instruction stream.
  void dc32(uint32_t data) { EmitData(&data, sizeof(data)); }

  // Emit 64 bits of data in the instruction stream.
  void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }

  // Emit an address in the instruction stream.
  void dcptr(Label* label);

  // Copy a string into the instruction stream, including the terminating NULL
  // character. The instruction pointer (pc_) is then aligned correctly for
  // subsequent instructions.
  void EmitStringData(const char* string);

  // Pseudo-instructions ------------------------------------------------------

  // Parameters are described in arm64/instructions-arm64.h.
  void debug(const char* message, uint32_t code, Instr params = BREAK);

  // Required by V8.
  void dd(uint32_t data) { dc32(data); }
  void db(uint8_t data) { dc8(data); }
  void dq(uint64_t data) { dc64(data); }
  void dp(uintptr_t data) { dc64(data); }

  // Code generation helpers --------------------------------------------------

  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }

  Instruction* pc() const { return Instruction::Cast(pc_); }

  Instruction* InstructionAt(ptrdiff_t offset) const {
    return reinterpret_cast<Instruction*>(buffer_ + offset);
  }

  ptrdiff_t InstructionOffset(Instruction* instr) const {
    return reinterpret_cast<byte*>(instr) - buffer_;
  }

  // Register encoding.
  static Instr Rd(CPURegister rd) {
    DCHECK(rd.code() != kSPRegInternalCode);
    return rd.code() << Rd_offset;
  }

  static Instr Rn(CPURegister rn) {
    DCHECK(rn.code() != kSPRegInternalCode);
    return rn.code() << Rn_offset;
  }

  static Instr Rm(CPURegister rm) {
    DCHECK(rm.code() != kSPRegInternalCode);
    return rm.code() << Rm_offset;
  }

  static Instr Ra(CPURegister ra) {
    DCHECK(ra.code() != kSPRegInternalCode);
    return ra.code() << Ra_offset;
  }

  static Instr Rt(CPURegister rt) {
    DCHECK(rt.code() != kSPRegInternalCode);
    return rt.code() << Rt_offset;
  }

  static Instr Rt2(CPURegister rt2) {
    DCHECK(rt2.code() != kSPRegInternalCode);
    return rt2.code() << Rt2_offset;
  }

  // These encoding functions allow the stack pointer to be encoded, and
  // disallow the zero register.
  static Instr RdSP(Register rd) {
    DCHECK(!rd.IsZero());
    return (rd.code() & kRegCodeMask) << Rd_offset;
  }

  static Instr RnSP(Register rn) {
    DCHECK(!rn.IsZero());
    return (rn.code() & kRegCodeMask) << Rn_offset;
  }

  // Flags encoding.
  inline static Instr Flags(FlagsUpdate S);
  inline static Instr Cond(Condition cond);

  // PC-relative address encoding.
  inline static Instr ImmPCRelAddress(int imm21);

  // Branch encoding.
  inline static Instr ImmUncondBranch(int imm26);
  inline static Instr ImmCondBranch(int imm19);
  inline static Instr ImmCmpBranch(int imm19);
  inline static Instr ImmTestBranch(int imm14);
  inline static Instr ImmTestBranchBit(unsigned bit_pos);

  // Data Processing encoding.
  inline static Instr SF(Register rd);
  inline static Instr ImmAddSub(int imm);
  inline static Instr ImmS(unsigned imms, unsigned reg_size);
  inline static Instr ImmR(unsigned immr, unsigned reg_size);
  inline static Instr ImmSetBits(unsigned imms, unsigned reg_size);
  inline static Instr ImmRotate(unsigned immr, unsigned reg_size);
  inline static Instr ImmLLiteral(int imm19);
  inline static Instr BitN(unsigned bitn, unsigned reg_size);
  inline static Instr ShiftDP(Shift shift);
  inline static Instr ImmDPShift(unsigned amount);
  inline static Instr ExtendMode(Extend extend);
  inline static Instr ImmExtendShift(unsigned left_shift);
  inline static Instr ImmCondCmp(unsigned imm);
  inline static Instr Nzcv(StatusFlags nzcv);

  static bool IsImmAddSub(int64_t immediate);
  static bool IsImmLogical(uint64_t value,
                           unsigned width,
                           unsigned* n,
                           unsigned* imm_s,
                           unsigned* imm_r);

  // MemOperand offset encoding.
  inline static Instr ImmLSUnsigned(int imm12);
  inline static Instr ImmLS(int imm9);
  inline static Instr ImmLSPair(int imm7, LSDataSize size);
  inline static Instr ImmShiftLS(unsigned shift_amount);
  inline static Instr ImmException(int imm16);
  inline static Instr ImmSystemRegister(int imm15);
  inline static Instr ImmHint(int imm7);
  inline static Instr ImmBarrierDomain(int imm2);
  inline static Instr ImmBarrierType(int imm2);
  inline static LSDataSize CalcLSDataSize(LoadStoreOp op);

  static bool IsImmLSUnscaled(int64_t offset);
  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
  static bool IsImmLLiteral(int64_t offset);

  // Move immediates encoding.
  inline static Instr ImmMoveWide(int imm);
  inline static Instr ShiftMoveWide(int shift);

  // FP Immediates.
  static Instr ImmFP32(float imm);
  static Instr ImmFP64(double imm);
  inline static Instr FPScale(unsigned scale);

  // FP register type.
  inline static Instr FPType(FPRegister fd);

  // Class for scoping postponing the constant pool generation.
  class BlockConstPoolScope {
   public:
    explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockConstPool();
    }
    ~BlockConstPoolScope() {
      assem_->EndBlockConstPool();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
  };

  // Check if it is time to emit a constant pool.
  void CheckConstPool(bool force_emit, bool require_jump);

  void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
                                          ConstantPoolEntry::Access access,
                                          ConstantPoolEntry::Type type) {
    // No embedded constant pool support.
    UNREACHABLE();
  }
  // Returns true if we should emit a veneer as soon as possible for a branch
  // that can at most reach the specified pc.
  bool ShouldEmitVeneer(int max_reachable_pc,
                        int margin = kVeneerDistanceMargin);
  bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
    return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
  }
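  // Conceptually, the check reduces to the sketch below; the real definition
  // (in the .cc file) additionally reserves space for the protective branch
  // and for the veneers themselves:
  //
  //   bool ShouldEmitVeneer(int max_reachable_pc, int margin) {
  //     return pc_offset() > max_reachable_pc - margin;
  //   }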
  // The maximum code size generated for a veneer. Currently one branch
  // instruction. This is for code size checking purposes, and can be extended
  // in the future, for example if we decide to add nops between the veneers.
  static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
  void RecordVeneerPool(int location_offset, int size);
  // Emits veneers for branches that are approaching their maximum range.
  // If need_protection is true, the veneers are protected by a branch jumping
  // over the pools.
  void EmitVeneers(bool force_emit, bool need_protection,
                   int margin = kVeneerDistanceMargin);
  void EmitVeneersGuard() { EmitPoolGuard(); }
  // Checks whether veneers need to be emitted at this point.
  // If force_emit is set, a veneer is generated for *all* unresolved branches.
  void CheckVeneerPool(bool force_emit, bool require_jump,
                       int margin = kVeneerDistanceMargin);
  class BlockPoolsScope {
   public:
    explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
      assem_->StartBlockPools();
    }
    ~BlockPoolsScope() {
      assem_->EndBlockPools();
    }

   private:
    Assembler* assem_;

    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
  };
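  // Like BlockConstPoolScope, but blocks the constant pool and the veneer
  // pool at once. A hypothetical use around a layout-sensitive sequence:
  //
  //   {
  //     Assembler::BlockPoolsScope block_pools(&assm);
  //     // ... emit code whose exact layout must be preserved ...
  //   }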
 protected:
  inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;

  void LoadStore(const CPURegister& rt,
                 const MemOperand& addr,
                 LoadStoreOp op);

  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
                     const MemOperand& addr, LoadStorePairOp op);
  static bool IsImmLSPair(int64_t offset, LSDataSize size);
  void Logical(const Register& rd,
               const Register& rn,
               const Operand& operand,
               LogicalOp op);
  void LogicalImmediate(const Register& rd,
                        const Register& rn,
                        unsigned n,
                        unsigned imm_s,
                        unsigned imm_r,
                        LogicalOp op);

  void ConditionalCompare(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond,
                          ConditionalCompareOp op);
  static bool IsImmConditionalCompare(int64_t immediate);

  void AddSubWithCarry(const Register& rd,
                       const Register& rn,
                       const Operand& operand,
                       FlagsUpdate S,
                       AddSubWithCarryOp op);
  // Functions for emulating operands not directly supported by the instruction
  // set.
  void EmitShift(const Register& rd,
                 const Register& rn,
                 Shift shift,
                 unsigned amount);
  void EmitExtendShift(const Register& rd,
                       const Register& rn,
                       Extend extend,
                       unsigned left_shift);

  void AddSub(const Register& rd,
              const Register& rn,
              const Operand& operand,
              FlagsUpdate S,
              AddSubOp op);
  static bool IsImmFP32(float imm);
  static bool IsImmFP64(double imm);

  // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
  // registers. Only simple loads are supported; sign- and zero-extension (such
  // as in LDPSW_x or LDRB_w) are not supported.
  static inline LoadStoreOp LoadOpFor(const CPURegister& rt);
  static inline LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
                                              const CPURegister& rt2);
  static inline LoadStoreOp StoreOpFor(const CPURegister& rt);
  static inline LoadStorePairOp StorePairOpFor(const CPURegister& rt,
                                               const CPURegister& rt2);
  static inline LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
  // Remove the specified branch from the unbound label link chain.
  // If available, a veneer for this label can be used by other branches in the
  // chain if the link chain cannot be fixed up without this branch.
  void RemoveBranchFromLabelLinkChain(Instruction* branch,
                                      Label* label,
                                      Instruction* label_veneer = NULL);
 private:
  // Instruction helpers.
  void MoveWide(const Register& rd,
                uint64_t imm,
                int shift,
                MoveWideImmediateOp mov_op);
  void DataProcShiftedRegister(const Register& rd,
                               const Register& rn,
                               const Operand& operand,
                               FlagsUpdate S,
                               Instr op);
  void DataProcExtendedRegister(const Register& rd,
                                const Register& rn,
                                const Operand& operand,
                                FlagsUpdate S,
                                Instr op);
  void ConditionalSelect(const Register& rd,
                         const Register& rn,
                         const Register& rm,
                         Condition cond,
                         ConditionalSelectOp op);
  void DataProcessing1Source(const Register& rd,
                             const Register& rn,
                             DataProcessing1SourceOp op);
  void DataProcessing3Source(const Register& rd,
                             const Register& rn,
                             const Register& rm,
                             const Register& ra,
                             DataProcessing3SourceOp op);
  void FPDataProcessing1Source(const FPRegister& fd,
                               const FPRegister& fn,
                               FPDataProcessing1SourceOp op);
  void FPDataProcessing2Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               FPDataProcessing2SourceOp op);
  void FPDataProcessing3Source(const FPRegister& fd,
                               const FPRegister& fn,
                               const FPRegister& fm,
                               const FPRegister& fa,
                               FPDataProcessing3SourceOp op);
  // Label helpers.

  // Return an offset for a label-referencing instruction, typically a branch.
  int LinkAndGetByteOffsetTo(Label* label);

  // This is the same as LinkAndGetByteOffsetTo, but returns an offset
  // suitable for fields that take instruction offsets.
  inline int LinkAndGetInstructionOffsetTo(Label* label);

  static const int kStartOfLabelLinkChain = 0;

  // Verify that a label's link chain is intact.
  void CheckLabelLinkChain(Label const * label);
  void RecordLiteral(int64_t imm, unsigned size);

  // Postpone the generation of the constant pool for the specified number of
  // instructions.
  void BlockConstPoolFor(int instructions);

  // Set how far from the current pc the next constant pool check will be.
  void SetNextConstPoolCheckIn(int instructions) {
    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
  }
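  // For example, SetNextConstPoolCheckIn(kCheckConstPoolInterval) with the
  // default interval of 128 instructions schedules the next check
  // 128 * 4 = 512 bytes past the current pc.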
  // Emit the instruction at pc_.
  void Emit(Instr instruction) {
    STATIC_ASSERT(sizeof(*pc_) == 1);
    STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));

    memcpy(pc_, &instruction, sizeof(instruction));
    pc_ += sizeof(instruction);
    CheckBuffer();
  }
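  // An instruction word is the bitwise OR of an opcode pattern and the field
  // encodings declared above. A hedged sketch, with OP standing in for a real
  // opcode constant from constants-arm64.h:
  //
  //   Emit(OP | SF(rd) | Rd(rd) | Rn(rn) | ImmAddSub(imm));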
  // Emit data inline in the instruction stream.
  void EmitData(void const * data, unsigned size) {
    DCHECK(sizeof(*pc_) == 1);
    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));

    // TODO(all): Somehow register that we have some data here. Then we can
    // disassemble it correctly.
    memcpy(pc_, data, size);
    pc_ += size;
    CheckBuffer();
  }

  void GrowBuffer();
  void CheckBufferSpace();
  void CheckBuffer();

  // pc offset of the next constant pool check.
  int next_constant_pool_check_;
  // Constant pool generation
  // Pools are emitted in the instruction stream. They are emitted when:
  //  * the distance to the first use is above a pre-defined distance, or
  //  * the number of entries in the pool is above a pre-defined size, or
  //  * code generation is finished.
  // If a pool needs to be emitted before code generation is finished, a branch
  // over the emitted pool will be inserted.

  // Constants in the pool may be addresses of functions that get relocated;
  // if so, a relocation info entry is associated with the constant pool entry.

  // Repeated checking whether the constant pool should be emitted is rather
  // expensive. By default we only check again once a number of instructions
  // has been generated. That also means that the sizing of the buffers is not
  // an exact science, and that we rely on some slop to not overrun buffers.
  static const int kCheckConstPoolInterval = 128;
  // Distance to the first use after which a pool will be emitted. Pool entries
  // are accessed with a pc-relative load, so this distance cannot be more than
  // 1 * MB. Since constant pool emission checks are interval-based, this value
  // is an approximation.
  static const int kApproxMaxDistToConstPool = 64 * KB;

  // Number of pool entries after which a pool will be emitted. Since constant
  // pool emission checks are interval-based, this value is an approximation.
  static const int kApproxMaxPoolEntryCount = 512;
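  // Putting the two limits together, the periodic check amounts to something
  // like this sketch (DistanceToFirstUse() and EntryCount() are stand-ins for
  // the bookkeeping kept by ConstPool; the real logic is in CheckConstPool):
  //
  //   bool pool_due =
  //       constpool_.DistanceToFirstUse() > kApproxMaxDistToConstPool ||
  //       constpool_.EntryCount() > kApproxMaxPoolEntryCount;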
  // Emission of the constant pool may be blocked in some code sequences.
  int const_pool_blocked_nesting_;  // Block emission if this is not zero.
  int no_const_pool_before_;  // Block emission before this pc offset.

  // Emission of the veneer pools may be blocked in some code sequences.
  int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.
  // Relocation info generation
  // Each relocation is encoded as a variable-size value.
  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
  RelocInfoWriter reloc_info_writer;
  // Internal reference positions, required for (potential) patching in
  // GrowBuffer(); contains only those internal references whose labels
  // are already bound.
  std::deque<int> internal_reference_positions_;
  // Relocation info records are also used during code generation as temporary
  // containers for constants and code target addresses until they are emitted
  // to the constant pool. These pending relocation info records are
  // temporarily stored in a separate buffer until a constant pool is emitted.
  // If every instruction in a long sequence accesses the pool, we need one
  // pending relocation entry per instruction.

  // The pending constant pool.
  ConstPool constpool_;
  // Relocation for a type-recording IC has the AST id added to it. This
  // member variable is a way to pass the information from the call site to
  // the relocation info.
  TypeFeedbackId recorded_ast_id_;

  inline TypeFeedbackId RecordedAstId();
  inline void ClearRecordedAstId();

 protected:
  // Record the AST id of the CallIC being compiled, so that it can be placed
  // in the relocation information.
  void SetRecordedAstId(TypeFeedbackId ast_id) {
    DCHECK(recorded_ast_id_.IsNone());
    recorded_ast_id_ = ast_id;
  }
  // The relocation writer's position is at least kGap bytes below the end of
  // the generated instructions. This is so that multi-instruction sequences do
  // not have to check for overflow. The same is true for writes of large
  // relocation info entries, and debug strings encoded in the instruction
  // stream.
  static const int kGap = 128;
 public:
  class FarBranchInfo {
   public:
    FarBranchInfo(int offset, Label* label)
        : pc_offset_(offset), label_(label) {}
    // Offset of the branch in the code generation buffer.
    int pc_offset_;
    // The label branched to.
    Label* label_;
  };
 protected:
  // Information about unresolved (forward) branches.
  // The Assembler is only allowed to delete out-of-date information from here
  // after a label is bound. The MacroAssembler uses this information to
  // generate veneers.
  //
  // The second member gives information about the unresolved branch. The first
  // member of the pair is the maximum offset that the branch can reach in the
  // buffer. The map is sorted according to this reachable offset, making it
  // easy to check when veneers need to be emitted.
  // Note that the maximum reachable offset (first member of the pairs) should
  // always be positive, but it has the same type as the return value of
  // pc_offset() for convenience.
  std::multimap<int, FarBranchInfo> unresolved_branches_;
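  // For instance, a TBZ emitted at pc offset 0x100 can reach about +/-32KB,
  // so it would be recorded as { 0x100 + 32 * KB, FarBranchInfo(0x100, l) };
  // begin()->first then always yields the earliest deadline. (Numbers are
  // illustrative only.)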
  // We generate a veneer for a branch if we reach within this distance of the
  // limit of the range.
  static const int kVeneerDistanceMargin = 1 * KB;
  // The factor of 2 is a finger-in-the-air guess. With a default margin of
  // 1KB, that leaves us an additional 256 instructions to avoid generating a
  // protective branch.
  static const int kVeneerNoProtectionFactor = 2;
  static const int kVeneerDistanceCheckMargin =
      kVeneerNoProtectionFactor * kVeneerDistanceMargin;
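  // Worked out: kVeneerDistanceCheckMargin = 2 * 1KB = 2KB. The extra 1KB
  // over kVeneerDistanceMargin is 1024 / kInstructionSize = 256 instructions,
  // which is where the "additional 256 instructions" above comes from.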
  int unresolved_branches_first_limit() const {
    DCHECK(!unresolved_branches_.empty());
    return unresolved_branches_.begin()->first;
  }

  // This is similar to next_constant_pool_check_ and helps reduce the overhead
  // of checking for veneer pools.
  // It is maintained to the closest unresolved branch limit minus the maximum
  // veneer margin (or kMaxInt if there are no unresolved branches).
  int next_veneer_pool_check_;
 private:
  // If a veneer is emitted for a branch instruction, that instruction must be
  // removed from the associated label's link chain so that the assembler does
  // not later attempt (likely unsuccessfully) to patch it to branch directly
  // to the label.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);
  // This function deletes the information related to the label by traversing
  // the label chain, checking for each PC-relative instruction in the chain
  // whether pending unresolved information exists. Its complexity is
  // proportional to the length of the label chain.
  void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);

 private:
  PositionsRecorder positions_recorder_;
  friend class PositionsRecorder;
  friend class EnsureSpace;
  friend class ConstPool;
};
class PatchingAssembler : public Assembler {
 public:
  // Create an Assembler with a buffer starting at 'start'.
  // The buffer size is:
  //   size of instructions to patch + kGap
  // where kGap is the distance from which the Assembler tries to grow the
  // buffer.
  // If more or fewer instructions than expected are generated, or if some
  // relocation information takes space in the buffer, the PatchingAssembler
  // will crash trying to grow the buffer.
  PatchingAssembler(Instruction* start, unsigned count)
      : Assembler(NULL,
                  reinterpret_cast<byte*>(start),
                  count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }
  PatchingAssembler(byte* start, unsigned count)
      : Assembler(NULL, start, count * kInstructionSize + kGap) {
    // Block constant pool emission.
    StartBlockPools();
  }

  ~PatchingAssembler() {
    // The constant pool should still be blocked.
    DCHECK(is_const_pool_blocked());
    EndBlockPools();
    // Verify we have generated the number of instructions we expected.
    DCHECK((pc_offset() + kGap) == buffer_size_);
    // Verify no relocation information has been emitted.
    DCHECK(IsConstPoolEmpty());
    // Flush the Instruction cache.
    size_t length = buffer_size_ - kGap;
    Assembler::FlushICacheWithoutIsolate(buffer_, length);
  }
  // See the definition of PatchAdrFar() for details.
  static const int kAdrFarPatchableNNops = 2;
  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
  void PatchAdrFar(int64_t target_offset);
};
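// A hypothetical patching sequence, overwriting two previously emitted
// instructions in place. The instruction count must match exactly, or the
// destructor's size check fires ('target' stands in for however the caller
// obtains an Instruction* into existing code):
//
//   PatchingAssembler patcher(target, 2);
//   patcher.brk(0);
//   patcher.brk(0);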
class EnsureSpace BASE_EMBEDDED {
 public:
  explicit EnsureSpace(Assembler* assembler) {
    assembler->CheckBufferSpace();
  }
};
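// EnsureSpace is typically instantiated at the top of an emitting function so
// the buffer is grown before any bytes are written, e.g. (sketch only;
// SomeEmitter is not a real function):
//
//   void Assembler::SomeEmitter() {
//     EnsureSpace ensure_space(this);
//     // ... emit ...
//   }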
}  // namespace internal
}  // namespace v8

#endif  // V8_ARM64_ASSEMBLER_ARM64_H_