#include <list>
#include <map>
+#include <vector>
-#include "src/cpu.h"
-#include "src/globals.h"
-#include "src/utils.h"
+#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
+#include "src/globals.h"
#include "src/serialize.h"
-#include "src/arm64/instructions-arm64.h"
+#include "src/utils.h"
namespace v8 {
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
Register(const Register& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
bool IsValid() const {
- ASSERT(IsRegister() || IsNone());
+ DCHECK(IsRegister() || IsNone());
return IsValidRegister();
}
}
static Register FromAllocationIndex(unsigned index) {
- ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
// cp is the last allocatable register.
if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
return from_code(kAllocatableContext);
}
static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- ASSERT((kAllocatableLowRangeBegin == 0) &&
+ DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+ DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 15) &&
(kAllocatableHighRangeBegin == 18) &&
(kAllocatableHighRangeEnd == 24) &&
}
static int ToAllocationIndex(Register reg) {
- ASSERT(reg.IsAllocatable());
+ DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
if (code == kAllocatableContext) {
return NumAllocatableRegisters() - 1;
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
FPRegister(const FPRegister& r) { // NOLINT(runtime/explicit)
reg_code = r.reg_code;
reg_size = r.reg_size;
reg_type = r.reg_type;
- ASSERT(IsValidOrNone());
+ DCHECK(IsValidOrNone());
}
bool IsValid() const {
- ASSERT(IsFPRegister() || IsNone());
+ DCHECK(IsFPRegister() || IsNone());
return IsValidFPRegister();
}
}
static FPRegister FromAllocationIndex(unsigned int index) {
- ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+ DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
return (index <= kAllocatableLowRangeEnd)
? from_code(index)
}
static const char* AllocationIndexToString(int index) {
- ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
- ASSERT((kAllocatableLowRangeBegin == 0) &&
+ DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+ DCHECK((kAllocatableLowRangeBegin == 0) &&
(kAllocatableLowRangeEnd == 14) &&
(kAllocatableHighRangeBegin == 16) &&
(kAllocatableHighRangeEnd == 28));
}
static int ToAllocationIndex(FPRegister reg) {
- ASSERT(reg.IsAllocatable());
+ DCHECK(reg.IsAllocatable());
unsigned code = reg.code();
return (code <= kAllocatableLowRangeEnd)
CPURegister reg4 = NoCPUReg)
: list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
size_(reg1.SizeInBits()), type_(reg1.type()) {
- ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
- ASSERT(IsValid());
+ DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
: list_(list), size_(size), type_(type) {
- ASSERT(IsValid());
+ DCHECK(IsValid());
}
CPURegList(CPURegister::RegisterType type, unsigned size,
unsigned first_reg, unsigned last_reg)
: size_(size), type_(type) {
- ASSERT(((type == CPURegister::kRegister) &&
+ DCHECK(((type == CPURegister::kRegister) &&
(last_reg < kNumberOfRegisters)) ||
((type == CPURegister::kFPRegister) &&
(last_reg < kNumberOfFPRegisters)));
- ASSERT(last_reg >= first_reg);
+ DCHECK(last_reg >= first_reg);
list_ = (1UL << (last_reg + 1)) - 1;
list_ &= ~((1UL << first_reg) - 1);
- ASSERT(IsValid());
+ DCHECK(IsValid());
}
CPURegister::RegisterType type() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return type_;
}
RegList list() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return list_;
}
inline void set_list(RegList new_list) {
- ASSERT(IsValid());
+ DCHECK(IsValid());
list_ = new_list;
}
static CPURegList GetSafepointSavedRegisters();
bool IsEmpty() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return list_ == 0;
}
const CPURegister& other2 = NoCPUReg,
const CPURegister& other3 = NoCPUReg,
const CPURegister& other4 = NoCPUReg) const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
RegList list = 0;
if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
}
int Count() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return CountSetBits(list_, kRegListSizeInBits);
}
unsigned RegisterSizeInBits() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return size_;
}
unsigned RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
- ASSERT((size_in_bits % kBitsPerByte) == 0);
+ DCHECK((size_in_bits % kBitsPerByte) == 0);
return size_in_bits / kBitsPerByte;
}
unsigned TotalSizeInBytes() const {
- ASSERT(IsValid());
+ DCHECK(IsValid());
return RegisterSizeInBytes() * Count();
}
// MemOperand represents a memory operand in a load or store instruction.
class MemOperand {
public:
- inline explicit MemOperand();
+ inline MemOperand();
inline explicit MemOperand(Register base,
ptrdiff_t offset = 0,
AddrMode addrmode = Offset);
// handle indexed modes.
inline Operand OffsetAsOperand() const;
+ enum PairResult {
+ kNotPair, // Can't use a pair instruction.
+ kPairAB, // Can use a pair instruction (operandA has lower address).
+ kPairBA // Can use a pair instruction (operandB has lower address).
+ };
+ // Check whether two MemOperands are consistent for stp/ldp use; see the
+ // usage sketch after this class.
+ static PairResult AreConsistentForPair(const MemOperand& operandA,
+ const MemOperand& operandB,
+ int access_size_log2 = kXRegSizeLog2);
+
private:
Register base_;
Register regoffset_;
};
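+// Illustrative usage sketch for MemOperand::AreConsistentForPair. The helper
+// name TryStorePair is hypothetical and not part of the assembler API; it
+// shows how a caller could fuse two neighbouring stores into one stp:
+//
+//   void TryStorePair(Assembler* assm,
+//                     const CPURegister& rt, const MemOperand& a,
+//                     const CPURegister& rt2, const MemOperand& b) {
+//     switch (MemOperand::AreConsistentForPair(a, b)) {
+//       case MemOperand::kPairAB: assm->stp(rt, rt2, a); break;
+//       case MemOperand::kPairBA: assm->stp(rt2, rt, b); break;
+//       case MemOperand::kNotPair:
+//         assm->str(rt, a);
+//         assm->str(rt2, b);
+//         break;
+//     }
+//   }
+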
+class ConstPool {
+ public:
+ explicit ConstPool(Assembler* assm)
+ : assm_(assm),
+ first_use_(-1),
+ shared_entries_count(0) {}
+ void RecordEntry(intptr_t data, RelocInfo::Mode mode);
+ int EntryCount() const {
+ return shared_entries_count + unique_entries_.size();
+ }
+ bool IsEmpty() const {
+ return shared_entries_.empty() && unique_entries_.empty();
+ }
+ // Distance in bytes between the current pc and the first instruction
+ // using the pool. If there are no pending entries, return kMaxInt.
+ int DistanceToFirstUse();
+ // Offset after which instructions using the pool will be out of range.
+ int MaxPcOffset();
+ // Maximum size the constant pool can be with current entries. It always
+ // includes alignment padding and the branch over the pool.
+ int WorstCaseSize();
+ // Size in bytes of the literal pool *if* it is emitted at the current
+ // pc. The size will include the branch over the pool if it was requested.
+ int SizeIfEmittedAtCurrentPc(bool require_jump);
+ // Emit the literal pool at the current pc with a branch over the pool if
+ // requested.
+ void Emit(bool require_jump);
+ // Discard any pending pool entries.
+ void Clear();
+
+ private:
+ bool CanBeShared(RelocInfo::Mode mode);
+ void EmitMarker();
+ void EmitGuard();
+ void EmitEntries();
+
+ Assembler* assm_;
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_use_;
+ // Values and pc offset(s) of entries which can be shared.
+ std::multimap<uint64_t, int> shared_entries_;
+ // Number of distinct literals in shared entries.
+ int shared_entries_count;
+ // Values and pc offsets of entries which cannot be shared.
+ std::vector<std::pair<uint64_t, int> > unique_entries_;
+};
+
+
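+// A sketch of the intended interaction between the assembler and ConstPool,
+// under the emission rules documented further down (the kApprox* names are
+// the Assembler constants defined below; this is not the exact heuristic):
+//
+//   constpool_.RecordEntry(imm, rmode);   // a load needs a 64-bit literal
+//   ...                                   // later, at a periodic check:
+//   if (constpool_.DistanceToFirstUse() >= kApproxMaxDistToConstPool ||
+//       constpool_.EntryCount() > kApproxMaxPoolEntryCount) {
+//     constpool_.Emit(require_jump);  // branches over the pool if requested
+//   }
+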
// -----------------------------------------------------------------------------
// Assembler.
virtual ~Assembler();
virtual void AbortedCodeGeneration() {
- num_pending_reloc_info_ = 0;
+ constpool_.Clear();
}
// System functions ---------------------------------------------------------
// Start generating code from the beginning of the buffer, discarding any code
// and data that has already been emitted into the buffer.
//
- // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+ // In order to avoid any accidental transfer of state, Reset DCHECKs that the
// constant pool is not blocked.
void Reset();
// instruction stream that call will return from.
inline static Address return_address_from_call_start(Address pc);
+ // Return the code target address of the patch debug break slot.
+ inline static Address break_address_from_return_address(Address pc);
+
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
// Size of the generated code in bytes
uint64_t SizeOfGeneratedCode() const {
- ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+ DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
return pc_ - buffer_;
}
// Return the code size generated from label to the current position.
uint64_t SizeOfCodeGeneratedSince(const Label* label) {
- ASSERT(label->is_bound());
- ASSERT(pc_offset() >= label->pos());
- ASSERT(pc_offset() < buffer_size_);
+ DCHECK(label->is_bound());
+ DCHECK(pc_offset() >= label->pos());
+ DCHECK(pc_offset() < buffer_size_);
return pc_offset() - label->pos();
}
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
- ASSERT(size >= 0);
- ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+ DCHECK(size >= 0);
+ DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
}
// Return the number of instructions generated from label to the
static bool IsConstantPoolAt(Instruction* instr);
static int ConstantPoolSizeAt(Instruction* instr);
// See Assembler::CheckConstPool for more info.
- void ConstantPoolMarker(uint32_t size);
void EmitPoolGuard();
- void ConstantPoolGuard();
// Prevent veneer pool emission until EndBlockVeneerPool is called.
// Calls to this function can be nested but must be followed by an equal
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
}
// Sbfm aliases.
// Arithmetic shift right.
void asr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
+ DCHECK(shift < rd.SizeInBits());
sbfm(rd, rn, shift, rd.SizeInBits() - 1);
}
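+ // For example, on a 64-bit register asr(x0, x1, 4) is encoded as
+ // sbfm(x0, x1, 4, 63).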
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
}
// Logical shift left.
void lsl(const Register& rd, const Register& rn, unsigned shift) {
unsigned reg_size = rd.SizeInBits();
- ASSERT(shift < reg_size);
+ DCHECK(shift < reg_size);
ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
}
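+ // For example, on a 64-bit register lsl(x0, x1, 8) is encoded as
+ // ubfm(x0, x1, 56, 55), i.e. immr = (64 - 8) % 64 and imms = 64 - 8 - 1.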
// Logical shift right.
void lsr(const Register& rd, const Register& rn, unsigned shift) {
- ASSERT(shift < rd.SizeInBits());
+ DCHECK(shift < rd.SizeInBits());
ubfm(rd, rn, shift, rd.SizeInBits() - 1);
}
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
const Register& rn,
unsigned lsb,
unsigned width) {
- ASSERT(width >= 1);
- ASSERT(lsb + width <= rn.SizeInBits());
+ DCHECK(width >= 1);
+ DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
}
};
void nop(NopMarkerTypes n) {
- ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+ DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
}
// subsequent instructions.
void EmitStringData(const char * string) {
size_t len = strlen(string) + 1;
- ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+ DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
// Code generation helpers --------------------------------------------------
- unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+ bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
+
+ Instruction* pc() const { return Instruction::Cast(pc_); }
Instruction* InstructionAt(int offset) const {
return reinterpret_cast<Instruction*>(buffer_ + offset);
// Register encoding.
static Instr Rd(CPURegister rd) {
- ASSERT(rd.code() != kSPRegInternalCode);
+ DCHECK(rd.code() != kSPRegInternalCode);
return rd.code() << Rd_offset;
}
static Instr Rn(CPURegister rn) {
- ASSERT(rn.code() != kSPRegInternalCode);
+ DCHECK(rn.code() != kSPRegInternalCode);
return rn.code() << Rn_offset;
}
static Instr Rm(CPURegister rm) {
- ASSERT(rm.code() != kSPRegInternalCode);
+ DCHECK(rm.code() != kSPRegInternalCode);
return rm.code() << Rm_offset;
}
static Instr Ra(CPURegister ra) {
- ASSERT(ra.code() != kSPRegInternalCode);
+ DCHECK(ra.code() != kSPRegInternalCode);
return ra.code() << Ra_offset;
}
static Instr Rt(CPURegister rt) {
- ASSERT(rt.code() != kSPRegInternalCode);
+ DCHECK(rt.code() != kSPRegInternalCode);
return rt.code() << Rt_offset;
}
static Instr Rt2(CPURegister rt2) {
- ASSERT(rt2.code() != kSPRegInternalCode);
+ DCHECK(rt2.code() != kSPRegInternalCode);
return rt2.code() << Rt2_offset;
}
// These encoding functions allow the stack pointer to be encoded, and
// disallow the zero register.
static Instr RdSP(Register rd) {
- ASSERT(!rd.IsZero());
+ DCHECK(!rd.IsZero());
return (rd.code() & kRegCodeMask) << Rd_offset;
}
static Instr RnSP(Register rn) {
- ASSERT(!rn.IsZero());
+ DCHECK(!rn.IsZero());
return (rn.code() & kRegCodeMask) << Rn_offset;
}
static bool IsImmLSUnscaled(ptrdiff_t offset);
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+ void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& addr, LoadStorePairOp op);
+ static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size);
+
void Logical(const Register& rd,
const Register& rn,
const Operand& operand,
const Operand& operand,
FlagsUpdate S,
Instr op);
- void LoadStorePair(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairOp op);
void LoadStorePairNonTemporal(const CPURegister& rt,
const CPURegister& rt2,
const MemOperand& addr,
// instructions.
void BlockConstPoolFor(int instructions);
+ // Set how far from the current pc the next constant pool check will be.
+ void SetNextConstPoolCheckIn(int instructions) {
+ next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+ }
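+ // For example, with kInstructionSize == 4, SetNextConstPoolCheckIn(128)
+ // schedules the next check 512 bytes past the current pc.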
+
// Emit the instruction at pc_.
void Emit(Instr instruction) {
STATIC_ASSERT(sizeof(*pc_) == 1);
STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
- ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+ DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
memcpy(pc_, &instruction, sizeof(instruction));
pc_ += sizeof(instruction);
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
- ASSERT(sizeof(*pc_) == 1);
- ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+ DCHECK(sizeof(*pc_) == 1);
+ DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
// TODO(all): Somehow register that we have some data here. Then we can
// disassemble it correctly.
int next_constant_pool_check_;
// Constant pool generation
- // Pools are emitted in the instruction stream, preferably after unconditional
- // jumps or after returns from functions (in dead code locations).
- // If a long code sequence does not contain unconditional jumps, it is
- // necessary to emit the constant pool before the pool gets too far from the
- // location it is accessed from. In this case, we emit a jump over the emitted
- // constant pool.
+ // Pools are emitted in the instruction stream. They are emitted when:
+ // * the distance to the first use is above a pre-defined distance, or
+ // * the number of entries in the pool is above a pre-defined size, or
+ // * code generation is finished.
+ // If a pool needs to be emitted before code generation is finished, a branch
+ // over the emitted pool will be inserted.
+
// Constants in the pool may be addresses of functions that get relocated;
// if so, a relocation info entry is associated to the constant pool entry.
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstPoolIntervalInst = 128;
- static const int kCheckConstPoolInterval =
- kCheckConstPoolIntervalInst * kInstructionSize;
-
- // Constants in pools are accessed via pc relative addressing, which can
- // reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant.
- static const int kMaxDistToConstPool = 4 * KB;
- static const int kMaxNumPendingRelocInfo =
- kMaxDistToConstPool / kInstructionSize;
-
-
- // Average distance beetween a constant pool and the first instruction
- // accessing the constant pool. Longer distance should result in less I-cache
- // pollution.
- // In practice the distance will be smaller since constant pool emission is
- // forced after function return and sometimes after unconditional branches.
- static const int kAvgDistToConstPool =
- kMaxDistToConstPool - kCheckConstPoolInterval;
+ static const int kCheckConstPoolInterval = 128;
+
+ // Distance to first use after which a pool will be emitted. Pool entries
+ // are accessed with a pc-relative load, therefore this cannot be more than
+ // 1 * MB. Since constant pool emission checks are interval based, this
+ // value is an approximation.
+ static const int kApproxMaxDistToConstPool = 64 * KB;
+
+ // Number of pool entries after which a pool will be emitted. Since constant
+ // pool emission checks are interval based, this value is an approximation.
+ static const int kApproxMaxPoolEntryCount = 512;
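+ // For example, 512 pending x-register-sized entries occupy only 4KB, and a
+ // 64KB distance to first use leaves a wide margin below the +/-1MB range of
+ // a pc-relative literal load.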
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the first instruction requiring a constant pool entry
- // since the previous constant pool was emitted.
- int first_const_pool_use_;
-
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- // the buffer of pending relocation info
- RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
- // number of pending reloc info entries in the buffer
- int num_pending_reloc_info_;
+ // The pending constant pool.
+ ConstPool constpool_;
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) {
- ASSERT(recorded_ast_id_.IsNone());
+ DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
static const int kVeneerDistanceCheckMargin =
kVeneerNoProtectionFactor * kVeneerDistanceMargin;
int unresolved_branches_first_limit() const {
- ASSERT(!unresolved_branches_.empty());
+ DCHECK(!unresolved_branches_.empty());
return unresolved_branches_.begin()->first;
}
// This is similar to next_constant_pool_check_ and helps reduce the overhead
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
+ friend class ConstPool;
};
class PatchingAssembler : public Assembler {
~PatchingAssembler() {
// Const pool should still be blocked.
- ASSERT(is_const_pool_blocked());
+ DCHECK(is_const_pool_blocked());
EndBlockPools();
// Verify we have generated the number of instructions we expected.
- ASSERT((pc_offset() + kGap) == buffer_size_);
+ DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
- ASSERT(num_pending_reloc_info() == 0);
+ DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- CPU::FlushICache(buffer_, length);
+ CpuFeatures::FlushICache(buffer_, length);
}
- static const int kMovInt64NInstrs = 4;
- void MovInt64(const Register& rd, int64_t imm);
-
// See definition of PatchAdrFar() for details.
- static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
- static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
- void PatchAdrFar(Instruction* target);
+ static const int kAdrFarPatchableNNops = 2;
+ static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
+ void PatchAdrFar(ptrdiff_t target_offset);
};