bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
const XRaySledEntry &Sled,
void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ const uint64_t Address = Sled.Address;
if (Enable) {
// lis 0, FuncId[16..32]
// li 0, FuncId[0..15]
- *reinterpret_cast<uint64_t *>(Sled.Address) =
+ *reinterpret_cast<uint64_t *>(Address) =
(0x3c000000ull + (FuncId >> 16)) +
((0x60000000ull + (FuncId & 0xffff)) << 32);
} else {
// b +JumpOverInstNum instructions.
- *reinterpret_cast<uint32_t *>(Sled.Address) =
+ *reinterpret_cast<uint32_t *>(Address) =
0x48000000ull + (JumpOverInstNum << 2);
}
- clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ clearCache(reinterpret_cast<void *>(Address), 8);
return true;
}
bool patchFunctionExit(const bool Enable, uint32_t FuncId,
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ const uint64_t Address = Sled.Address;
if (Enable) {
// lis 0, FuncId[16..32]
// li 0, FuncId[0..15]
- *reinterpret_cast<uint64_t *>(Sled.Address) =
+ *reinterpret_cast<uint64_t *>(Address) =
(0x3c000000ull + (FuncId >> 16)) +
((0x60000000ull + (FuncId & 0xffff)) << 32);
} else {
// Copy the blr/b instruction after JumpOverInstNum instructions.
- *reinterpret_cast<uint32_t *>(Sled.Address) =
- *(reinterpret_cast<uint32_t *>(Sled.Address) + JumpOverInstNum);
+ *reinterpret_cast<uint32_t *>(Address) =
+ *(reinterpret_cast<uint32_t *>(Address) + JumpOverInstNum);
}
- clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ clearCache(reinterpret_cast<void *>(Address), 8);
return true;
}
// opcode and first operand.
//
// Prerequisite is to compute the relative offset to the trampoline's address.
+ const uint64_t Address = Sled.Address;
int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
- (static_cast<int64_t>(Sled.Address) + 11);
+ (static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
- Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
- Trampoline, reinterpret_cast<void *>(Sled.Address));
+ Report("XRay Entry trampoline (%p) too far from sled (%p)\n", Trampoline,
+ reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
- *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
- *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
- *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
std::memory_order_release);
// FIXME: Write out the nops still?
}
//
// Prerequisite is to compute the relative offset of the
// __xray_FunctionExit function's address.
+ const uint64_t Address = Sled.Address;
int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
- (static_cast<int64_t>(Sled.Address) + 11);
+ (static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
- __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+ __xray_FunctionExit, reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
- *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
- *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
- *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Address + 6) = JmpOpCode;
+ *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+ reinterpret_cast<std::atomic<uint8_t> *>(Address), RetOpCode,
std::memory_order_release);
// FIXME: Write out the nops still?
}
const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
// Here we do the dance of replacing the tail call sled with a similar
// sequence as the entry sled, but calls the tail exit sled instead.
+ const uint64_t Address = Sled.Address;
int64_t TrampolineOffset =
reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
- (static_cast<int64_t>(Sled.Address) + 11);
+ (static_cast<int64_t>(Address) + 11);
if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
- __xray_FunctionTailExit, reinterpret_cast<void *>(Sled.Address));
+ __xray_FunctionTailExit, reinterpret_cast<void *>(Address));
return false;
}
if (Enable) {
- *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
- *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
- *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
std::memory_order_release);
} else {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
std::memory_order_release);
// FIXME: Write out the nops still?
}
// The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
// to a jmp, use 15 bytes instead.
//
+ const uint64_t Address = Sled.Address;
if (Enable) {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
switch (Sled.Version) {
case 1:
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp15Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
std::memory_order_release);
break;
case 0:
default:
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
std::memory_order_release);
break;
}
// unstashes the registers and returns. If the arguments are already in
// the correct registers, the stashing and unstashing become equivalently
// sized nops.
+ const uint64_t Address = Sled.Address;
if (Enable) {
std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
std::memory_order_release);
} else {
- std::atomic_store_explicit(
- reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
- std::memory_order_release);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
+ std::memory_order_release);
}
return false;
}