From: Fangrui Song <maskray@google.com>
Date: Tue, 14 Apr 2020 05:28:31 +0000 (-0700)
Subject: [XRay] Define uint64_t Address = Sled.Address; NFC
X-Git-Tag: llvmorg-12-init~9142
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4113e388c82f0530622149d61d92f630a80b0abb;p=platform%2Fupstream%2Fllvm.git

[XRay] Define uint64_t Address = Sled.Address; NFC

This makes it easy to change the address stored in a sled from an
absolute address to a PC-relative address.
---

diff --git a/compiler-rt/lib/xray/xray_powerpc64.cpp b/compiler-rt/lib/xray/xray_powerpc64.cpp
index b41f1bc..067488b 100644
--- a/compiler-rt/lib/xray/xray_powerpc64.cpp
+++ b/compiler-rt/lib/xray/xray_powerpc64.cpp
@@ -52,35 +52,37 @@ namespace __xray {
 bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
                         const XRaySledEntry &Sled,
                         void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+  const uint64_t Address = Sled.Address;
   if (Enable) {
     // lis 0, FuncId[16..32]
     // li 0, FuncId[0..15]
-    *reinterpret_cast<uint64_t *>(Sled.Address) =
+    *reinterpret_cast<uint64_t *>(Address) =
         (0x3c000000ull + (FuncId >> 16)) +
         ((0x60000000ull + (FuncId & 0xffff)) << 32);
   } else {
     // b +JumpOverInstNum instructions.
-    *reinterpret_cast<uint32_t *>(Sled.Address) =
+    *reinterpret_cast<uint32_t *>(Address) =
         0x48000000ull + (JumpOverInstNum << 2);
   }
-  clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+  clearCache(reinterpret_cast<void *>(Address), 8);
   return true;
 }
 
 bool patchFunctionExit(const bool Enable, uint32_t FuncId,
                        const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+  const uint64_t Address = Sled.Address;
   if (Enable) {
     // lis 0, FuncId[16..32]
     // li 0, FuncId[0..15]
-    *reinterpret_cast<uint64_t *>(Sled.Address) =
+    *reinterpret_cast<uint64_t *>(Address) =
         (0x3c000000ull + (FuncId >> 16)) +
         ((0x60000000ull + (FuncId & 0xffff)) << 32);
   } else {
     // Copy the blr/b instruction after JumpOverInstNum instructions.
-    *reinterpret_cast<uint32_t *>(Sled.Address) =
-        *(reinterpret_cast<uint32_t *>(Sled.Address) + JumpOverInstNum);
+    *reinterpret_cast<uint32_t *>(Address) =
+        *(reinterpret_cast<uint32_t *>(Address) + JumpOverInstNum);
   }
-  clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+  clearCache(reinterpret_cast<void *>(Address), 8);
   return true;
 }
 
diff --git a/compiler-rt/lib/xray/xray_x86_64.cpp b/compiler-rt/lib/xray/xray_x86_64.cpp
index e63ee1b..61a9a88 100644
--- a/compiler-rt/lib/xray/xray_x86_64.cpp
+++ b/compiler-rt/lib/xray/xray_x86_64.cpp
@@ -151,23 +151,24 @@ bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
   // opcode and first operand.
   //
   // Prerequisite is to compute the relative offset to the trampoline's address.
+  const uint64_t Address = Sled.Address;
   int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
-                             (static_cast<int64_t>(Sled.Address) + 11);
+                             (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
-    Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
-           Trampoline, reinterpret_cast<void *>(Sled.Address));
+    Report("XRay Entry trampoline (%p) too far from sled (%p)\n", Trampoline,
+           reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
@@ -196,23 +197,24 @@ bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
   //
   // Prerequisite is to compute the relative offset fo the
   // __xray_FunctionExit function's address.
+  const uint64_t Address = Sled.Address;
   int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
-                             (static_cast<int64_t>(Sled.Address) + 11);
+                             (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
     Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
-           __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+           __xray_FunctionExit, reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = JmpOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+        reinterpret_cast<std::atomic<uint8_t> *>(Address), RetOpCode,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
@@ -223,24 +225,25 @@ bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
                            const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
   // Here we do the dance of replacing the tail call sled with a similar
   // sequence as the entry sled, but calls the tail exit sled instead.
+  const uint64_t Address = Sled.Address;
   int64_t TrampolineOffset =
       reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
-      (static_cast<int64_t>(Sled.Address) + 11);
+      (static_cast<int64_t>(Address) + 11);
   if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
     Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
-           __xray_FunctionTailExit, reinterpret_cast<void *>(Sled.Address));
+           __xray_FunctionTailExit, reinterpret_cast<void *>(Address));
     return false;
   }
   if (Enable) {
-    *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
-    *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
-    *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+    *reinterpret_cast<uint32_t *>(Address + 2) = FuncId;
+    *reinterpret_cast<uint8_t *>(Address + 6) = CallOpCode;
+    *reinterpret_cast<uint32_t *>(Address + 7) = TrampolineOffset;
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), MovR10Seq,
         std::memory_order_release);
   } else {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp9Seq,
         std::memory_order_release);
     // FIXME: Write out the nops still?
   }
@@ -272,21 +275,22 @@ bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
   // The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
   // to a jmp, use 15 bytes instead.
   //
+  const uint64_t Address = Sled.Address;
   if (Enable) {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
         std::memory_order_release);
   } else {
     switch (Sled.Version) {
     case 1:
       std::atomic_store_explicit(
-          reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp15Seq,
+          reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp15Seq,
           std::memory_order_release);
       break;
     case 0:
     default:
       std::atomic_store_explicit(
-          reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
+          reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
           std::memory_order_release);
       break;
     }
   }
@@ -313,14 +317,15 @@ bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
   // unstashes the registers and returns. If the arguments are already in
   // the correct registers, the stashing and unstashing become equivalently
   // sized nops.
+  const uint64_t Address = Sled.Address;
   if (Enable) {
     std::atomic_store_explicit(
-        reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), NopwSeq,
         std::memory_order_release);
   } else {
-    std::atomic_store_explicit(
-      reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
-      std::memory_order_release);
+    std::atomic_store_explicit(
+        reinterpret_cast<std::atomic<uint16_t> *>(Address), Jmp20Seq,
+        std::memory_order_release);
   }
   return false;
 }
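For illustration, the follow-up this prepares for would make the sled store a
self-relative offset and recover the absolute address in one place. A minimal
sketch of that idea, assuming a hypothetical address() accessor (not part of
this commit; the real sled layout lives in xray_interface_internal.h):

  #include <cstdint>

  struct XRaySledEntry {
    // Hypothetical layout: a self-relative offset instead of an
    // absolute address.
    int32_t Address;

    // Recover the absolute sled address: the stored offset is relative
    // to the location of the Address field itself.
    uint64_t address() const {
      return reinterpret_cast<uint64_t>(&Address) + Address;
    }
  };

With every patcher now funneling all accesses through one local
(const uint64_t Address = Sled.Address;), switching it to
const uint64_t Address = Sled.address(); later is a one-line change per
function.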