return addrToSlot(this, Ptr + PageSize); // Round up.
}
+// Returns the magic fault address used to report internally-detected errors
+// (double free, invalid free). The address lives inside the extra page that
+// init() reserves at the right-hand end of the guarded pool specifically for
+// this purpose, so a touch of it is guaranteed to SEGV without colliding with
+// any allocation slot or guard page.
+uintptr_t AllocatorState::internallyDetectedErrorFaultAddress() const {
+  // -0x10 keeps the address strictly below GuardedPagePoolEnd (one past the
+  // end of the pool) while staying within the dedicated internal-fault page.
+  return GuardedPagePoolEnd - 0x10;
+}
+
} // namespace gwp_asan
uint8_t Magic[4] = {};
// Update the version number when the AllocatorState or AllocationMetadata
// change.
- static constexpr uint16_t kAllocatorVersion = 1;
+ static constexpr uint16_t kAllocatorVersion = 2;
uint16_t Version = 0;
uint16_t Reserved = 0;
};
// Whether this allocation has been deallocated yet.
bool IsDeallocated = false;
+
+ // In recoverable mode, whether this allocation has had a crash associated
+ // with it. This has certain side effects, like meaning this allocation will
+ // permanently occupy a slot, and won't ever have another crash reported from
+ // it.
+ bool HasCrashed = false;
};
// This holds the state that's shared between the GWP-ASan allocator and the
// must be within memory owned by this pool, else the result is undefined.
bool isGuardPage(uintptr_t Ptr) const;
+ // Returns the address that's used by __gwp_asan_get_internal_crash_address()
+ // and GPA::raiseInternallyDetectedError() to communicate that the SEGV in
+ // question comes from an internally-detected error.
+ uintptr_t internallyDetectedErrorFaultAddress() const;
+
// The number of guarded slots that this pool holds.
size_t MaxSimultaneousAllocations = 0;
}
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State) {
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr) {
+ // There can be a race between internally- and externally-raised faults. The
+ // fault address from the signal handler is used to discriminate whether it's
+ // internally- or externally-raised, and the pool maintains a special page at
+ // the end of the GuardedPagePool specifically for the internally-raised
+ // faults.
+ if (ErrorPtr != State->internallyDetectedErrorFaultAddress())
+ return 0u;
return State->FailureAddress;
}
const gwp_asan::AllocationMetadata *Metadata,
uintptr_t ErrorPtr);
-// For internally-detected errors (double free, invalid free), this function
-// returns the pointer that the error occurred at. If the error is unrelated to
-// GWP-ASan, or if the error was caused by a non-internally detected failure,
-// this function returns zero.
+// This function, provided the fault address from the signal handler, returns
+// the following values:
+// 1. If the crash was caused by an internally-detected error (invalid free,
+// double free), this function returns the pointer that was used for the
+// internally-detected bad operation (i.e. the pointer given to free()).
+// 2. For externally-detected crashes (use-after-free, buffer-overflow), this
+// function returns zero.
+// 3. If GWP-ASan wasn't responsible for the crash at all, this function also
+// returns zero.
uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State);
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+ uintptr_t ErrorPtr);
// Returns a pointer to the metadata for the allocation that's responsible for
// the crash. This metadata should not be dereferenced directly due to API
#include "gwp_asan/guarded_pool_allocator.h"
+#include "gwp_asan/crash_handler.h"
#include "gwp_asan/options.h"
#include "gwp_asan/utilities.h"
assert((PageSize & (PageSize - 1)) == 0);
State.PageSize = PageSize;
+ // Number of pages required =
+ // + MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
+ // + MaxSimultaneousAllocations (one guard on the left side of each slot)
+ // + 1 (an extra guard page at the end of the pool, on the right side)
+ // + 1 (an extra page that's used for reporting internally-detected crashes,
+ // like double free and invalid free, to the signal handler; see
+ // raiseInternallyDetectedError() for more info)
size_t PoolBytesRequired =
- PageSize * (1 + State.MaxSimultaneousAllocations) +
+ PageSize * (2 + State.MaxSimultaneousAllocations) +
State.MaxSimultaneousAllocations * State.maximumAllocationSize();
assert(PoolBytesRequired % PageSize == 0);
void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
return reinterpret_cast<void *>(UserPtr);
}
-void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
+void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
+ Error E) {
+ // Disable the allocator before setting the internal failure state. In
+ // non-recoverable mode, the allocator will be permanently disabled, and so
+ // things will be accessed without locks.
+ disable();
+
+ // Races between internally- and externally-raised faults can happen. Right
+ // now, in this thread we've locked the allocator in order to raise an
+ // internally-detected fault, and another thread could SIGSEGV to raise an
+ // externally-detected fault. What will happen is that the other thread will
+ // wait in the signal handler, as we hold the allocator's locks from the
+ // disable() above. We'll trigger the signal handler by touching the
+ // internal-signal-raising address below, and the signal handler from our
+ // thread will get to run first as we will continue to hold the allocator
+ // locks until the enable() at the end of this function. Be careful though, if
+ // this thread receives another SIGSEGV after the disable() above, but before
+ // touching the internal-signal-raising address below, then this thread will
+ // get an "externally-raised" SIGSEGV while *also* holding the allocator
+ // locks, which means this thread's signal handler will deadlock. This could
+ // be resolved with a re-entrant lock, but asking platforms to implement this
+ // seems unnecessary given the only way to get a SIGSEGV in this critical
+ // section is either a memory safety bug in the couple lines of code below (be
+ // careful!), or someone outside uses `kill(this_thread, SIGSEGV)`, which
+ // really shouldn't happen.
+
State.FailureType = E;
State.FailureAddress = Address;
- // Raise a SEGV by touching first guard page.
- volatile char *p = reinterpret_cast<char *>(State.GuardedPagePool);
+ // Raise a SEGV by touching a specific address that identifies to the crash
+ // handler that this is an internally-raised fault. Changing this address?
+ // Don't forget to update __gwp_asan_get_internal_crash_address.
+ volatile char *p =
+ reinterpret_cast<char *>(State.internallyDetectedErrorFaultAddress());
*p = 0;
- // Normally, would be __builtin_unreachable(), but because of
- // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
- // volatile store above, even though it has side effects.
- __builtin_trap();
-}
-void GuardedPoolAllocator::stop() {
- getThreadLocals()->RecursiveGuard = true;
- PoolMutex.tryLock();
+ // This should never be reached in non-recoverable mode. Ensure that the
+ // signal handler called handleRecoverablePostCrashReport(), which was
+ // responsible for re-setting these fields.
+ assert(State.FailureType == Error::UNKNOWN);
+ assert(State.FailureAddress == 0u);
+
+ // In recoverable mode, the signal handler (after dumping the crash) marked
+ // the page containing the InternalFaultSegvAddress as read/writeable, to
+ // allow the second touch to succeed after returning from the signal handler.
+ // Now, we need to mark the page as non-read/write-able again, so future
+ // internal faults can be raised.
+ deallocateInGuardedPool(
+ reinterpret_cast<void *>(getPageAddr(
+ State.internallyDetectedErrorFaultAddress(), State.PageSize)),
+ State.PageSize);
+
+ // And now we're done with patching ourselves back up, enable the allocator.
+ enable();
}
void GuardedPoolAllocator::deallocate(void *Ptr) {
size_t Slot = State.getNearestSlot(UPtr);
uintptr_t SlotStart = State.slotToAddr(Slot);
AllocationMetadata *Meta = addrToMetadata(UPtr);
+
+  // If this allocation is responsible for a crash, never recycle it. Turn the
+  // deallocate() call into a no-op.
+ if (Meta->HasCrashed)
+ return;
+
if (Meta->Addr != UPtr) {
- // If multiple errors occur at the same time, use the first one.
- ScopedLock L(PoolMutex);
- trapOnAddress(UPtr, Error::INVALID_FREE);
+ raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
+ return;
+ }
+ if (Meta->IsDeallocated) {
+ raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
+ return;
}
// Intentionally scope the mutex here, so that other threads can access the
// pool during the expensive markInaccessible() call.
{
ScopedLock L(PoolMutex);
- if (Meta->IsDeallocated) {
- trapOnAddress(UPtr, Error::DOUBLE_FREE);
- }
// Ensure that the deallocation is recorded before marking the page as
// inaccessible. Otherwise, a racy use-after-free will have inconsistent
freeSlot(Slot);
}
+// Thread-compatible, protected by PoolMutex.
+static bool PreviousRecursiveGuard;
+
+void GuardedPoolAllocator::preCrashReport(void *Ptr) {
+  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+  uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
+      &State, reinterpret_cast<uintptr_t>(Ptr));
+  // For internally-detected errors, raiseInternallyDetectedError() has already
+  // disable()'d the allocator on this thread, so only take the locks here for
+  // externally-raised faults (use-after-free, buffer-overflow).
+  if (!InternalCrashAddr)
+    disable();
+
+  // If something in the signal handler calls malloc() while dumping the
+  // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
+  // service that allocation. `PreviousRecursiveGuard` is protected by the
+  // allocator locks taken in disable(), either explicitly above for
+  // externally-raised errors, or implicitly in raiseInternallyDetectedError()
+  // for internally-detected errors.
+  PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
+  getThreadLocals()->RecursiveGuard = true;
+}
+
+// Called by the signal handler (recoverable mode only) after the report has
+// been dumped. Restores allocator state so execution can continue: marks the
+// offending slot's metadata as having crashed (so it's never reported again
+// and never recycled), re-maps the faulting page so that re-executing the
+// faulting instruction succeeds, permanently retires the slot, and re-enables
+// the allocator if preCrashReport() disabled it.
+void GuardedPoolAllocator::postCrashReportRecoverableOnly(void *SignalPtr) {
+  uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
+  uintptr_t InternalCrashAddr =
+      __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
+  // Note: spelled as a full conditional; `x ?: y` is a GNU extension and
+  // breaks non-GNU-compatible builds.
+  uintptr_t ErrorUptr = InternalCrashAddr ? InternalCrashAddr : SignalUPtr;
+
+  AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
+  Metadata->HasCrashed = true;
+
+  // Make the faulting page read/writeable again, so the faulting instruction
+  // succeeds when it's re-executed after returning from the signal handler.
+  allocateInGuardedPool(
+      reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
+      State.PageSize);
+
+  // Clear the internal state in order to not confuse the crash handler if a
+  // use-after-free or buffer-overflow comes from a different allocation in the
+  // future.
+  if (InternalCrashAddr) {
+    State.FailureType = Error::UNKNOWN;
+    State.FailureAddress = 0;
+  }
+
+  size_t Slot = State.getNearestSlot(ErrorUptr);
+  // If the slot is available, remove it permanently.
+  for (size_t i = 0; i < FreeSlotsLength; ++i) {
+    if (FreeSlots[i] == Slot) {
+      FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
+      FreeSlotsLength -= 1;
+      break;
+    }
+  }
+
+  getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
+  if (!InternalCrashAddr)
+    enable();
+}
+
size_t GuardedPoolAllocator::getSize(const void *Ptr) {
assert(pointerIsMine(Ptr));
ScopedLock L(PoolMutex);
// allocate.
void iterate(void *Base, size_t Size, iterate_callback Cb, void *Arg);
- // This function is used to signal the allocator to indefinitely stop
- // functioning, as a crash has occurred. This stops the allocator from
- // servicing any further allocations permanently.
- void stop();
-
// Return whether the allocation should be randomly chosen for sampling.
GWP_ASAN_ALWAYS_INLINE bool shouldSample() {
// NextSampleCounter == 0 means we "should regenerate the counter".
// Returns a pointer to the AllocatorState region.
const AllocatorState *getAllocatorState() const { return &State; }
+ // Functions that the signal handler is responsible for calling, while
+ // providing the SEGV pointer, prior to dumping the crash, and after dumping
+ // the crash (in recoverable mode only).
+ void preCrashReport(void *Ptr);
+ void postCrashReportRecoverableOnly(void *Ptr);
+
// Exposed as protected for testing.
protected:
// Returns the actual allocation size required to service an allocation with
// Raise a SEGV and set the corresponding fields in the Allocator's State in
// order to tell the crash handler what happened. Used when errors are
// detected internally (Double Free, Invalid Free).
- void trapOnAddress(uintptr_t Address, Error E);
+ void raiseInternallyDetectedError(uintptr_t Address, Error E);
static GuardedPoolAllocator *getSingleton();
// before this function.
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
gwp_asan::backtrace::PrintBacktrace_t PrintBacktrace,
- gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace);
+ gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace,
+ bool Recoverable = false);
// Uninstall the signal handlers, test-only.
void uninstallSignalHandlers();
void installSignalHandlers(gwp_asan::GuardedPoolAllocator * /* GPA */,
Printf_t /* Printf */,
backtrace::PrintBacktrace_t /* PrintBacktrace */,
- backtrace::SegvBacktrace_t /* SegvBacktrace */) {}
+ backtrace::SegvBacktrace_t /* SegvBacktrace */,
+ bool /* Recoverable */) {}
void uninstallSignalHandlers() {}
} // namespace segv_handler
assert(State && "dumpReport missing Allocator State.");
assert(Metadata && "dumpReport missing Metadata.");
assert(Printf && "dumpReport missing Printf.");
+  assert(__gwp_asan_error_is_mine(State, ErrorPtr) &&
+         "dumpReport() called on a non-GWP-ASan error.");
-  if (!__gwp_asan_error_is_mine(State, ErrorPtr))
+  uintptr_t InternalErrorPtr =
+      __gwp_asan_get_internal_crash_address(State, ErrorPtr);
+  if (InternalErrorPtr)
+    ErrorPtr = InternalErrorPtr;
+
+  const gwp_asan::AllocationMetadata *AllocMeta =
+      __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
+
+  // It's unusual for a signal handler to be invoked multiple times for the same
+  // allocation, but it's possible in various scenarios, like:
+  // 1. A double-free or invalid-free was invoked in one thread at the same
+  //    time as a buffer-overflow or use-after-free in another thread, or
+  // 2. Two threads do a use-after-free or buffer-overflow at the same time.
+  // In these instances, we've already dumped a report for this allocation, so
+  // skip dumping this issue as well. Note that __gwp_asan_get_metadata() can
+  // return nullptr (e.g. a wild free that doesn't match any slot), so guard
+  // the dereference.
+  if (AllocMeta && AllocMeta->HasCrashed)
    return;
  Printf("*** GWP-ASan detected a memory error ***\n");
  ScopedEndOfReportDecorator Decorator(Printf);
-  uintptr_t InternalErrorPtr = __gwp_asan_get_internal_crash_address(State);
-  if (InternalErrorPtr != 0u)
-    ErrorPtr = InternalErrorPtr;
-
  Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
-
  if (E == Error::UNKNOWN) {
    Printf("GWP-ASan cannot provide any more information about this error. "
           "This may occur due to a wild memory access into the GWP-ASan pool, "
    return;
  }
-  const gwp_asan::AllocationMetadata *AllocMeta =
-      __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
-
  // Print the error header.
  printHeader(E, ErrorPtr, AllocMeta, Printf);
struct sigaction PreviousHandler;
bool SignalHandlerInstalled;
+bool RecoverableSignal;
gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
Printf_t PrintfForSignalHandler;
PrintBacktrace_t PrintBacktraceForSignalHandler;
SegvBacktrace_t BacktraceForSignalHandler;
static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
- if (GPAForSignalHandler) {
- GPAForSignalHandler->stop();
+ const gwp_asan::AllocatorState *State =
+ GPAForSignalHandler->getAllocatorState();
+ void *FaultAddr = info->si_addr;
+ uintptr_t FaultAddrUPtr = reinterpret_cast<uintptr_t>(FaultAddr);
- dumpReport(reinterpret_cast<uintptr_t>(info->si_addr),
- GPAForSignalHandler->getAllocatorState(),
- GPAForSignalHandler->getMetadataRegion(),
+ if (__gwp_asan_error_is_mine(State, FaultAddrUPtr)) {
+ GPAForSignalHandler->preCrashReport(FaultAddr);
+
+ dumpReport(FaultAddrUPtr, State, GPAForSignalHandler->getMetadataRegion(),
BacktraceForSignalHandler, PrintfForSignalHandler,
PrintBacktraceForSignalHandler, ucontext);
+
+ if (RecoverableSignal) {
+ GPAForSignalHandler->postCrashReportRecoverableOnly(FaultAddr);
+ return;
+ }
}
- // Process any previous handlers.
+ // Process any previous handlers as long as the crash wasn't a GWP-ASan crash
+ // in recoverable mode.
if (PreviousHandler.sa_flags & SA_SIGINFO) {
PreviousHandler.sa_sigaction(sig, info, ucontext);
} else if (PreviousHandler.sa_handler == SIG_DFL) {
void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
PrintBacktrace_t PrintBacktrace,
- SegvBacktrace_t SegvBacktrace) {
+ SegvBacktrace_t SegvBacktrace, bool Recoverable) {
assert(GPA && "GPA wasn't provided to installSignalHandlers.");
assert(Printf && "Printf wasn't provided to installSignalHandlers.");
assert(PrintBacktrace &&
PrintfForSignalHandler = Printf;
PrintBacktraceForSignalHandler = PrintBacktrace;
BacktraceForSignalHandler = SegvBacktrace;
+ RecoverableSignal = Recoverable;
struct sigaction Action = {};
Action.sa_sigaction = sigSegvHandler;
"the same. Note, if the previously installed SIGSEGV handler is SIG_IGN, "
"we terminate the process after dumping the error report.")
+GWP_ASAN_OPTION(
+ bool, Recoverable, false,
+ "Install GWP-ASan's signal handler in recoverable mode. This means that "
+ "upon GWP-ASan detecting an error, it'll print the error report, but *not* "
+ "crash. Only one crash per sampled allocation will ever be recorded, and "
+ "if a sampled allocation does actually cause a crash, it'll permanently "
+ "occupy a slot in the pool. The recoverable mode also means that "
+ "previously-installed signal handlers will only be triggered for "
+ "non-GWP-ASan errors, as all GWP-ASan errors won't be forwarded.")
+
GWP_ASAN_OPTION(bool, InstallForkHandlers, true,
"Install GWP-ASan atfork handlers to acquire internal locks "
"before fork and release them after.")
harness.cpp
enable_disable.cpp
late_init.cpp
- options.cpp)
+ options.cpp
+ recoverable.cpp)
set(GWP_ASAN_UNIT_TEST_HEADERS
${GWP_ASAN_HEADERS}
//
//===----------------------------------------------------------------------===//
+#include <regex>
#include <string>
#include "gwp_asan/common.h"
#include "gwp_asan/crash_handler.h"
#include "gwp_asan/tests/harness.h"
-// Optnone to ensure that the calls to these functions are not optimized away,
-// as we're looking for them in the backtraces.
-__attribute((optnone)) void *
-AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) {
- return GPA.allocate(1);
-}
-__attribute((optnone)) void
-DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
- GPA.deallocate(Ptr);
-}
-__attribute((optnone)) void
-DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
- GPA.deallocate(Ptr);
-}
-__attribute__((optnone)) void TouchMemory(void *Ptr) {
- *(reinterpret_cast<volatile char *>(Ptr)) = 7;
-}
-
-TEST_F(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) {
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) {
  void *Ptr = AllocateMemory(GPA);
  DeallocateMemory(GPA, Ptr);
-  std::string DeathRegex = "Double Free.*";
-  DeathRegex.append("DeallocateMemory2.*");
-
-  DeathRegex.append("was deallocated.*");
-  DeathRegex.append("DeallocateMemory.*");
-
-  DeathRegex.append("was allocated.*");
-  DeathRegex.append("AllocateMemory.*");
-  ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex);
+  // "[^2]" pins the deallocation frame to DeallocateMemory() itself, not
+  // DeallocateMemory2() (which already appeared earlier in the report).
+  std::string DeathRegex = "Double Free.*DeallocateMemory2.*";
+  DeathRegex.append("was deallocated.*DeallocateMemory[^2].*");
+  DeathRegex.append("was allocated.*AllocateMemory");
+  if (!Recoverable) {
+    ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex);
+    return;
+  }
+
+  // For recoverable, assert that DeallocateMemory2() doesn't crash.
+  DeallocateMemory2(GPA, Ptr);
+  // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...),
+  // so check the regex manually.
+  EXPECT_TRUE(std::regex_search(
+      GetOutputBuffer(),
+      std::basic_regex(DeathRegex, std::regex_constants::extended)))
+      << "Regex \"" << DeathRegex
+      << "\" was not found in input:\n============\n"
+      << GetOutputBuffer() << "\n============";
}
-TEST_F(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
#if defined(__linux__) && __ARM_ARCH == 7
  // Incomplete backtrace on Armv7 Linux
  GTEST_SKIP();
  void *Ptr = AllocateMemory(GPA);
  DeallocateMemory(GPA, Ptr);
-  std::string DeathRegex = "Use After Free.*";
-  DeathRegex.append("TouchMemory.*");
-
-  DeathRegex.append("was deallocated.*");
-  DeathRegex.append("DeallocateMemory.*");
-
-  DeathRegex.append("was allocated.*");
-  DeathRegex.append("AllocateMemory.*");
-  ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+  // "[^2]" pins the deallocation frame to DeallocateMemory() itself, not
+  // DeallocateMemory2().
+  std::string DeathRegex = "Use After Free.*TouchMemory.*";
+  DeathRegex.append("was deallocated.*DeallocateMemory[^2].*");
+  DeathRegex.append("was allocated.*AllocateMemory");
+
+  if (!Recoverable) {
+    ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+    return;
+  }
+
+  // For recoverable, assert that TouchMemory() doesn't crash.
+  TouchMemory(Ptr);
+  // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...),
+  // so check the regex manually.
+  EXPECT_TRUE(std::regex_search(
+      GetOutputBuffer(),
+      std::basic_regex(DeathRegex, std::regex_constants::extended)))
+      << "Regex \"" << DeathRegex
+      << "\" was not found in input:\n============\n"
+      << GetOutputBuffer() << "\n============";
}
+INSTANTIATE_TEST_SUITE_P(RecoverableSignalDeathTest,
+ BacktraceGuardedPoolAllocatorDeathTest,
+ /* Recoverable */ testing::Bool());
+
TEST(Backtrace, Short) {
gwp_asan::AllocationMetadata Meta;
Meta.AllocationTrace.RecordBacktrace(
void setupState() {
State.GuardedPagePool = 0x2000;
- State.GuardedPagePoolEnd = 0xb000;
+ State.GuardedPagePoolEnd = 0xc000;
+ InternalFaultAddr = State.GuardedPagePoolEnd - 0x10;
State.MaxSimultaneousAllocations = 4; // 0x3000, 0x5000, 0x7000, 0x9000.
State.PageSize = 0x1000;
}
static uintptr_t BacktraceConstants[kNumBacktraceConstants];
AllocatorState State = {};
AllocationMetadata Metadata[4] = {};
+ uintptr_t InternalFaultAddr;
};
uintptr_t CrashHandlerAPITest::BacktraceConstants[kNumBacktraceConstants] = {
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::UNKNOWN,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::DOUBLE_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
checkMetadata(Index, FailureAddress);
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::INVALID_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
checkMetadata(Index, FailureAddress);
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
EXPECT_EQ(Error::INVALID_FREE,
__gwp_asan_diagnose_error(&State, Metadata, 0x0));
- EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(FailureAddress,
+ __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::USE_AFTER_FREE,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::BUFFER_OVERFLOW,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
EXPECT_EQ(Error::BUFFER_UNDERFLOW,
__gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
- EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+ EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
checkMetadata(Index, FailureAddress);
}
}
} // namespace test
} // namespace gwp_asan
+
+// Optnone to ensure that the calls to these functions are not optimized away,
+// as we're looking for them in the backtraces.
+__attribute__((optnone)) char *
+AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) {
+ return static_cast<char *>(GPA.allocate(1));
+}
+__attribute__((optnone)) void
+DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+ GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) void
+DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+ GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) void TouchMemory(void *Ptr) {
+ *(reinterpret_cast<volatile char *>(Ptr)) = 7;
+}
#if defined(__Fuchsia__)
#include <zxtest/zxtest.h>
using Test = ::zxtest::Test;
+template <typename T> using TestWithParam = ::zxtest::TestWithParam<T>;
#else
#include "gtest/gtest.h"
using Test = ::testing::Test;
+template <typename T> using TestWithParam = ::testing::TestWithParam<T>;
#endif
#include "gwp_asan/guarded_pool_allocator.h"
}; // namespace test
}; // namespace gwp_asan
+char *AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA);
+void DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr);
+void DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr);
+void TouchMemory(void *Ptr);
+
class DefaultGuardedPoolAllocator : public Test {
public:
void SetUp() override {
MaxSimultaneousAllocations;
};
-class BacktraceGuardedPoolAllocator : public Test {
+class BacktraceGuardedPoolAllocator
+ : public TestWithParam</* Recoverable */ bool> {
public:
void SetUp() override {
gwp_asan::options::Options Opts;
Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce();
GPA.init(Opts);
+ // In recoverable mode, capture GWP-ASan logs to an internal buffer so that
+ // we can search it in unit tests. For non-recoverable tests, the default
+ // buffer is fine, as any tests should be EXPECT_DEATH()'d.
+ Recoverable = GetParam();
+ gwp_asan::Printf_t PrintfFunction = PrintfToBuffer;
+ GetOutputBuffer().clear();
+ if (!Recoverable)
+ PrintfFunction = gwp_asan::test::getPrintfFunction();
+
gwp_asan::segv_handler::installSignalHandlers(
- &GPA, gwp_asan::test::getPrintfFunction(),
- gwp_asan::backtrace::getPrintBacktraceFunction(),
- gwp_asan::backtrace::getSegvBacktraceFunction());
+ &GPA, PrintfFunction, gwp_asan::backtrace::getPrintBacktraceFunction(),
+ gwp_asan::backtrace::getSegvBacktraceFunction(),
+ /* Recoverable */ Recoverable);
}
void TearDown() override {
}
protected:
+ static std::string &GetOutputBuffer() {
+ static std::string Buffer;
+ return Buffer;
+ }
+
+ __attribute__((format(printf, 1, 2))) static void
+ PrintfToBuffer(const char *Format, ...) {
+ va_list AP;
+ va_start(AP, Format);
+ char Buffer[8192];
+ vsnprintf(Buffer, sizeof(Buffer), Format, AP);
+ GetOutputBuffer() += Buffer;
+ va_end(AP);
+ }
+
gwp_asan::GuardedPoolAllocator GPA;
+ bool Recoverable;
};
// https://github.com/google/googletest/blob/master/docs/advanced.md#death-tests-and-threads
--- /dev/null
+//===-- recoverable.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <mutex>
+#include <regex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "gwp_asan/common.h"
+#include "gwp_asan/crash_handler.h"
+#include "gwp_asan/tests/harness.h"
+
+// Asserts that OutputBuffer contains exactly one GWP-ASan error report. Zero
+// reports fails the "a crash was detected" half; a second occurrence of the
+// header string fails the "only ever reported once" guarantee of recoverable
+// mode.
+void CheckOnlyOneGwpAsanCrash(const std::string &OutputBuffer) {
+  const char *kGwpAsanErrorString = "GWP-ASan detected a memory error";
+  size_t FirstIndex = OutputBuffer.find(kGwpAsanErrorString);
+  ASSERT_NE(FirstIndex, std::string::npos) << "Didn't detect a GWP-ASan crash";
+  // Search again starting just past the first match.
+  ASSERT_EQ(OutputBuffer.find(kGwpAsanErrorString, FirstIndex + 1),
+            std::string::npos)
+      << "Detected more than one GWP-ASan crash:\n"
+      << OutputBuffer;
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleDoubleFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleInvalidFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr + 1);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Invalid (Wild) Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr + 1);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleUseAfterFreeOnlyOneOutput) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ TouchMemory(Ptr);
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Use After Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ TouchMemory(Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleBufferOverflowOnlyOneOutput) {
+ SCOPED_TRACE("");
+ char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+ // First time should generate a crash report.
+ TouchMemory(Ptr - 16);
+ TouchMemory(Ptr + 16);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ if (GetOutputBuffer().find("Buffer Overflow") == std::string::npos &&
+ GetOutputBuffer().find("Buffer Underflow") == std::string::npos)
+ FAIL() << "Failed to detect buffer underflow/overflow:\n"
+ << GetOutputBuffer();
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ TouchMemory(Ptr - 16);
+ TouchMemory(Ptr + 16);
+ ASSERT_TRUE(GetOutputBuffer().empty()) << GetOutputBuffer();
+ }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, OneDoubleFreeOneUseAfterFree) {
+ SCOPED_TRACE("");
+ void *Ptr = AllocateMemory(GPA);
+ DeallocateMemory(GPA, Ptr);
+ // First time should generate a crash report.
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+ // Ensure the crash is only reported once.
+ GetOutputBuffer().clear();
+ for (size_t i = 0; i < 100; ++i) {
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ }
+}
+
+// We use double-free to detect that each slot can generate as single error.
+// Use-after-free would also be acceptable, but buffer-overflow wouldn't be, as
+// the random left/right alignment means that one right-overflow can disable
+// page protections, and a subsequent left-overflow of a slot that's on the
+// right hand side may not trap.
+TEST_P(BacktraceGuardedPoolAllocator, OneErrorReportPerSlot) {
+ SCOPED_TRACE("");
+ std::vector<void *> Ptrs;
+ for (size_t i = 0; i < GPA.getAllocatorState()->MaxSimultaneousAllocations;
+ ++i) {
+ void *Ptr = AllocateMemory(GPA);
+ ASSERT_NE(Ptr, nullptr);
+ Ptrs.push_back(Ptr);
+ DeallocateMemory(GPA, Ptr);
+ DeallocateMemory(GPA, Ptr);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+ ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+ // Ensure the crash from this slot is only reported once.
+ GetOutputBuffer().clear();
+ DeallocateMemory(GPA, Ptr);
+ ASSERT_TRUE(GetOutputBuffer().empty());
+ // Reset the buffer, as we're gonna move to the next allocation.
+ GetOutputBuffer().clear();
+ }
+
+ // All slots should have been used. No further errors should occur.
+ for (size_t i = 0; i < 100; ++i)
+ ASSERT_EQ(AllocateMemory(GPA), nullptr);
+ for (void *Ptr : Ptrs) {
+ DeallocateMemory(GPA, Ptr);
+ TouchMemory(Ptr);
+ }
+ ASSERT_TRUE(GetOutputBuffer().empty());
+}
+
+// Worker body for the inter-thread thrashing test. Each Job index hammers the
+// same shared allocation with a different error type:
+//   0 = double free, 1 = invalid (off-by-one) free,
+//   2 = use-after-free, 3 = buffer underflow + overflow.
+void singleAllocThrashTask(gwp_asan::GuardedPoolAllocator *GPA,
+                           std::atomic<bool> *StartingGun,
+                           unsigned NumIterations, unsigned Job, char *Ptr) {
+  // Spin until the main thread releases all workers simultaneously, to
+  // maximize the chance of racing internally- vs. externally-raised faults.
+  while (!*StartingGun) {
+    // Wait for starting gun.
+  }
+
+  for (unsigned i = 0; i < NumIterations; ++i) {
+    switch (Job) {
+    case 0:
+      DeallocateMemory(*GPA, Ptr);
+      break;
+    case 1:
+      DeallocateMemory(*GPA, Ptr + 1);
+      break;
+    case 2:
+      TouchMemory(Ptr);
+      break;
+    case 3:
+      TouchMemory(Ptr - 16);
+      TouchMemory(Ptr + 16);
+      break;
+    default:
+      // Unreachable: only Jobs 0-3 are ever spawned.
+      __builtin_trap();
+    }
+  }
+}
+
+// Spawns kNumThreads workers that concurrently raise different GWP-ASan error
+// types (double free, invalid free, use-after-free, under-/overflow) against a
+// single shared allocation, stressing the race between internally- and
+// externally-detected faults in recoverable mode.
+void runInterThreadThrashingSingleAlloc(unsigned NumIterations,
+                                        gwp_asan::GuardedPoolAllocator *GPA) {
+  std::atomic<bool> StartingGun{false};
+  std::vector<std::thread> Threads;
+  constexpr unsigned kNumThreads = 4;
+  // Note: hardware_concurrency() may return 0 when undetectable; that also
+  // takes the skip path below.
+  if (std::thread::hardware_concurrency() < kNumThreads) {
+    GTEST_SKIP() << "Not enough threads to run this test";
+  }
+
+  char *Ptr = static_cast<char *>(AllocateMemory(*GPA));
+
+  for (unsigned i = 0; i < kNumThreads; ++i) {
+    Threads.emplace_back(singleAllocThrashTask, GPA, &StartingGun,
+                         NumIterations, i, Ptr);
+  }
+
+  // Release all workers at once.
+  StartingGun = true;
+
+  for (auto &T : Threads)
+    T.join();
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, InterThreadThrashingSingleAlloc) {
+ SCOPED_TRACE("");
+ constexpr unsigned kNumIterations = 100000;
+ runInterThreadThrashingSingleAlloc(kNumIterations, &GPA);
+ CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+}
+
+INSTANTIATE_TEST_SUITE_P(RecoverableTests, BacktraceGuardedPoolAllocator,
+ /* Recoverable */ testing::Values(true));
getFlags()->GWP_ASAN_MaxSimultaneousAllocations;
Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate;
Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers;
+ Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable;
// Embedded GWP-ASan is locked through the Scudo atfork handler (via
// Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork
// handler.
gwp_asan::segv_handler::installSignalHandlers(
&GuardedAlloc, Printf,
gwp_asan::backtrace::getPrintBacktraceFunction(),
- gwp_asan::backtrace::getSegvBacktraceFunction());
+ gwp_asan::backtrace::getSegvBacktraceFunction(),
+ Opt.Recoverable);
GuardedAllocSlotSize =
GuardedAlloc.getAllocatorState()->maximumAllocationSize();