From: Reid Kleckner
Date: Thu, 10 Mar 2016 20:47:26 +0000 (+0000)
Subject: [Windows] Fix UnmapOrDie and MmapAlignedOrDie
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=ad04914a53bc21c2f1678ecc5ab0c5617b668ad0;p=platform%2Fupstream%2Fllvm.git

[Windows] Fix UnmapOrDie and MmapAlignedOrDie

Now ASan can return virtual memory to the underlying OS. Portable
sanitizer runtime code needs to be aware that UnmapOrDie cannot unmap
part of a previous mapping.

In particular, this required changing how we implement MmapAlignedOrDie
on Windows, which is what Allocator32 uses.

The new code first attempts to allocate memory of the given size and, if
it is appropriately aligned, returns early. If not, it frees the memory
and attempts to reserve size + alignment bytes. In this region there
must be an aligned address. We then free the oversized mapping and
immediately request a new mapping at that aligned address. However,
another thread could allocate that virtual address between our free and
our allocation, so we have to retry if the allocation fails.

The existing thread creation stress test managed to trigger this
condition, so the code isn't totally untested.

Reviewers: samsonov

Differential Revision: http://reviews.llvm.org/D17431

llvm-svn: 263160
---

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cc b/compiler-rt/lib/sanitizer_common/sanitizer_common.cc
index ce91dc4..576875f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cc
@@ -230,27 +230,6 @@ void SortArray(uptr *array, uptr size) {
   InternalSort(&array, size, CompareLess);
 }
 
-// We want to map a chunk of address space aligned to 'alignment'.
-// We do it by maping a bit more and then unmaping redundant pieces.
-// We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
-// uptr PageSize = GetPageSizeCached();
-  CHECK(IsPowerOfTwo(size));
-  CHECK(IsPowerOfTwo(alignment));
-  uptr map_size = size + alignment;
-  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
-  uptr map_end = map_res + map_size;
-  uptr res = map_res;
-  if (res & (alignment - 1))  // Not aligned.
-    res = (map_res + alignment) & ~(alignment - 1);
-  uptr end = res + size;
-  if (res != map_res)
-    UnmapOrDie((void*)map_res, res - map_res);
-  if (end != map_end)
-    UnmapOrDie((void*)end, map_end - end);
-  return (void*)res;
-}
-
 const char *StripPathPrefix(const char *filepath,
                             const char *strip_path_prefix) {
   if (!filepath) return nullptr;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
index a383b21..5bcc86b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
@@ -139,6 +139,26 @@ void UnmapOrDie(void *addr, uptr size) {
   DecreaseTotalMmap(size);
 }
 
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by maping a bit more and then unmaping redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+  CHECK(IsPowerOfTwo(size));
+  CHECK(IsPowerOfTwo(alignment));
+  uptr map_size = size + alignment;
+  uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+  uptr map_end = map_res + map_size;
+  uptr res = map_res;
+  if (res & (alignment - 1))  // Not aligned.
+    res = (map_res + alignment) & ~(alignment - 1);
+  uptr end = res + size;
+  if (res != map_res)
+    UnmapOrDie((void*)map_res, res - map_res);
+  if (end != map_end)
+    UnmapOrDie((void*)end, map_end - end);
+  return (void*)res;
+}
+
 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
   uptr PageSize = GetPageSizeCached();
   uptr p = internal_mmap(nullptr,
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cc b/compiler-rt/lib/sanitizer_common/sanitizer_win.cc
index 7794812..b0424aa 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cc
@@ -97,7 +97,17 @@ void UnmapOrDie(void *addr, uptr size) {
   if (!size || !addr)
     return;
 
-  if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+  // Make sure that this API is only used to unmap an entire previous mapping.
+  // Windows cannot unmap part of a previous mapping. Unfortunately,
+  // we can't check that size matches the original size because mbi.RegionSize
+  // doesn't describe the size of the full allocation if some of the pages were
+  // protected.
+  MEMORY_BASIC_INFORMATION mbi;
+  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
+  CHECK(mbi.AllocationBase == addr &&
+        "Windows cannot unmap part of a previous mapping");
+
+  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
     Report("ERROR: %s failed to "
            "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
            SanitizerToolName, size, size, addr, GetLastError());
@@ -105,6 +115,61 @@ void UnmapOrDie(void *addr, uptr size) {
   }
 }
 
+// We want to map a chunk of address space aligned to 'alignment'.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+  CHECK(IsPowerOfTwo(size));
+  CHECK(IsPowerOfTwo(alignment));
+
+  // Windows will align our allocations to at least 64K.
+  alignment = Max(alignment, GetMmapGranularity());
+
+  uptr mapped_addr =
+      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  if (!mapped_addr)
+    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+  // If we got it right on the first try, return. Otherwise, unmap it and go to
+  // the slow path.
+  if (IsAligned(mapped_addr, alignment))
+    return (void*)mapped_addr;
+  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+  // If we didn't get an aligned address, overallocate, find an aligned address,
+  // unmap, and try to allocate at that aligned address.
+  int retries = 0;
+  const int kMaxRetries = 10;
+  for (; retries < kMaxRetries &&
+         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
+       retries++) {
+    // Overallocate size + alignment bytes.
+    mapped_addr =
+        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
+    if (!mapped_addr)
+      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
+                              GetLastError());
+
+    // Find the aligned address.
+    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
+
+    // Free the overallocation.
+    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+    // Attempt to allocate exactly the number of bytes we need at the aligned
+    // address. This may fail for a number of reasons, in which case we continue
+    // the loop.
+    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
+                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  }
+
+  // Fail if we can't make this work quickly.
+  if (retries == kMaxRetries && mapped_addr == 0)
+    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+  return (void *)mapped_addr;
+}
+
 void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
   // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
   // but on Win64 it does.
@@ -119,7 +184,15 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
 }
 
 void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
-  return MmapFixedNoReserve(fixed_addr, size);
+  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+                         MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  if (p == 0) {
+    char mem_type[30];
+    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+                      fixed_addr);
+    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+  }
+  return p;
 }
 
 void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
diff --git a/compiler-rt/test/asan/TestCases/Windows/oom.cc b/compiler-rt/test/asan/TestCases/Windows/oom.cc
index b24cddf..3475af7 100644
--- a/compiler-rt/test/asan/TestCases/Windows/oom.cc
+++ b/compiler-rt/test/asan/TestCases/Windows/oom.cc
@@ -6,7 +6,6 @@
 int main() {
   while (true) {
     void *ptr = malloc(200 * 1024 * 1024); // 200MB
-    free(ptr);
   }
   // CHECK: failed to allocate
 }
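
Note for readers following the patch: the heart of the change is the
overallocate/release/re-request dance in the new Windows MmapAlignedOrDie.
Below is a minimal standalone sketch of that pattern, assuming only
<windows.h>; AllocAligned, AlignUp, and the bare nullptr returns are
illustrative stand-ins for the runtime's CHECK and ReportMmapFailureAndDie
plumbing, not sanitizer APIs.

    #include <windows.h>
    #include <cstddef>
    #include <cstdint>

    // Round p up to the next multiple of a; a must be a power of two.
    static uintptr_t AlignUp(uintptr_t p, uintptr_t a) {
      return (p + a - 1) & ~(a - 1);
    }

    void *AllocAligned(size_t size, size_t alignment) {
      // Fast path: Windows aligns allocation bases to the 64K granularity,
      // so the first attempt is often already aligned.
      void *p = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                             PAGE_READWRITE);
      if (!p) return nullptr;
      if (((uintptr_t)p & (alignment - 1)) == 0) return p;
      // Windows can only free a whole previous allocation, never a piece.
      VirtualFree(p, 0, MEM_RELEASE);

      const int kMaxRetries = 10;
      for (int i = 0; i < kMaxRetries; i++) {
        // Reserve size + alignment bytes; this span must contain an
        // aligned address. Reserving without committing keeps it cheap.
        void *over = VirtualAlloc(nullptr, size + alignment, MEM_RESERVE,
                                  PAGE_NOACCESS);
        if (!over) return nullptr;
        uintptr_t aligned = AlignUp((uintptr_t)over, alignment);
        // We cannot trim the reservation, so release it all and
        // immediately re-request the aligned address inside it.
        VirtualFree(over, 0, MEM_RELEASE);
        p = VirtualAlloc((void *)aligned, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
        // Another thread may have mapped that address in the window
        // between the free and this allocation; if so, p is null: retry.
        if (p) return p;
      }
      return nullptr;
    }

Reserving the oversized probe with MEM_RESERVE and PAGE_NOACCESS rather
than committing it avoids charging commit for memory that is released a
moment later; the retry loop exists only because of the free-then-remap
race described in the commit message above.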