MmapFixedNoReserve does not terminate the process on failure.
Failing to check its result and die leads to harder-to-debug
crashes later in execution. This was observed in Go
processes due to address space conflicts.
Consistently check the result of MmapFixedNoReserve.
While we are here, also add the warn_unused_result attribute
to prevent such bugs in the future, and change the return type
to bool, as that is what all callers want.
Reviewed in https://reviews.llvm.org/D49367
llvm-svn: 337531
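
For context, a minimal standalone sketch of the pattern this change enforces,
using raw POSIX mmap on Linux rather than the sanitizer internals; the helper
name and the shadow address are hypothetical, not part of the patch:

  #include <cstdio>
  #include <cstdlib>
  #include <sys/mman.h>

  // bool return plus warn_unused_result: the compiler flags any caller that
  // silently drops the result instead of checking it and dying early.
  __attribute__((warn_unused_result))
  static bool MapFixedNoReserve(void *addr, size_t size) {
    // MAP_FIXED | MAP_NORESERVE, mirroring MmapFixedNoReserve's flags.
    void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                   -1, 0);
    return p != MAP_FAILED;
  }

  int main() {
    // Hypothetical shadow base; real sanitizers compute this per platform.
    void *shadow = reinterpret_cast<void *>(0x600000000000);
    if (!MapFixedNoReserve(shadow, 1 << 20)) {
      // Check and die immediately, rather than crashing later on an
      // unmapped shadow access (the hard-to-debug failure described above).
      fprintf(stderr, "failed to map shadow\n");
      abort();
    }
    return 0;
  }
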
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- void *res = MmapFixedNoReserve(beg, size, name);
- if (res != (void *)beg) {
+ if (!MmapFixedNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
InitializePlatformEarly();
- MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr());
+ if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
+ Die();
// Protect the region of memory we don't use, to preserve the one-to-one
// mapping from application to shadow memory. But if ASLR is disabled, Linux
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- void *res = MmapFixedNoReserve(beg, size, name);
- if (res != (void *)beg) {
+ if (!MmapFixedNoReserve(beg, size, name)) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
if (map) {
if (!CheckMemoryRangeAvailability(start, size))
return false;
- if ((uptr)MmapFixedNoReserve(start, size, kMemoryLayout[i].name) != start)
+ if (!MmapFixedNoReserve(start, size, kMemoryLayout[i].name))
return false;
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(start, size);
if (page_end != shadow_end) {
REAL(memset)((void *)page_end, 0, shadow_end - page_end);
}
- MmapFixedNoReserve(page_beg, page_end - page_beg);
+ if (!MmapFixedNoReserve(page_beg, page_end - page_beg))
+ Die();
}
}
}
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
- const char *name = nullptr);
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
+ WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0
+# define WARN_UNUSED_RESULT
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
# else
# define PREFETCH(x) __builtin_prefetch(x)
# endif
+# define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#endif // _MSC_VER
#if !defined(_MSC_VER) || defined(__clang__)
}
#endif
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
if (fd == -1) flags |= MAP_ANON;
RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE,
flags, fd, 0);
int reserrno;
- if (internal_iserror(p, &reserrno))
+ if (internal_iserror(p, &reserrno)) {
Report("ERROR: %s failed to "
"allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
SanitizerToolName, size, size, fixed_addr, reserrno);
+ return false;
+ }
IncreaseTotalMmap(size);
- return (void *)p;
+ return true;
}
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
return (void *)mapped_addr;
}
-void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
(void)name; // unsupported
void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
PAGE_READWRITE);
#endif
- if (p == 0)
+ if (p == 0) {
Report("ERROR: %s failed to "
"allocate %p (%zd) bytes at %p (error code: %d)\n",
SanitizerToolName, size, size, fixed_addr, GetLastError());
- return p;
+ return false;
+ }
+ return true;
}
// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
- uptr shadow =
- (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
- "shadow");
- if (shadow != ShadowBeg()) {
+ if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, ShadowBeg());
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
// This memory range is used for thread stacks and large user mmaps.
(ShadowEnd() - ShadowBeg()) >> 30);
// Map meta shadow.
- uptr meta_size = MetaShadowEnd() - MetaShadowBeg();
- uptr meta =
- (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow");
- if (meta != MetaShadowBeg()) {
+ const uptr meta = MetaShadowBeg();
+ const uptr meta_size = MetaShadowEnd() - meta;
+ if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
- Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", meta, MetaShadowBeg());
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Die();
}
NoHugePagesInShadow(meta, meta_size);
const uptr kPageSize = GetPageSizeCached();
uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
- MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
if (!data_mapped) {
// First call maps data+bss.
data_mapped = true;
- MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
} else {
// Mapping continous heap.
// Windows wants 64K alignment.
return;
if (meta_begin < mapped_meta_end)
meta_begin = mapped_meta_end;
- MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
mapped_meta_end = meta_end;
}
VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
CHECK_GE(addr, TraceMemBeg());
CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
- if (addr1 != addr) {
- Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n",
- addr, size, addr1);
+ if (!MmapFixedNoReserve(addr, size, name)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
+ addr, size);
Die();
}
}
u64 *p1 = p;
p = RoundDown(end, kPageSize);
UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
- MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
+ if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ Die();
// Set the ending.
while (p < end) {
*p++ = val;
uptr metap = (uptr)MemToMeta(p0);
uptr metasz = sz0 / kMetaRatio;
UnmapOrDie((void*)metap, metasz);
- MmapFixedNoReserve(metap, metasz);
+ if (!MmapFixedNoReserve(metap, metasz))
+ Die();
}
MBlock* MetaMap::GetBlock(uptr p) {