Allow user memory in the first TB of address space.
This also enables non-PIE binaries and FreeBSD.
Fixes issue:
https://code.google.com/p/thread-sanitizer/issues/detail?id=5
llvm-svn: 220571
}
}
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
RegionInfo *GetRegionInfo(uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
// Platform-specific code.
//===----------------------------------------------------------------------===//
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !defined(TSAN_GO)
+
/*
-C++ linux memory layout:
-0000 0000 0000 - 03c0 0000 0000: protected
-03c0 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: protected
+C/C++ on linux and freebsd
+0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
+0100 0000 0000 - 0200 0000 0000: -
+0200 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: protected
+4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
-Go linux and darwin memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x020000000000ull;
+const uptr kShadowEnd = 0x100000000000ull;
+const uptr kHeapMemBeg = 0x7d0000000000ull;
+const uptr kHeapMemEnd = 0x7e0000000000ull;
+const uptr kLoAppMemBeg = 0x000000001000ull;
+const uptr kLoAppMemEnd = 0x010000000000ull;
+const uptr kHiAppMemBeg = 0x7e8000000000ull;
+const uptr kHiAppMemEnd = 0x800000000000ull;
+const uptr kAppMemMsk = 0x7c0000000000ull;
+const uptr kAppMemXor = 0x020000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
+ (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
+ (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
+ ^ kAppMemXor) * kShadowCnt;
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
+ ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
+ return (s / kShadowCnt) ^ kAppMemXor;
+ else
+ return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
+}
+
+static USED uptr UserRegions[] = {
+ kLoAppMemBeg, kLoAppMemEnd,
+ kHiAppMemBeg, kHiAppMemEnd,
+ kHeapMemBeg, kHeapMemEnd,
+};
+
+#elif defined(TSAN_GO) && !SANITIZER_WINDOWS
+
+/* Go on linux, darwin and freebsd
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 1000 0000 0000: -
-1000 0000 0000 - 1380 0000 0000: shadow
-1460 0000 0000 - 2000 0000 0000: -
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
4000 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
-6200 0000 0000 - 7fff ffff ffff: -
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+const uptr kMetaShadowBeg = 0x300000000000ull;
+const uptr kMetaShadowEnd = 0x400000000000ull;
+const uptr kTraceMemBeg = 0x600000000000ull;
+const uptr kTraceMemEnd = 0x620000000000ull;
+const uptr kShadowBeg = 0x200000000000ull;
+const uptr kShadowEnd = 0x238000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+}
-Go windows memory layout:
-0000 0000 0000 - 0000 1000 0000: executable
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+}
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ return (s & ~kShadowBeg) / kShadowCnt;
+}
+
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
+
+#elif defined(TSAN_GO) && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
+0000 1000 0000 - 00c0 0000 0000: -
00c0 0000 0000 - 00e0 0000 0000: heap
00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0560 0000 0000: shadow
+0100 0000 0000 - 0380 0000 0000: shadow
+0380 0000 0000 - 0560 0000 0000: -
0560 0000 0000 - 0760 0000 0000: traces
0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
-07d0 0000 0000 - 07ff ffff ffff: -
+07d0 0000 0000 - 8000 0000 0000: -
*/
-#ifndef TSAN_PLATFORM_H
-#define TSAN_PLATFORM_H
-
-#include "tsan_defs.h"
-#include "tsan_trace.h"
-
-#if defined(__LP64__) || defined(_WIN64)
-namespace __tsan {
-
-#if defined(TSAN_GO)
-static const uptr kLinuxAppMemBeg = 0x000000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x04dfffffffffULL;
-# if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x010000000000ULL;
-static const uptr kMetaShadow = 0x076000000000ULL;
-static const uptr kMetaSize = 0x007000000000ULL;
-# else // if SANITIZER_WINDOWS
-static const uptr kLinuxShadowMsk = 0x200000000000ULL;
-static const uptr kMetaShadow = 0x300000000000ULL;
-static const uptr kMetaSize = 0x100000000000ULL;
-# endif // if SANITIZER_WINDOWS
-#else // defined(TSAN_GO)
-static const uptr kMetaShadow = 0x300000000000ULL;
-static const uptr kMetaSize = 0x100000000000ULL;
-static const uptr kLinuxAppMemBeg = 0x7cf000000000ULL;
-static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
-#endif
+const uptr kMetaShadowBeg = 0x076000000000ull;
+const uptr kMetaShadowEnd = 0x07d000000000ull;
+const uptr kTraceMemBeg = 0x056000000000ull;
+const uptr kTraceMemEnd = 0x076000000000ull;
+const uptr kShadowBeg = 0x010000000000ull;
+const uptr kShadowEnd = 0x038000000000ull;
+const uptr kAppMemBeg = 0x000000001000ull;
+const uptr kAppMemEnd = 0x00e000000000ull;
-static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+ return mem >= kAppMemBeg && mem < kAppMemEnd;
+}
-#if SANITIZER_WINDOWS
-const uptr kTraceMemBegin = 0x056000000000ULL;
-#else
-const uptr kTraceMemBegin = 0x600000000000ULL;
-#endif
-const uptr kTraceMemSize = 0x020000000000ULL;
-
-// This has to be a macro to allow constant initialization of constants below.
-#ifndef TSAN_GO
-#define MemToShadow(addr) \
- ((((uptr)addr) & ~(kLinuxAppMemMsk | (kShadowCell - 1))) * kShadowCnt)
-#define MemToMeta(addr) \
- (u32*)(((((uptr)addr) & ~(kLinuxAppMemMsk | (kMetaShadowCell - 1))) \
- / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
-#else
-#define MemToShadow(addr) \
- (((((uptr)addr) & ~(kShadowCell - 1)) * kShadowCnt) | kLinuxShadowMsk)
-#define MemToMeta(addr) \
- (u32*)(((((uptr)addr) & ~(kMetaShadowCell - 1)) \
- / kMetaShadowCell * kMetaShadowSize) | kMetaShadow)
-#endif
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+ return mem >= kShadowBeg && mem <= kShadowEnd;
+}
-static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
-static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | 0xff;
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+ return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+}
-static inline bool IsAppMem(uptr mem) {
-#if defined(TSAN_GO)
- return mem <= kLinuxAppMemEnd;
-#else
- return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
-#endif
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+ DCHECK(IsAppMem(x));
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
}
-static inline bool IsShadowMem(uptr mem) {
- return mem >= kLinuxShadowBeg && mem <= kLinuxShadowEnd;
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+ DCHECK(IsAppMem(x));
+  return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+      kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
}
-static inline uptr ShadowToMem(uptr shadow) {
- CHECK(IsShadowMem(shadow));
-#ifdef TSAN_GO
- return (shadow & ~kLinuxShadowMsk) / kShadowCnt;
-#else
- return (shadow / kShadowCnt) | kLinuxAppMemMsk;
-#endif
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+ CHECK(IsShadowMem(s));
+ // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
+  return (s & ~kShadowBeg) / kShadowCnt;
}
-void FlushShadowMemory();
-void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
-uptr GetRSS();
+static USED uptr UserRegions[] = {
+ kAppMemBeg, kAppMemEnd,
+};
-void InitializePlatform();
-void FinalizePlatform();
+#else
+# error "Unknown platform"
+#endif
// The additional page is to catch shadow stack overflow as paging fault.
// Windows wants 64K alignment for mmaps.
+ (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBegin + (uptr)tid * kTotalTraceSize
+ uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ DCHECK_LT(p, kTraceMemEnd);
return p;
}
+void InitializePlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+uptr GetRSS();
+
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
} // namespace __tsan
-#else // defined(__LP64__) || defined(_WIN64)
-# error "Only 64-bit is supported"
-#endif
-
#endif // TSAN_PLATFORM_H
namespace __tsan {
+static uptr g_data_start;
+static uptr g_data_end;
+
const uptr kPageSize = 4096;
enum {
MemCount = 8,
};
-void FillProfileCallback(uptr start, uptr rss, bool file,
+void FillProfileCallback(uptr p, uptr rss, bool file,
uptr *mem, uptr stats_size) {
mem[MemTotal] += rss;
- start >>= 40;
- if (start < 0x10)
+ if (p >= kShadowBeg && p < kShadowEnd)
mem[MemShadow] += rss;
- else if (start >= 0x20 && start < 0x30)
- mem[file ? MemFile : MemMmap] += rss;
- else if (start >= 0x30 && start < 0x40)
+ else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
mem[MemMeta] += rss;
- else if (start >= 0x7e)
+#ifndef TSAN_GO
+ else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+ mem[MemHeap] += rss;
+ else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ mem[file ? MemFile : MemMmap] += rss;
+#else
+ else if (p >= kAppMemBeg && p < kAppMemEnd)
mem[file ? MemFile : MemMmap] += rss;
- else if (start >= 0x60 && start < 0x62)
+#endif
+ else if (p >= kTraceMemBeg && p < kTraceMemEnd)
mem[MemTrace] += rss;
- else if (start >= 0x7d && start < 0x7e)
- mem[MemHeap] += rss;
else
mem[MemOther] += rss;
}
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kLinuxShadowBeg, kLinuxShadowEnd - kLinuxShadowBeg);
+ FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
}
#endif
void InitializeShadowMemory() {
// Map memory shadow.
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kLinuxShadowBeg);
+ "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
Die();
}
// This memory range is used for thread stacks and large user mmaps.
0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE);
#endif
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
// Map meta shadow.
- if (MemToMeta(kLinuxAppMemBeg) < (u32*)kMetaShadow) {
- Printf("ThreadSanitizer: bad meta shadow (%p -> %p < %p)\n",
- kLinuxAppMemBeg, MemToMeta(kLinuxAppMemBeg), kMetaShadow);
- Die();
- }
- if (MemToMeta(kLinuxAppMemEnd) >= (u32*)(kMetaShadow + kMetaSize)) {
- Printf("ThreadSanitizer: bad meta shadow (%p -> %p >= %p)\n",
- kLinuxAppMemEnd, MemToMeta(kLinuxAppMemEnd), kMetaShadow + kMetaSize);
- Die();
- }
- uptr meta = (uptr)MmapFixedNoReserve(kMetaShadow, kMetaSize);
- if (meta != kMetaShadow) {
+ uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg,
+ kMetaShadowEnd - kMetaShadowBeg);
+ if (meta != kMetaShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", meta, kMetaShadow);
+ "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
Die();
}
  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
-          kMetaShadow, kMetaShadow + kMetaSize, kMetaSize >> 30);
+          kMetaShadowBeg, kMetaShadowEnd,
+          (kMetaShadowEnd - kMetaShadowBeg) >> 30);
- // Protect gaps.
- const uptr kClosedLowBeg = 0x200000;
- const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
- const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = min(min(kLinuxAppMemBeg, kTraceMemBegin),
- kMetaShadow);
-
- ProtectRange(kClosedLowBeg, kClosedLowEnd);
- ProtectRange(kClosedMidBeg, kClosedMidEnd);
- VPrintf(2, "kClosedLow %zx-%zx (%zuGB)\n",
- kClosedLowBeg, kClosedLowEnd, (kClosedLowEnd - kClosedLowBeg) >> 30);
- VPrintf(2, "kClosedMid %zx-%zx (%zuGB)\n",
- kClosedMidBeg, kClosedMidEnd, (kClosedMidEnd - kClosedMidBeg) >> 30);
- VPrintf(2, "app mem: %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
- VPrintf(2, "stack: %zx\n", (uptr)&shadow);
-
MapRodata();
}
-#endif
-
-static uptr g_data_start;
-static uptr g_data_end;
-
-#ifndef TSAN_GO
-static void CheckPIE() {
- // Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps(true);
- uptr start, end;
- if (proc_maps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
- if ((u64)start < kLinuxAppMemBeg) {
- Printf("FATAL: ThreadSanitizer can not mmap the shadow memory ("
- "something is mapped at 0x%zx < 0x%zx)\n",
- start, kLinuxAppMemBeg);
- Printf("FATAL: Make sure to compile with -fPIE"
- " and to link with -pie.\n");
- Die();
- }
- }
-}
static void InitDataSeg() {
MemoryMappingLayout proc_maps(true);
CHECK_LT((uptr)&g_data_start, g_data_end);
}
+static void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ uptr p, end;
+ while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
+ if (IsAppMem(p))
+ continue;
+ if (p >= kHeapMemEnd &&
+ p < kHeapMemEnd + PrimaryAllocator::AdditionalSize())
+ continue;
+ if (p >= 0xf000000000000000ull) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+ Die();
+ }
+
+ ProtectRange(kLoAppMemEnd, kShadowBeg);
+ ProtectRange(kShadowEnd, kMetaShadowBeg);
+ ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ ProtectRange(kTraceMemEnd, kHeapMemBeg);
+ ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
+}
#endif // #ifndef TSAN_GO
void InitializePlatform() {
}
#ifndef TSAN_GO
- CheckPIE();
+ CheckAndProtect();
InitTlsSize();
InitDataSeg();
#endif
#ifndef TSAN_GO
void InitializeShadowMemory() {
- uptr shadow = (uptr)MmapFixedNoReserve(kLinuxShadowBeg,
- kLinuxShadowEnd - kLinuxShadowBeg);
- if (shadow != kLinuxShadowBeg) {
+ uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
+ kShadowEnd - kShadowBeg);
+ if (shadow != kShadowBeg) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
"to link with -pie.\n");
Die();
}
- DPrintf("kLinuxShadow %zx-%zx (%zuGB)\n",
- kLinuxShadowBeg, kLinuxShadowEnd,
- (kLinuxShadowEnd - kLinuxShadowBeg) >> 30);
- DPrintf("kLinuxAppMem %zx-%zx (%zuGB)\n",
- kLinuxAppMemBeg, kLinuxAppMemEnd,
- (kLinuxAppMemEnd - kLinuxAppMemBeg) >> 30);
+ DPrintf("kShadow %zx-%zx (%zuGB)\n",
+ kShadowBeg, kShadowEnd,
+ (kShadowEnd - kShadowBeg) >> 30);
+ DPrintf("kAppMem %zx-%zx (%zuGB)\n",
+ kAppMemBeg, kAppMemEnd,
+ (kAppMemEnd - kAppMemBeg) >> 30);
}
#endif
DisableCoreDumperIfNecessary();
}
-void FinalizePlatform() {
- fflush(0);
-}
-
#ifndef TSAN_GO
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime,
void InitializePlatform() {
}
-void FinalizePlatform() {
- fflush(0);
-}
-
} // namespace __tsan
#endif // SANITIZER_WINDOWS
void MapThreadTrace(uptr addr, uptr size) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBegin);
- CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ CHECK_GE(addr, kTraceMemBeg);
+ CHECK_LE(addr + size, kTraceMemEnd);
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size);
if (addr1 != addr) {
}
}
+static void CheckShadowMapping() {
+ for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
+ const uptr beg = UserRegions[i];
+ const uptr end = UserRegions[i + 1];
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -1; x <= 1; x++) {
+ const uptr p = p0 + x;
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ VPrintf(3, " checking pointer %p -> %p\n", p, s);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ const uptr m = (uptr)MemToMeta(p);
+ CHECK(IsMetaMem(m));
+ }
+ }
+ }
+}
+
void Initialize(ThreadState *thr) {
// Thread safe because done before all threads exist.
static bool is_initialized = false;
InitializeAllocator();
#endif
InitializeInterceptors();
+ CheckShadowMapping();
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4
bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads.
DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
return res;
#else
namespace __tsan {
#ifndef TSAN_GO
-const uptr kAllocatorSpace = 0x7d0000000000ULL;
-const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-
struct MapUnmapCallback;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
EXPECT_EQ(20U, user_alloc_usable_size(p2));
user_free(thr, pc, p);
user_free(thr, pc, p2);
- EXPECT_EQ(0U, user_alloc_usable_size((void*)0x123));
+ EXPECT_EQ(0U, user_alloc_usable_size((void*)0x4123));
}
TEST(Mman, Stats) {
--- /dev/null
+// RUN: %clangxx_tsan -O1 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+// Test for issue:
+// https://code.google.com/p/thread-sanitizer/issues/detail?id=5
+
+void *Thread(void *ptr) {
+ *(int*)ptr = 42;
+ return 0;
+}
+
+int main() {
+ void *ptr = mmap(0, 128 << 10, PROT_READ|PROT_WRITE,
+ MAP_32BIT|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+ fprintf(stderr, "ptr=%p\n", ptr);
+ if (ptr == MAP_FAILED) {
+ fprintf(stderr, "mmap failed: %d\n", errno);
+ return 1;
+ }
+ if ((uintptr_t)ptr >= (1ull << 32)) {
+ fprintf(stderr, "ptr is too high\n");
+ return 1;
+ }
+ pthread_t t;
+ pthread_create(&t, 0, Thread, ptr);
+ sleep(1);
+ *(int*)ptr = 42;
+ pthread_join(t, 0);
+ munmap(ptr, 128 << 10);
+ fprintf(stderr, "DONE\n");
+}
+
+// CHECK: WARNING: ThreadSanitizer: data race
+// CHECK: DONE
+