CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
MmapFixedNoAccess(kSpaceBeg, kSpaceSize)));
} else {
- NonConstSpaceBeg = reinterpret_cast<uptr>(
- MmapFixedNoAccess(0, kSpaceSize + AdditionalSize()));
+ NonConstSpaceBeg =
+ reinterpret_cast<uptr>(MmapNoAccess(kSpaceSize + AdditionalSize()));
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
MapWithCallback(SpaceEnd(), AdditionalSize());
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
+void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
0);
}
+// Reserve `size` bytes of address space with no access permissions,
+// letting the kernel pick the base address (first mmap arg is nullptr).
+// MAP_NORESERVE avoids committing swap for the (inaccessible) region.
+// NOTE(review): error convention presumably follows mmap, i.e. (void *)-1
+// on failure -- the caller's CHECK_NE(..., ~(uptr)0) suggests this; confirm
+// against internal_mmap's return semantics.
+void *MmapNoAccess(uptr size) {
+  unsigned flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;
+  return (void *)internal_mmap(nullptr, size, PROT_NONE, flags, -1, 0);
+}
+
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE int
return res;
}
+// Windows stub: OS-chosen no-access reservation is not implemented here;
+// always returns nullptr, so callers taking this path must tolerate (or
+// CHECK against) a null result.
+void *MmapNoAccess(uptr size) {
+  // FIXME: unsupported.
+  return nullptr;
+}
+
bool MprotectNoAccess(uptr addr, uptr size) {
DWORD old_protection;
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+// typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
+// NOTE(review): ~(uptr)0 as the space-begin template argument presumably
+// selects a dynamically mmap'ed base (cf. the NonConstSpaceBeg /
+// MmapNoAccess path in this patch) -- confirm SizeClassAllocator64 treats
+// it as that sentinel rather than a literal address.
+typedef SizeClassAllocator64<~(uptr)0, kAllocatorSize, 0,
CompactSizeClassMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;