static const uptr kAllocatorSpace = ~(uptr)0;
-static const uptr kAllocatorSize = 0x8000000000ULL; // 500G
+static const uptr kAllocatorSize = 0x8000000000ULL; // 512G.
static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_ANDROID && defined(__aarch64__)
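+// Android/AArch64 commonly runs with a 39-bit VA layout, so pick a small
+// fixed region that fits and use the very compact size class map.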
+static const uptr kAllocatorSpace = 0x3000000000ULL;
+static const uptr kAllocatorSize = 0x2000000000ULL;
+static const u64 kAddressSpaceSize = 1ULL << 39;
+typedef VeryCompactSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
#endif
-struct AP64 { // Allocator Params. Short name for shorter demangled names..
+struct AP64 { // Allocator Params. Short name for shorter demangled names.
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 16;
- typedef DefaultSizeClassMap SizeClassMap;
+ typedef ::SizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
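+// Note: kSpaceBeg == ~(uptr)0 below means the allocator chooses the space
+// base address dynamically at Init time instead of using a fixed mapping.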
static const uptr kSpaceBeg = ~(uptr)0;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 16;
- typedef DefaultSizeClassMap SizeClassMap;
+ typedef ::SizeClassMap SizeClassMap;
typedef NoOpMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
TestSizeClassAllocator<Allocator64Dynamic>();
}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
TestSizeClassAllocator<Allocator64Compact>();
}
+#endif
+
TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
TestSizeClassAllocator<Allocator64VeryCompact>();
}
void *allocated[kNumAllocs];
void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
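+ // Class 0 is invalid, so cycle through class ids 1..kNumClasses-1; the very
+ // compact map used on Android has fewer than 50 classes.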
- void *x = cache.Allocate(a, 1 + i % 50);
+ void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
allocated[i] = x;
meta[i] = a->GetMetaData(x);
}
EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(a, 1 + i % 50, allocated[i]);
+ cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
}
a->TestOnlyUnmap();
SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif
+
+#endif
#endif // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator32Compact>();
// to run them all at the same time. FIXME: Make them not flaky and reenable.
#if !SANITIZER_WINDOWS
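+// Android uses VeryCompactSizeClassMap, which does not have > 4Gb per size
+// class, so the stress size is reduced to 2G there.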
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64>(1ULL << 33);
+ SizeClassAllocatorGetBlockBeginStress<Allocator64>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
- SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(1ULL << 33);
+ SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+ 1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
+#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
-// Does not have > 4Gb for each class.
+// VeryCompactSizeClassMap does not have > 4Gb per size class.
SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
#if !SANITIZER_WINDOWS
struct AP64WithCallback {
static const uptr kSpaceBeg = kAllocatorSpace;
static const uptr kSpaceSize = kAllocatorSize;
static const uptr kMetadataSize = 16;
- typedef DefaultSizeClassMap SizeClassMap;
+ typedef ::SizeClassMap SizeClassMap;
typedef TestMapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
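+// TestMapUnmapCallback counts OnMap/OnUnmap invocations in its static
+// map_count/unmap_count fields, which the test below asserts on.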
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
stats.Init();
const size_t kNumChunks = 128;
uint32_t chunks[kNumChunks];
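+ // Class 30 exists in both the default and the very compact size class maps;
+ // class 32 is out of range for the latter.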
- a->GetFromAllocator(&stats, 32, chunks, kNumChunks);
+ a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
// State + alloc + metadata + freearray.
EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
a->TestOnlyUnmap();
// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
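+// Android is skipped as well; the very compact map used there does not
+// include the large size class this test exercises.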
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, CombinedAllocator64Compact) {
TestCombinedAllocator<Allocator64Compact,
LargeMmapAllocator<>,
SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
+#endif
TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
TestCombinedAllocator<Allocator64VeryCompact,
SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
}
+#if !SANITIZER_ANDROID
TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
+#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
TestSizeClassAllocatorLocalCache<
SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
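+// The global ::SizeClassMap typedef shadows the __sanitizer::SizeClassMap
+// template here, so the template use below must be qualified.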
-typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
+typedef __sanitizer::SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
struct AP64_SpecialSizeClassMap {
static const uptr kSpaceBeg = kAllocatorSpace;