#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
- static const uptr kAllocatorSize = 0x80000000000; // 8T.
+ static const uptr kAllocatorSize = 0x40000000000; // 4T.
static const uptr kMetadataSize = sizeof(Metadata);
static const uptr kMaxAllowedMallocSize = 8UL << 30;
struct PerClass {
uptr count;
uptr max_count;
- void *batch[2 * SizeClassMap::kMaxNumCached];
+ void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
- c->max_count = 2 * SizeClassMap::MaxCached(i);
+ c->max_count = 2 * TransferBatch::MaxCached(i);
}
}
? 0
: SizeClassMap::kBatchClassID;
if (Allocator::ClassIdToSize(class_id) <
- sizeof(TransferBatch) -
- sizeof(uptr) * (SizeClassMap::kMaxNumCached -
- SizeClassMap::MaxCached(class_id)))
+ TransferBatch::AllocationSizeRequiredForNElements(
+ TransferBatch::MaxCached(class_id)))
return SizeClassMap::ClassID(sizeof(TransferBatch));
return 0;
}
uptr first_idx_to_drain = c->count - cnt;
TransferBatch *b = CreateBatch(
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
- b->SetFromArray(&c->batch[first_idx_to_drain], cnt);
+ b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
+ &c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
allocator->DeallocateBatch(&stats_, class_id, b);
}
class SizeClassAllocator32 {
public:
struct TransferBatch {
- static const uptr kMaxNumCached = SizeClassMap::kMaxNumCached;
- void SetFromArray(void *batch[], uptr count) {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
count_ = count;
CHECK_LE(count_, kMaxNumCached);
for (uptr i = 0; i < count; i++)
for (uptr i = 0, n = Count(); i < n; i++)
to_batch[i] = batch_[i];
}
+
+ // How much memory do we need for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
TransferBatch *next;
private:
static const uptr kBatchSize = sizeof(TransferBatch);
COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
static uptr ClassIdToSize(uptr class_id) {
return class_id == SizeClassMap::kBatchClassID
sci->free_list.push_front(b);
}
+ uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
+
bool PointerIsMine(const void *p) {
uptr mem = reinterpret_cast<uptr>(p);
if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
- uptr max_count = SizeClassMap::MaxCached(class_id);
+ uptr max_count = TransferBatch::MaxCached(class_id);
TransferBatch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
class SizeClassAllocator64 {
public:
struct TransferBatch {
- static const uptr kMaxNumCached = SizeClassMap::kMaxNumCached;
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 4;
void SetFromRange(uptr region_beg, uptr beg_offset, uptr step, uptr count) {
count_ = count;
CHECK_LE(count_, kMaxNumCached);
+ region_beg_ = region_beg;
for (uptr i = 0; i < count; i++)
- batch_[i] = (void*)(region_beg + beg_offset + i * step);
+ batch_[i] = static_cast<u32>((beg_offset + i * step) >> 4);
}
- void SetFromArray(void *batch[], uptr count) {
+ void SetFromArray(uptr region_beg, void *batch[], uptr count) {
count_ = count;
CHECK_LE(count_, kMaxNumCached);
+ region_beg_ = region_beg;
for (uptr i = 0; i < count; i++)
- batch_[i] = batch[i];
+ batch_[i] = static_cast<u32>(
+ ((reinterpret_cast<uptr>(batch[i])) - region_beg) >> 4);
}
void CopyToArray(void *to_batch[]) {
for (uptr i = 0, n = Count(); i < n; i++)
- to_batch[i] = batch_[i];
+ to_batch[i] = reinterpret_cast<void*>(Get(i));
}
uptr Count() const { return count_; }
+
+ // How much memory do we need for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(u32) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
TransferBatch *next;
private:
- uptr count_;
- void *batch_[kMaxNumCached];
+ uptr Get(uptr i) {
+ return region_beg_ + (static_cast<uptr>(batch_[i]) << 4);
+ }
+ // Instead of storing 64-bit pointers we store 32-bit offsets from the
+ // region start divided by 16. This imposes two limitations:
+ // * all allocations are 16-aligned,
+ // * regions are not larger than 2^36.
+ uptr region_beg_ : SANITIZER_WORDSIZE - 10; // Region-beg is 4096-aligned.
+ uptr count_ : 10;
+ u32 batch_[kMaxNumCached];
};
static const uptr kBatchSize = sizeof(TransferBatch);
COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(u32));
+ COMPILER_CHECK(TransferBatch::kMaxNumCached < 1024); // count_ uses 10 bits.
static uptr ClassIdToSize(uptr class_id) {
return class_id == SizeClassMap::kBatchClassID
space_beg;
}
+ uptr GetRegionBeginBySizeClass(uptr class_id) {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
uptr GetSizeClass(const void *p) {
if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
// kRegionSize must be >= 2^32.
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see TransferBatch.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 16;
// Call mmap for metadata memory with at least this size.
if (b)
return b;
uptr size = ClassIdToSize(class_id);
- uptr count = SizeClassMap::MaxCached(class_id);
+ uptr count = TransferBatch::MaxCached(class_id);
uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + count * size;
uptr region_beg = SpaceBeg() + kRegionSize * class_id;
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that need to be cached per-thread:
-// - kMaxNumCached is the maximal number of chunks per size class.
+// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
//
// There is one extra size class kBatchClassID that is used for allocating
//
// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
+template <uptr kMaxSizeLog, uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
class SizeClassMap {
static const uptr kMinSizeLog = 4;
static const uptr kMidSizeLog = kMinSizeLog + 4;
static const uptr M = (1 << S) - 1;
public:
- static const uptr kMaxNumCached = kMaxNumCachedT;
- COMPILER_CHECK(((kMaxNumCached + 2) & (kMaxNumCached + 1)) == 0);
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+ // for the size of TransferBatch, the actual size could be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
return kMidClass + (l1 << S) + hbits + (lbits > 0);
}
- static uptr MaxCached(uptr class_id) {
+ static uptr MaxCachedHint(uptr class_id) {
if (class_id == 0) return 0;
// Estimate the result for kBatchClassID because this class
// does not know the exact size of TransferBatch.
// Moreover, we need to cache fewer batches than user chunks,
// so this number could be small.
- if (class_id == kBatchClassID) return Min((uptr)8, kMaxNumCached);
+ if (class_id == kBatchClassID) return 8;
uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max<uptr>(1, Min(kMaxNumCached, n));
+ return Max<uptr>(1, Min(kMaxNumCachedHint, n));
}
static void Print() {
uptr d = s - prev_s;
uptr p = prev_s ? (d * 100 / prev_s) : 0;
uptr l = s ? MostSignificantSetBitIndex(s) : 0;
- uptr cached = MaxCached(i) * s;
+ uptr cached = MaxCachedHint(i) * s;
if (i == kBatchClassID)
d = l = p = 0;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
+ i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
total_cached += cached;
prev_s = s;
}
}
};
-typedef SizeClassMap<17, 126, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 62, 14> CompactSizeClassMap;
+typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// In a world where regions are small and chunks are huge...
- typedef SizeClassMap<63, 126, 16> SpecialSizeClassMap;
+ typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
SpecialSizeClassMap> SpecialAllocator64;
const uptr kRegionSize =