/// Used so that we can compute how much space was wasted.
size_t BytesAllocated;
+ /// \brief How many slabs we've allocated.
+ ///
+ /// Used to scale the size of each slab and reduce the number of allocations
+ /// for extremely heavy memory use scenarios.
+ size_t NumSlabs;
+
/// \brief Aligns \c Ptr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two. This method rounds up, so
void Deallocate(const void * /*Ptr*/) {}
- unsigned GetNumSlabs() const;
+ size_t GetNumSlabs() const { return NumSlabs; }
void PrintStats() const;
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold,
SlabAllocator &allocator)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(allocator), CurSlab(0), BytesAllocated(0) { }
+ Allocator(allocator), CurSlab(0), BytesAllocated(0), NumSlabs(0) {}
BumpPtrAllocator::BumpPtrAllocator(size_t size, size_t threshold)
: SlabSize(size), SizeThreshold(std::min(size, threshold)),
- Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0) { }
+ Allocator(DefaultSlabAllocator), CurSlab(0), BytesAllocated(0),
+ NumSlabs(0) {}
BumpPtrAllocator::~BumpPtrAllocator() {
DeallocateSlabs(CurSlab);
/// StartNewSlab - Allocate a new slab and move the bump pointers over into
/// the new slab. Modifies CurPtr and End.
void BumpPtrAllocator::StartNewSlab() {
- // If we allocated a big number of slabs already it's likely that we're going
- // to allocate more. Increase slab size to reduce mallocs and possibly memory
- // overhead. The factors are chosen conservatively to avoid overallocation.
- if (BytesAllocated >= SlabSize * 128)
- SlabSize *= 2;
-
- MemSlab *NewSlab = Allocator.Allocate(SlabSize);
+ ++NumSlabs;
+ // Scale the actual allocated slab size based on the number of slabs
+ // allocated. Every 128 slabs allocated, we double the allocated size to
+ // reduce allocation frequency, saturating once the multiplier over the base
+ // slab size reaches 2^30.
+ // FIXME: Currently, this count includes special slabs for objects above the
+ // size threshold. That will be fixed in a subsequent commit to make the
+ // growth even more predictable.
+ size_t AllocatedSlabSize =
+ SlabSize * (1 << std::min<size_t>(30, NumSlabs / 128));
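+ // For example, slabs 1 through 127 are allocated at SlabSize bytes each,
+ // slabs 128 through 255 at 2 * SlabSize, and so on, up to a cap of
+ // SlabSize << 30.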
+
+ MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
NewSlab->NextPtr = CurSlab;
CurSlab = NewSlab;
CurPtr = (char*)(CurSlab + 1);
#endif
Allocator.Deallocate(Slab);
Slab = NextSlab;
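+ // Keep the running slab count in sync as each slab is freed.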
+ --NumSlabs;
}
}
// If Size is really big, allocate a separate slab for it.
size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
if (PaddedSize > SizeThreshold) {
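+ // Count oversized slabs too so GetNumSlabs() reflects every slab we own;
+ // see the FIXME in StartNewSlab about their effect on the growth heuristic.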
+ ++NumSlabs;
MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
// Put the new slab after the current slab, since we are not allocating
return Ptr;
}
-unsigned BumpPtrAllocator::GetNumSlabs() const {
- unsigned NumSlabs = 0;
- for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {
- ++NumSlabs;
- }
- return NumSlabs;
-}
-
size_t BumpPtrAllocator::getTotalMemory() const {
size_t TotalMemory = 0;
for (MemSlab *Slab = CurSlab; Slab != 0; Slab = Slab->NextPtr) {