// Forcibly attempts to return unused memory to the OS from both the primary
// and (newly) the secondary allocator. Safe to call at any time; initializes
// the calling thread's allocator state first if needed.
void releaseToOS() {
initThreadMaybe();
Primary.releaseToOS();
+ Secondary.releaseToOS();
}
// Iterate over all chunks and call a callback for all busy chunks located
SizeClassInfo *Sci = getSizeClassInfo(I);
Sci->RandState = getRandomU32(&Seed);
// See comment in the 64-bit primary about releasing smaller size classes.
// CanRelease no longer depends on ReleaseToOsInterval: a negative interval
// now only disables the periodic release, while an explicit releaseToOS()
// call can still release eligible size classes (everything but the batch
// class, above the page-fraction size threshold).
- Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
- (I != SizeClassMap::BatchClassId) &&
+ Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
}
ReleaseToOsIntervalMs = ReleaseToOsInterval;
// Force-releases free memory in every size class back to the OS.
// Returns the total number of bytes released.
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
// NOTE(review): the explicit BatchClassId skip is dropped here —
// presumably releaseToOSMaybe() ignores classes with CanRelease unset
// (which includes the batch class); confirm against releaseToOSMaybe.
- if (I == SizeClassMap::BatchClassId)
- continue;
SizeClassInfo *Sci = getSizeClassInfo(I);
ScopedLock L(Sci->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Sci, I, /*Force=*/true);
// memory accesses which ends up being fairly costly. The current lower
// limit is mostly arbitrary and based on empirical observations.
// TODO(kostyak): make the lower limit a runtime option
// As in the 32-bit primary, CanRelease is decoupled from the release
// interval so that a forced releaseToOS() still works when the periodic
// release is disabled (negative interval).
- Region->CanRelease = (ReleaseToOsInterval >= 0) &&
- (I != SizeClassMap::BatchClassId) &&
+ Region->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
Region->RandState = getRandomU32(&Seed);
}
// 64-bit primary counterpart of releaseToOS(): force-releases free memory
// in every region back to the OS and returns the total bytes released.
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
// NOTE(review): BatchClassId skip removed, matching the 32-bit primary;
// releaseToOSMaybe() is expected to no-op for it — TODO confirm.
- if (I == SizeClassMap::BatchClassId)
- continue;
RegionInfo *Region = getRegionInfo(I);
ScopedLock L(Region->Mutex);
TotalReleasedBytes += releaseToOSMaybe(Region, I, /*Force=*/true);
// No-op cache variant: nothing is ever cacheable, and enable/disable/
// releaseToOS are empty since there is no state to protect or memory to
// return to the OS.
static bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
+ void releaseToOS() {}
};
template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19>
}
// Attempts to find a cached block suitable for a request of `Size` bytes;
// on success stores its header in *H and returns true.
bool retrieve(uptr Size, LargeBlock::Header **H) {
// Cache the page size once instead of re-querying it on every loop
// iteration below.
+ const uptr PageSize = getPageSizeCached();
ScopedLock L(Mutex);
if (EntriesCount == 0)
return false;
const uptr BlockSize = Entries[I].BlockEnd - Entries[I].Block;
if (Size > BlockSize)
continue;
// Reject blocks more than 4 pages larger than the request, bounding the
// wasted tail space of a reused block.
- if (Size < BlockSize - getPageSizeCached() * 4U)
+ if (Size < BlockSize - PageSize * 4U)
continue;
*H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
Entries[I].Block = 0;
// NOTE(review): hunk boundary above — the following return appears to
// belong to canCache(), not retrieve(); verify against the full file.
return MaxEntriesCount != 0U && Size <= MaxEntrySize;
}
// Force-release every cached entry by treating all entries as older than
// the maximum possible timestamp.
+ void releaseToOS() { releaseOlderThan(UINT64_MAX); }
+
// disable() parks other users of the cache by holding its mutex; enable()
// releases it.
void disable() { Mutex.lock(); }
void enable() { Mutex.unlock(); }
// Thin forwarders: cacheability checks and OS-release requests are
// delegated to the configured cache type / instance.
static uptr canCache(uptr Size) { return CacheT::canCache(Size); }
+ void releaseToOS() { Cache.releaseToOS(); }
+
private:
CacheT Cache;