reportError("SizeClassAllocator32 is not supported on Fuchsia");
PossibleRegions.initLinkerInitialized();
- MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
u32 Seed;
const u64 Time = getMonotonicTime();
for (uptr I = 0; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
Sci->RandState = getRandomU32(&Seed);
+ // Sci->MaxRegionIndex is already initialized to 0.
+ Sci->MinRegionIndex = NumRegions;
// See comment in the 64-bit primary about releasing smaller size classes.
Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
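+ // Hand any regions still parked in the stash back to the OS first.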
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
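+ // Gather the global [Min, Max] region index range from the per-class bounds.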
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ }
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
}
template <typename F> void iterateOverBlocks(F Callback) {
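+ // As in unmapTestOnly(), compute the global bounds from the per-class ones.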
+ uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ if (Sci->MinRegionIndex < MinRegionIndex)
+ MinRegionIndex = Sci->MinRegionIndex;
+ if (Sci->MaxRegionIndex > MaxRegionIndex)
+ MaxRegionIndex = Sci->MaxRegionIndex;
+ }
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I] &&
(PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
return TotalReleasedBytes;
}
- static bool useMemoryTagging(Options Options) {
- (void)Options;
- return false;
- }
+ static bool useMemoryTagging(UNUSED Options Options) { return false; }
void disableMemoryTagging() {}
const char *getRegionInfoArrayAddress() const { return nullptr; }
static uptr getRegionInfoArraySize() { return 0; }
- static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
- (void)RegionInfoData;
- (void)Ptr;
+ static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
+ UNUSED uptr Ptr) {
return {};
}
bool CanRelease;
u32 RandState;
uptr AllocatedUser;
+ // Lowest & highest region index allocated for this size class, to avoid
+ // looping through the whole NumRegions.
+ uptr MinRegionIndex;
+ uptr MaxRegionIndex;
ReleaseToOsInfo ReleaseInfo;
};
static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
return Region;
}
- uptr allocateRegion(uptr ClassId) {
+ uptr allocateRegion(SizeClassInfo *Sci, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
if (!Region)
Region = allocateRegionSlow();
if (LIKELY(Region)) {
+ // Sci->Mutex is held by the caller, so updating the Min/Max is safe.
const uptr RegionIndex = computeRegionId(Region);
- if (RegionIndex < MinRegionIndex)
- MinRegionIndex = RegionIndex;
- if (RegionIndex > MaxRegionIndex)
- MaxRegionIndex = RegionIndex;
+ if (RegionIndex < Sci->MinRegionIndex)
+ Sci->MinRegionIndex = RegionIndex;
+ if (RegionIndex > Sci->MaxRegionIndex)
+ Sci->MaxRegionIndex = RegionIndex;
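+ // PossibleRegions stores ClassId + 1 (0 meaning unused), hence the + 1U.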
PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
Offset = Sci->CurrentRegionAllocated;
} else {
DCHECK_EQ(Sci->CurrentRegionAllocated, 0U);
- Region = allocateRegion(ClassId);
+ Region = allocateRegion(Sci, ClassId);
if (UNLIKELY(!Region))
return nullptr;
C->getStats().add(StatMapped, RegionSize);
const uptr BlockSize = getSizeByClassId(ClassId);
const uptr PageSize = getPageSizeCached();
- CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
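+ // Popped - Pushed = blocks currently in use; the rest of AllocatedUser
+ // sits in the free list.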
const uptr BytesInFreeList =
Sci->AllocatedUser -
(Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
}
}
- DCHECK_GT(MinRegionIndex, 0U);
- uptr First = 0;
- for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
- if (PossibleRegions[I] - 1U == ClassId) {
- First = I;
- break;
- }
- }
- uptr Last = 0;
- for (uptr I = MaxRegionIndex; I >= MinRegionIndex; I--) {
- if (PossibleRegions[I] - 1U == ClassId) {
- Last = I;
- break;
- }
- }
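+ // The per-class bounds directly give the range of regions to consider.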
+ const uptr First = Sci->MinRegionIndex;
+ const uptr Last = Sci->MaxRegionIndex;
+ DCHECK_NE(Last, 0U);
+ DCHECK_LE(First, Last);
uptr TotalReleasedBytes = 0;
+ const uptr Base = First * RegionSize;
+ const uptr NumberOfRegions = Last - First + 1U;
+ ReleaseRecorder Recorder(Base);
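+ // Skip regions belonging to other classes, or not mapped at all.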
auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
};
- if (First && Last) {
- const uptr Base = First * RegionSize;
- const uptr NumberOfRegions = Last - First + 1U;
- ReleaseRecorder Recorder(Base);
- releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
- BlockSize, &Recorder, SkipRegion);
- if (Recorder.getReleasedRangesCount() > 0) {
- Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
- Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
- Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
- TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
- }
+ releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
+ BlockSize, &Recorder, SkipRegion);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
}
Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
return TotalReleasedBytes;
}
// Track the regions in use, 0 is unused, otherwise store ClassId + 1.
ByteMap PossibleRegions;
- // Keep track of the lowest & highest regions allocated to avoid looping
- // through the whole NumRegions.
- uptr MinRegionIndex;
- uptr MaxRegionIndex;
atomic_s32 ReleaseToOsIntervalMs;
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.