}
}
uptr TotalReleasedBytes = 0;
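+ // PossibleRegions evidently stores ClassId + 1 (0 meaning the region is not
+ // mapped), hence the - 1U below: a region is skipped when it does not belong
+ // to the size class currently being released.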
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
if (First && Last) {
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
ReleaseRecorder Recorder(Base);
releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
- BlockSize, &Recorder);
+ BlockSize, &Recorder, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
}
}
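+ // The 64-bit primary uses a single region per size class, so no region is
+ // ever skipped; a trivial callback satisfies the new parameter.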
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
- Region->AllocatedUser, 1U, BlockSize, &Recorder);
+ Region->AllocatedUser, 1U, BlockSize, &Recorder,
+ SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Region->ReleaseInfo.PushedBlocksAtLastRelease =
Region->Stats.PushedBlocks;
CurrentPage++;
}
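+ // Skip over N pages, closing any currently open range first so that the
+ // pages of a skipped region are never coalesced into a released range.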
+ void skipPages(uptr N) {
+ closeOpenedRange();
+ CurrentPage += N;
+ }
+
void finish() { closeOpenedRange(); }
private:
uptr CurrentRangeStatePage = 0;
};
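+// SkipRegionT is any callable usable as bool(uptr RegionIndex); regions for
+// which it returns true are excluded from the release scan.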
-template <class TransferBatchT, class ReleaseRecorderT>
+template <class TransferBatchT, class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT *Recorder) {
+ ReleaseRecorderT *Recorder, SkipRegionT SkipRegion) {
const uptr PageSize = getPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
// path (the number of chunks per page is the same for all pages).
FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
if (SameBlockCountPerPage) {
// Fast path, every page has the same number of chunks affecting it.
- for (uptr I = 0; I < NumberOfRegions; I++)
+ for (uptr I = 0; I < NumberOfRegions; I++) {
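+ // Pages of a skipped region are never reported as free.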
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
for (uptr J = 0; J < PagesCount; J++)
RangeTracker.processNextPage(Counters.get(I, J) ==
FullPagesBlockCountMax);
+ }
} else {
// Slow path, go through the pages keeping count of how many chunks affect
// each page: walk the blocks in each region, summing up the number of
// chunks on the current page and checking on every step whether the page
// boundary was crossed.
for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
uptr PrevPageBoundary = 0;
uptr CurrentBoundary = 0;
for (uptr J = 0; J < PagesCount; J++) {
}
// Release the memory.
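+ // The test releases a single region, so the skip callback is a no-op.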
+ auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
ReleasedPagesRecorder Recorder;
releaseFreeMemoryToOS(FreeList, 0, MaxBlocks * BlockSize, 1U, BlockSize,
- &Recorder);
+ &Recorder, SkipRegion);
// Verify that there are no released pages touched by used chunks and all
// ranges of free chunks big enough to contain the entire memory pages had