void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
MapPlatformData *Data) {
+ // TODO: DCHECK the BaseAddress is consistent with the data in
+ // MapPlatformData.
DCHECK(Data);
DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
ReleaseRecorder Recorder(Base);
- PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+ PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
+ /*ReleaseSize=*/RegionSize);
auto DecompactPtr = [](CompactPtrT CompactPtr) {
return reinterpret_cast<uptr>(CompactPtr);
};
const uptr GroupSize = (1U << GroupSizeLog);
const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
- ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
- PageReleaseContext Context(BlockSize, Region->AllocatedUser,
- /*NumberOfRegions=*/1U);
-
const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
auto DecompactPtr = [CompactPtrBase](CompactPtrT CompactPtr) {
return decompactPtrInternal(CompactPtrBase, CompactPtr);
};
- for (BatchGroup &BG : Region->FreeList) {
+
+ // Instead of always preparing PageMap for the entire region, we only do it
+ // for the range of releasing groups. To do that, the free-block marking
+ // process includes visiting BatchGroups twice.
+
+ // The first visit is to determine the range of BatchGroups we are going to
+ // release, extracting them from the FreeList and pushing them into
+ // `GroupToRelease`. The second visit, done after the releasing range is
+ // computed, marks their free blocks in the PageMap.
+ SinglyLinkedList<BatchGroup> GroupToRelease;
+ GroupToRelease.clear();
+
+ // This is only used for debugging, to verify that the number of groups
+ // stays the same after the groups are merged back.
+ uptr NumberOfBatchGroups = Region->FreeList.size();
+
+ for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;
+ BG != nullptr;) {
const uptr PushedBytesDelta =
- BG.PushedBlocks - BG.PushedBlocksAtLastCheckpoint;
- if (PushedBytesDelta * BlockSize < PageSize)
+ BG->PushedBlocks - BG->PushedBlocksAtLastCheckpoint;
+ if (PushedBytesDelta * BlockSize < PageSize) {
+ Prev = BG;
+ BG = BG->Next;
continue;
+ }
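+ // In other words, if less than a page worth of bytes has been pushed to
+ // this group since the last release attempt, it is unlikely that a new
+ // page has become releasable, so the group is skipped.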
// Group boundary does not necessarily have the same alignment as Region.
// It may sit across a Region boundary, which means we may have the
// following two cases:
//
// 1. Group boundary sits before RegionBeg.
//
- // (BatchGroupBeg)
+ // (BatchGroupBase)
// batchGroupBase RegionBeg BatchGroupEnd
// | | |
// v v v
//
// 2. Group boundary sits after RegionBeg.
//
- // (BatchGroupBeg)
+ // (BatchGroupBase)
// RegionBeg batchGroupBase BatchGroupEnd
// | | |
// v v v
// Note that in the first case, the group range before RegionBeg is never
// used. Therefore, while calculating the used group size, we should
// exclude that part to get the correct size.
- const uptr BatchGroupBeg =
- Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
- DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
+ const uptr BatchGroupBase =
+ Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
+ DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
const uptr BatchGroupEnd =
- batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+ batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
- ? BatchGroupEnd - BatchGroupBeg
- : AllocatedUserEnd - BatchGroupBeg;
- if (AllocatedGroupSize == 0)
+ ? BatchGroupEnd - BatchGroupBase
+ : AllocatedUserEnd - BatchGroupBase;
+ if (AllocatedGroupSize == 0) {
+ Prev = BG;
+ BG = BG->Next;
continue;
+ }
// TransferBatches are pushed in front of BG.Batches, so only the first one
// may be partially filled; count it separately via getCount().
- const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
- BG.Batches.front()->getCount();
+ const uptr NumBlocks = (BG->Batches.size() - 1) * BG->MaxCachedPerBatch +
+ BG->Batches.front()->getCount();
const uptr BytesInBG = NumBlocks * BlockSize;
// Given the randomness property, we try to release the pages only if the
// bytes used by free blocks exceed certain proportion of group size. Note
// that this heuristic only applies when all the spaces in a BatchGroup
// are allocated.
if (CheckDensity && (BytesInBG * 100U) / AllocatedGroupSize <
(100U - 1U - BlockSize / 16U)) {
+ Prev = BG;
+ BG = BG->Next;
continue;
}
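+ // For instance, with BlockSize = 512 the threshold evaluates to
+ // 100 - 1 - 512 / 16 = 67, i.e. the group is considered for release only
+ // when at least 67% of its allocated bytes are in free blocks; larger
+ // block sizes lower the required density.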
+ // If `BG` is the first BatchGroup in the list, we only need to advance
+ // `BG` and call FreeList::pop_front(). No update is needed for `Prev`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // nil +--+ +--+
+ // |X | -> | | -> ...
+ // +--+ +--+
+ //
+ // Otherwise, `Prev` will be used to extract the `Cur` from the
+ // `FreeList`.
+ //
+ // (BG) (BG->Next)
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | | -> |X | -> | | -> ...
+ // +--+ +--+ +--+
+ //
+ // After FreeList::extract(),
+ //
+ // Prev Cur BG
+ // | | |
+ // v v v
+ // +--+ +--+ +--+
+ // | |-+ |X | +->| | -> ...
+ // +--+ | +--+ | +--+
+ // +--------+
+ //
+ // Note that we need to advance `BG` before pushing this BatchGroup to
+ // GroupToRelease because pushing is a destructive operation.
+
+ BatchGroup *Cur = BG;
+ BG = BG->Next;
+
+ if (Prev != nullptr)
+ Region->FreeList.extract(Prev, Cur);
+ else
+ Region->FreeList.pop_front();
+ GroupToRelease.push_back(Cur);
+ }
+
+ if (GroupToRelease.empty())
+ return 0;
+
+ const uptr ReleaseBase =
+ Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
+ Region->RegionBeg);
+ const uptr LastGroupEnd =
+ Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
+ GroupSize,
+ AllocatedUserEnd);
+ // The last block may straddle the group boundary. Round up to BlockSize
+ // to get the exact range.
+ const uptr ReleaseEnd =
+ roundUpSlow(LastGroupEnd - Region->RegionBeg, BlockSize) +
+ Region->RegionBeg;
+ const uptr ReleaseRangeSize = ReleaseEnd - ReleaseBase;
+ const uptr ReleaseOffset = ReleaseBase - Region->RegionBeg;
+
+ ReleaseRecorder Recorder(Region->RegionBeg, ReleaseOffset, &Region->Data);
+ PageReleaseContext Context(
+ BlockSize, Region->AllocatedUser, /*NumberOfRegions=*/1U,
+ ReleaseRangeSize, ReleaseOffset);
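+ // Worked example (hypothetical numbers): with RegionBeg = 0x200000,
+ // GroupSize = 0x10000, BlockSize = 0x100 and AllocatedUserEnd past the
+ // last group, releasing the groups based at 0x210000 and 0x220000 gives
+ // ReleaseBase = 0x210000, LastGroupEnd = ReleaseEnd = 0x230000,
+ // ReleaseRangeSize = 0x20000 and ReleaseOffset = 0x10000, so the PageMap
+ // only needs to cover two groups rather than the whole region.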
+
+ for (BatchGroup &BG : GroupToRelease) {
BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
- const uptr BatchGroupUsedEnd = BatchGroupBeg + AllocatedGroupSize;
+ // TODO(chiahungduan): Replace GroupId with BatchGroupBase to simplify the
+ // calculation here and above (where we determine the set of groups to
+ // release).
+ const uptr BatchGroupBase =
+ Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
+ const uptr BatchGroupEnd =
+ batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+ const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
+ ? BatchGroupEnd - BatchGroupBase
+ : AllocatedUserEnd - BatchGroupBase;
+ const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
const bool BlockAlignedWithUsedEnd =
(BatchGroupUsedEnd - Region->RegionBeg) % BlockSize == 0;
uptr MaxContainedBlocks = AllocatedGroupSize / BlockSize;
if (!BlockAlignedWithUsedEnd)
++MaxContainedBlocks;
+ const uptr NumBlocks = (BG.Batches.size() - 1) * BG.MaxCachedPerBatch +
+ BG.Batches.front()->getCount();
+
if (NumBlocks == MaxContainedBlocks) {
for (const auto &It : BG.Batches)
for (u16 I = 0; I < It.getCount(); ++I)
DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
- Context.markRangeAsAllCounted(BatchGroupBeg, BatchGroupUsedEnd,
+ Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
Region->RegionBeg);
} else {
DCHECK_LT(NumBlocks, MaxContainedBlocks);
}
}
- if (!Context.hasBlockMarked())
- return 0;
+ DCHECK(Context.hasBlockMarked());
auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
}
Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+ // Merge GroupToRelease back into the Region's FreeList. Note that both
+ // `Region->FreeList` and `GroupToRelease` are sorted.
+ for (BatchGroup *BG = Region->FreeList.front(), *Prev = nullptr;;) {
+ if (BG == nullptr || GroupToRelease.empty()) {
+ if (!GroupToRelease.empty())
+ Region->FreeList.append_back(&GroupToRelease);
+ break;
+ }
+
+ DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
+
+ if (BG->GroupId < GroupToRelease.front()->GroupId) {
+ Prev = BG;
+ BG = BG->Next;
+ continue;
+ }
+
+ // At this point, `BG` is the first BatchGroup whose GroupId is larger
+ // than that of the first element in `GroupToRelease`. We need to insert
+ // `GroupToRelease::front()` (which is `Cur` below) before `BG`.
+ //
+ // 1. If `Prev` is nullptr, we simply push `Cur` to the front of
+ // FreeList.
+ // 2. Otherwise, use `insert()` which inserts an element next to `Prev`.
+ //
+ // Afterwards, we don't need to advance `BG` because the order between
+ // `BG` and the new `GroupToRelease::front()` hasn't been checked.
+ BatchGroup *Cur = GroupToRelease.front();
+ GroupToRelease.pop_front();
+ if (Prev == nullptr)
+ Region->FreeList.push_front(Cur);
+ else
+ Region->FreeList.insert(Prev, Cur);
+ DCHECK_EQ(Cur->Next, BG);
+ Prev = Cur;
+ }
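+ // Illustrative trace (hypothetical GroupIds): merging FreeList = [1, 4]
+ // with GroupToRelease = [2, 3] advances `Prev` to 1, inserts 2 and then 3
+ // after it (`BG` stays at 4 for both), and leaves the FreeList in the
+ // sorted order [1, 2, 3, 4].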
+
+ DCHECK_EQ(Region->FreeList.size(), NumberOfBatchGroups);
+ (void)NumberOfBatchGroups;
+
+ if (SCUDO_DEBUG) {
+ BatchGroup *Prev = Region->FreeList.front();
+ for (BatchGroup *Cur = Prev->Next; Cur != nullptr;
+ Prev = Cur, Cur = Cur->Next) {
+ CHECK_LT(Prev->GroupId, Cur->GroupId);
+ }
+ }
+
return Recorder.getReleasedBytes();
}
};
class ReleaseRecorder {
public:
- ReleaseRecorder(uptr Base, MapPlatformData *Data = nullptr)
- : Base(Base), Data(Data) {}
+ ReleaseRecorder(uptr Base, uptr Offset = 0, MapPlatformData *Data = nullptr)
+ : Base(Base), Offset(Offset), Data(Data) {}
uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
// Releases [From, To) range of pages back to OS.
void releasePageRangeToOS(uptr From, uptr To) {
const uptr Size = To - From;
- releasePagesToOS(Base, From, Size, Data);
+ releasePagesToOS(Base, From + Offset, Size, Data);
ReleasedRangesCount++;
ReleasedBytes += Size;
}
private:
uptr ReleasedRangesCount = 0;
uptr ReleasedBytes = 0;
+ // The starting address to release. Note that we might want to fold (Base +
+ // Offset) into a single new Base. However, on Fuchsia the base address is
+ // retrieved from `MapPlatformData`, so a combined Base would lose the
+ // offset. Therefore, store them separately to make it work on all platforms.
uptr Base = 0;
+ // The offset from Base at which releasing starts. This is used when we
+ // know that the range [Base, Base + Offset) will not be released.
+ uptr Offset = 0;
MapPlatformData *Data = nullptr;
};
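+ // Usage sketch (hypothetical values): a recorder that starts releasing one
+ // page past Base.
+ //
+ //   ReleaseRecorder Recorder(RegionBeg, /*Offset=*/PageSize, &Data);
+ //   Recorder.releasePageRangeToOS(0, PageSize);
+ //
+ // This ends up calling releasePagesToOS(RegionBeg, PageSize, PageSize,
+ // &Data), i.e. the second page of the region is returned to the OS.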
};
struct PageReleaseContext {
- PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions) :
- BlockSize(BlockSize),
- RegionSize(RegionSize),
- NumberOfRegions(NumberOfRegions) {
+ PageReleaseContext(uptr BlockSize, uptr RegionSize, uptr NumberOfRegions,
+ uptr ReleaseSize, uptr ReleaseOffset = 0)
+ : BlockSize(BlockSize), RegionSize(RegionSize),
+ NumberOfRegions(NumberOfRegions) {
PageSize = getPageSizeCached();
if (BlockSize <= PageSize) {
if (PageSize % BlockSize == 0) {
}
}
- PagesCount = roundUp(RegionSize, PageSize) / PageSize;
+ // TODO: For multiple regions, it's more complicated to support partial
+ // region marking (which includes the complexity of how to handle the last
+ // block in a region). We may consider this after markFreeBlocks() accepts
+ // only free blocks from the same region.
+ if (NumberOfRegions != 1) {
+ DCHECK_EQ(ReleaseSize, RegionSize);
+ DCHECK_EQ(ReleaseOffset, 0U);
+ }
+
+ PagesCount = roundUp(ReleaseSize, PageSize) / PageSize;
PageSizeLog = getLog2(PageSize);
- RoundedRegionSize = PagesCount << PageSizeLog;
+ RoundedRegionSize = roundUp(RegionSize, PageSize);
RoundedSize = NumberOfRegions * RoundedRegionSize;
+ ReleasePageOffset = ReleaseOffset >> PageSizeLog;
}
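+ // Size comparison (assuming 4 KiB pages): for RegionSize = 64 KiB, a full
+ // release passes ReleaseSize = 64 KiB, giving PagesCount = 16; a partial
+ // release of the last 16 KiB passes ReleaseSize = 16 KiB and
+ // ReleaseOffset = 48 KiB, giving PagesCount = 4 and ReleasePageOffset = 12.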
// PageMap is lazily allocated when markFreeBlocks() is invoked.
uptr NumBlocksInFirstPage =
(FromInRegion + PageSize - FirstBlockInRange + BlockSize - 1) /
BlockSize;
- PageMap.incN(RegionIndex, FromInRegion >> PageSizeLog,
+ PageMap.incN(RegionIndex, getPageIndex(FromInRegion),
NumBlocksInFirstPage);
FromInRegion = roundUp(FromInRegion + 1, PageSize);
}
// If the last block is not aligned to `To`, we need to increment the
// counter of the next page by 1.
if (LastBlockInRange + BlockSize != ToInRegion) {
- PageMap.incRange(RegionIndex, ToInRegion >> PageSizeLog,
- (LastBlockInRange + BlockSize - 1) >> PageSizeLog);
+ PageMap.incRange(RegionIndex, getPageIndex(ToInRegion),
+ getPageIndex(LastBlockInRange + BlockSize - 1));
}
} else {
ToInRegion = RegionSize;
// After handling the first page and the last block, it's safe to mark any
// page in between the range [From, To).
if (FromInRegion < ToInRegion) {
- PageMap.setAsAllCountedRange(RegionIndex, FromInRegion >> PageSizeLog,
- (ToInRegion - 1) >> PageSizeLog);
+ PageMap.setAsAllCountedRange(RegionIndex, getPageIndex(FromInRegion),
+ getPageIndex(ToInRegion - 1));
}
}
DecompactPtrT DecompactPtr, uptr Base) {
ensurePageMapAllocated();
+ const uptr LastBlockInRegion = ((RegionSize / BlockSize) - 1U) * BlockSize;
+
+ // The last block in a region may not use the entire page, so if it's free,
+ // we mark the following "pretend" memory block(s) as free.
+ auto markLastBlock = [this, LastBlockInRegion](const uptr RegionIndex) {
+ uptr PInRegion = LastBlockInRegion + BlockSize;
+ while (PInRegion < RoundedRegionSize) {
+ PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+ getPageIndex(PInRegion + BlockSize - 1));
+ PInRegion += BlockSize;
+ }
+ };
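+ // For example (assumed sizes): with PageSize = 4 KiB, BlockSize = 2 KiB
+ // and RegionSize = 7 KiB, the last block occupies [4 KiB, 6 KiB) and
+ // RoundedRegionSize = 8 KiB. Freeing that block also increments the
+ // counter for the pretend block [6 KiB, 8 KiB), so the last page can
+ // reach its expected full-page block count.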
+
// Iterate over free chunks and count how many free chunks affect each
// allocated page.
if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
continue;
const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
const uptr PInRegion = P - RegionIndex * RegionSize;
- PageMap.inc(RegionIndex, PInRegion >> PageSizeLog);
+ PageMap.inc(RegionIndex, getPageIndex(PInRegion));
+ if (PInRegion == LastBlockInRegion)
+ markLastBlock(RegionIndex);
}
}
} else {
// In all other cases chunks might affect more than one page.
DCHECK_GE(RegionSize, BlockSize);
- const uptr LastBlockInRegion =
- ((RegionSize / BlockSize) - 1U) * BlockSize;
for (const auto &It : FreeList) {
for (u16 I = 0; I < It.getCount(); I++) {
const uptr P = DecompactPtr(It.get(I)) - Base;
continue;
const uptr RegionIndex = NumberOfRegions == 1U ? 0 : P / RegionSize;
uptr PInRegion = P - RegionIndex * RegionSize;
- PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- // The last block in a region might straddle a page, so if it's
- // free, we mark the following "pretend" memory block(s) as free.
- if (PInRegion == LastBlockInRegion) {
- PInRegion += BlockSize;
- while (PInRegion < RoundedRegionSize) {
- PageMap.incRange(RegionIndex, PInRegion >> PageSizeLog,
- (PInRegion + BlockSize - 1) >> PageSizeLog);
- PInRegion += BlockSize;
- }
- }
+ PageMap.incRange(RegionIndex, getPageIndex(PInRegion),
+ getPageIndex(PInRegion + BlockSize - 1));
+ if (PInRegion == LastBlockInRegion)
+ markLastBlock(RegionIndex);
}
}
}
}
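+ // Maps a byte offset within the region to its PageMap index, which is
+ // relative to the start of the releasing range. E.g., with 4 KiB pages and
+ // ReleasePageOffset = 12, the in-region offset 0xC000 (48 KiB) maps to
+ // index 0.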
+ uptr getPageIndex(uptr P) { return (P >> PageSizeLog) - ReleasePageOffset; }
+
uptr BlockSize;
uptr RegionSize;
uptr NumberOfRegions;
+ // For partial region marking, the pages preceding the release offset do
+ // not need to be counted.
+ uptr ReleasePageOffset;
uptr PageSize;
uptr PagesCount;
uptr PageSizeLog;
const uptr BlockSize = Context.BlockSize;
const uptr PagesCount = Context.PagesCount;
const uptr NumberOfRegions = Context.NumberOfRegions;
+ const uptr ReleasePageOffset = Context.ReleasePageOffset;
const uptr FullPagesBlockCountMax = Context.FullPagesBlockCountMax;
const bool SameBlockCountPerPage = Context.SameBlockCountPerPage;
RegionPageMap &PageMap = Context.PageMap;
}
uptr PrevPageBoundary = 0;
uptr CurrentBoundary = 0;
+ if (ReleasePageOffset > 0) {
+ PrevPageBoundary = ReleasePageOffset * PageSize;
+ CurrentBoundary = roundUpSlow(PrevPageBoundary, BlockSize);
+ }
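+ // For instance (hypothetical sizes), with 4 KiB pages, ReleasePageOffset =
+ // 3 and BlockSize = 5 KiB: PrevPageBoundary = 12 KiB and CurrentBoundary =
+ // roundUpSlow(12 KiB, 5 KiB) = 15 KiB, i.e. the first block boundary at or
+ // after the start of the releasing range.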
for (uptr J = 0; J < PagesCount; J++) {
const uptr PageBoundary = PrevPageBoundary + PageSize;
uptr BlocksPerPage = Pn;
uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
ReleaseRecorderT &Recorder, DecompactPtrT DecompactPtr,
SkipRegionT SkipRegion) {
- PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions);
+ PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
+ /*ReleaseSize=*/RegionSize);
Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
}
class ReleasedPagesRecorder {
public:
+ ReleasedPagesRecorder() = default;
+ explicit ReleasedPagesRecorder(scudo::uptr Base) : Base(Base) {}
std::set<scudo::uptr> ReportedPages;
void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
const scudo::uptr PageSize = scudo::getPageSizeCached();
for (scudo::uptr I = From; I < To; I += PageSize)
- ReportedPages.insert(I);
+ ReportedPages.insert(I + getBase());
}
- scudo::uptr getBase() const { return 0; }
+ scudo::uptr getBase() const { return Base; }
+ scudo::uptr Base = 0;
};
// Simplified version of a TransferBatch.
ReleasedPagesRecorder Recorder;
scudo::PageReleaseContext Context(BlockSize,
/*RegionSize=*/MaxBlocks * BlockSize,
- /*NumberOfRegions=*/1U);
+ /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/MaxBlocks * BlockSize);
ASSERT_FALSE(Context.hasBlockMarked());
Context.markFreeBlocks(FreeList, DecompactPtr, Recorder.getBase());
ASSERT_TRUE(Context.hasBlockMarked());
const scudo::uptr GroupEnd = GroupBeg + GroupSize;
scudo::PageReleaseContext Context(BlockSize, RegionSize,
- /*NumberOfRegions=*/1U);
- Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0);
+ /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize);
+ Context.markRangeAsAllCounted(GroupBeg, GroupEnd, /*Base=*/0U);
scudo::uptr FirstBlock =
((GroupBeg + BlockSize - 1) / BlockSize) * BlockSize;
// Release the entire region. This is to ensure the last page is counted.
scudo::PageReleaseContext Context(BlockSize, RegionSize,
- /*NumberOfRegions=*/1U);
+ /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize);
Context.markRangeAsAllCounted(/*From=*/0U, /*To=*/RegionSize, /*Base=*/0);
for (scudo::uptr Page = 0; Page < RoundedRegionSize / PageSize; ++Page)
EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0, Page));
} // Iterate each size class
}
+template <class SizeClassMap> void testReleasePartialRegion() {
+ typedef FreeBatch<SizeClassMap> Batch;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ const scudo::uptr ReleaseBase = PageSize;
+ const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
+
+ for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+ // In the following, we want to ensure the region includes at least 2 pages
+ // and we will release all the pages except the first one. The handling of
+ // the last block is tricky, so we always test the case that includes the
+ // last block.
+ const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+ const scudo::uptr RegionSize =
+ scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) +
+ BlockSize;
+ const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
+
+ scudo::SinglyLinkedList<Batch> FreeList;
+ FreeList.clear();
+
+ // Skip the blocks in the first page and add the remaining.
+ std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
+ for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+ Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
+ for (scudo::uptr Page = Block / PageSize;
+ Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
+ ASSERT_LT(Page, Pages.size());
+ ++Pages[Page];
+ }
+ }
+
+ // This follows the logic of how we count the last page. It should be
+ // consistent with how markFreeBlocks() handles the last block.
+ if (RoundedRegionSize % BlockSize != 0)
+ ++Pages.back();
+
+ Batch *CurrentBatch = nullptr;
+ for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+ Block < RegionSize; Block += BlockSize) {
+ if (CurrentBatch == nullptr ||
+ CurrentBatch->getCount() == Batch::MaxCount) {
+ CurrentBatch = new Batch;
+ CurrentBatch->clear();
+ FreeList.push_back(CurrentBatch);
+ }
+ CurrentBatch->add(Block);
+ }
+
+ auto VerifyReleaseToOs = [&](scudo::PageReleaseContext &Context) {
+ auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
+ ReleasedPagesRecorder Recorder(ReleaseBase);
+ releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
+ const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize);
+
+ for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
+ if (P < FirstBlock) {
+ // If FirstBlock is not aligned with the page boundary, the first
+ // touched page will not be released either.
+ EXPECT_TRUE(Recorder.ReportedPages.find(P) ==
+ Recorder.ReportedPages.end());
+ } else {
+ EXPECT_TRUE(Recorder.ReportedPages.find(P) !=
+ Recorder.ReportedPages.end());
+ }
+ }
+ };
+
+ // Test marking by visiting each block.
+ {
+ auto DecompactPtr = [](scudo::uptr P) { return P; };
+ scudo::PageReleaseContext Context(
+ BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+ Context.markFreeBlocks(FreeList, DecompactPtr, /*Base=*/0U);
+ for (const Batch &It : FreeList) {
+ for (scudo::u16 I = 0; I < It.getCount(); I++) {
+ scudo::uptr Block = It.get(I);
+ for (scudo::uptr Page = Block / PageSize;
+ Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
+ EXPECT_EQ(Pages[Page], Context.PageMap.get(/*Region=*/0U,
+ Page - BasePageOffset));
+ }
+ }
+ }
+
+ VerifyReleaseToOs(Context);
+ }
+
+ // Test range marking.
+ {
+ scudo::PageReleaseContext Context(
+ BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+ Context.markRangeAsAllCounted(ReleaseBase, RegionSize, /*Base=*/0U);
+ for (scudo::uptr Page = ReleaseBase / PageSize;
+ Page < RoundedRegionSize / PageSize; ++Page) {
+ if (Context.PageMap.get(/*Region=*/0, Page - BasePageOffset) !=
+ Pages[Page]) {
+ EXPECT_TRUE(Context.PageMap.isAllCounted(/*Region=*/0,
+ Page - BasePageOffset));
+ }
+ }
+
+ VerifyReleaseToOs(Context);
+ }
+
+ // Check the buffer size of PageMap.
+ {
+ scudo::PageReleaseContext Full(BlockSize, RegionSize,
+ /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize);
+ Full.ensurePageMapAllocated();
+ scudo::PageReleaseContext Partial(
+ BlockSize, RegionSize, /*NumberOfRegions=*/1U,
+ /*ReleaseSize=*/RegionSize - PageSize, ReleaseBase);
+ Partial.ensurePageMapAllocated();
+
+ EXPECT_GE(Full.PageMap.getBufferSize(), Partial.PageMap.getBufferSize());
+ }
+
+ while (!FreeList.empty()) {
+ CurrentBatch = FreeList.front();
+ FreeList.pop_front();
+ delete CurrentBatch;
+ }
+ } // Iterate each size class
+}
+
TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
}
testPageMapMarkRange<scudo::FuchsiaSizeClassMap>();
testPageMapMarkRange<scudo::SvelteSizeClassMap>();
}
+
+TEST(ScudoReleaseTest, ReleasePartialRegion) {
+ testReleasePartialRegion<scudo::DefaultSizeClassMap>();
+ testReleasePartialRegion<scudo::AndroidSizeClassMap>();
+ testReleasePartialRegion<scudo::FuchsiaSizeClassMap>();
+ testReleasePartialRegion<scudo::SvelteSizeClassMap>();
+}