// as a 4-byte integer (offset from the region start shifted right by 4).
typedef u32 CompactPtrT;
static const uptr kCompactPtrScale = 4;
- CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
}
- uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
space_beg;
}
- uptr GetRegionBeginBySizeClass(uptr class_id) {
+ uptr GetRegionBeginBySizeClass(uptr class_id) const {
return SpaceBeg() + kRegionSize * class_id;
}
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+ // A packed array of counters. Each counter occupies 2^k bits, enough to store
+ // the counter's max_value. The ctor will try to allocate the required buffer
+ // via mapper->MapPackedCounterArrayBuffer and the caller is expected to check
+ // whether the initialization was successful by checking the IsAllocated()
+ // result. For the sake of performance, none of the accessors check the
+ // validity of the arguments; it is assumed that the index is always in the
+ // [0, n) range and that the value is never incremented past max_value.
+ template<class MemoryMapperT>
+ class PackedCounterArray {
+ public:
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
+ : n(num_counters), memory_mapper(mapper) {
+ CHECK_GT(num_counters, 0);
+ CHECK_GT(max_value, 0);
+ constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
+ // Rounding the counter storage size up to a power of two allows for using
+ // bit shifts when calculating a particular counter's index and offset.
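+ // E.g. (illustrative numbers): max_value == 5 needs 3 significant bits,
+ // which is rounded up to 4, so 16 such counters fit into one 64-bit word.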
+ uptr counter_size_bits =
+ RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
+ CHECK_LE(counter_size_bits, kMaxCounterBits);
+ counter_size_bits_log = Log2(counter_size_bits);
+ counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
+
+ uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
+ CHECK_GT(packing_ratio, 0);
+ packing_ratio_log = Log2(packing_ratio);
+ bit_offset_mask = packing_ratio - 1;
+
+ buffer_size =
+ (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
+ sizeof(*buffer);
+ buffer = reinterpret_cast<u64*>(
+ memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
+ }
+ ~PackedCounterArray() {
+ if (buffer) {
+ memory_mapper->UnmapPackedCounterArrayBuffer(
+ reinterpret_cast<uptr>(buffer), buffer_size);
+ }
+ }
+
+ bool IsAllocated() const {
+ return !!buffer;
+ }
+
+ u64 GetCount() const {
+ return n;
+ }
+
+ uptr Get(uptr i) const {
+ DCHECK_LT(i, n);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ return (buffer[index] >> bit_offset) & counter_mask;
+ }
+
+ void Inc(uptr i) const {
+ DCHECK_LT(Get(i), counter_mask);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ buffer[index] += 1ULL << bit_offset;
+ }
+
+ void IncRange(uptr from, uptr to) const {
+ DCHECK_LE(from, to);
+ for (uptr i = from; i <= to; i++)
+ Inc(i);
+ }
+
+ private:
+ const u64 n;
+ u64 counter_size_bits_log;
+ u64 counter_mask;
+ u64 packing_ratio_log;
+ u64 bit_offset_mask;
+
+ MemoryMapperT* const memory_mapper;
+ u64 buffer_size;
+ u64* buffer;
+ };
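+
+ // A minimal usage sketch (illustrative only; MyMapper stands for any class
+ // providing MapPackedCounterArrayBuffer and UnmapPackedCounterArrayBuffer,
+ // such as the MemoryMapper defined below):
+ //
+ //   MyMapper mapper;
+ //   PackedCounterArray<MyMapper> counters(num_pages, max_chunks_per_page,
+ //                                         &mapper);
+ //   if (!counters.IsAllocated())
+ //     return;  // Buffer mapping failed.
+ //   counters.Inc(0);
+ //   counters.IncRange(1, counters.GetCount() - 1);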
+
+ template<class MemoryMapperT>
+ class FreePagesRangeTracker {
+ public:
+ explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ : memory_mapper(mapper),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
+ in_the_range(false), current_page(0), current_range_start_page(0) {}
+
+ void NextPage(bool freed) {
+ if (freed) {
+ if (!in_the_range) {
+ current_range_start_page = current_page;
+ in_the_range = true;
+ }
+ } else {
+ CloseOpenedRange();
+ }
+ current_page++;
+ }
+
+ void Done() {
+ CloseOpenedRange();
+ }
+
+ private:
+ void CloseOpenedRange() {
+ if (in_the_range) {
+ memory_mapper->ReleasePageRangeToOS(
+ current_range_start_page << page_size_scaled_log,
+ current_page << page_size_scaled_log);
+ in_the_range = false;
+ }
+ }
+
+ MemoryMapperT* const memory_mapper;
+ const uptr page_size_scaled_log;
+ bool in_the_range;
+ uptr current_page;
+ uptr current_range_start_page;
+ };
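+
+ // Usage sketch (illustrative; PageIsFullyFreed is a hypothetical predicate):
+ // the tracker coalesces consecutive freed pages and reports each maximal run
+ // via a single ReleasePageRangeToOS call on the mapper.
+ //
+ //   FreePagesRangeTracker<MyMapper> tracker(&mapper);
+ //   for (uptr page = 0; page < num_pages; page++)
+ //     tracker.NextPage(PageIsFullyFreed(page));
+ //   tracker.Done();  // Closes the trailing range, if any.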
+
+ // Iterates over the free_array to identify memory pages containing freed
+ // chunks only and returns these pages back to OS.
+ // allocated_pages_count is the total number of pages allocated for the
+ // current bucket.
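+ // MemoryMapperT is expected to provide MapPackedCounterArrayBuffer,
+ // UnmapPackedCounterArrayBuffer and ReleasePageRangeToOS (see the
+ // MemoryMapper class below).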
+ template<class MemoryMapperT>
+ static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
+ uptr free_array_count, uptr chunk_size,
+ uptr allocated_pages_count,
+ MemoryMapperT *memory_mapper) {
+ const uptr page_size = GetPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr full_pages_chunk_count_max;
+ bool same_chunk_count_per_page;
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Same number of chunks per page, no crossovers.
+ full_pages_chunk_count_max = page_size / chunk_size;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
+ chunk_size % (page_size % chunk_size) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 2;
+ same_chunk_count_per_page = false;
+ } else if (chunk_size > page_size && chunk_size % page_size == 0) {
+ // One chunk covers multiple pages, no crossovers.
+ full_pages_chunk_count_max = 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size > page_size) {
+ // One chunk covers multiple pages and some chunks are crossing page
+ // boundaries. Some pages contain one chunk, some contain two.
+ full_pages_chunk_count_max = 2;
+ same_chunk_count_per_page = false;
+ } else {
+ UNREACHABLE("All chunk_size/page_size ratios must be handled.");
+ }
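+ // Illustrative numbers (assuming page_size == 4096): chunk_size == 64 takes
+ // the first branch (exactly 64 chunks per page); chunk_size == 48 takes the
+ // second (4096 % 48 == 16 and 48 % 16 == 0, so every page is affected by
+ // 85 + 1 == 86 chunks); chunk_size == 96 takes the third (4096 % 96 == 64,
+ // 96 % 64 != 0), so the per-page count varies and the slow path is taken.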
+
+ PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
+ full_pages_chunk_count_max,
+ memory_mapper);
+ if (!counters.IsAllocated())
+ return;
+
+ const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
+ const uptr page_size_scaled = page_size >> kCompactPtrScale;
+ const uptr page_size_scaled_log = Log2(page_size_scaled);
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Each chunk affects one page only.
+ for (uptr i = 0; i < free_array_count; i++)
+ counters.Inc(free_array[i] >> page_size_scaled_log);
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (uptr i = 0; i < free_array_count; i++) {
+ counters.IncRange(
+ free_array[i] >> page_size_scaled_log,
+ (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
+ }
+ }
+
+ // Iterate over pages detecting ranges of pages with chunk counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ if (same_chunk_count_per_page) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr i = 0; i < counters.GetCount(); i++)
+ range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
+ } else {
+ // Slow path, go through the pages keeping count of how many chunks affect
+ // each page.
+ const uptr pn =
+ chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
+ const uptr pnc = pn * chunk_size_scaled;
+ // The idea is to increment the current page pointer by the first chunk size,
+ // then by the middle portion size (the portion of the page covered by chunks
+ // other than the first and the last one) and then by the last chunk size,
+ // adding up the number of chunks on the current page and checking on every
+ // step whether the page boundary was crossed.
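+ // E.g. (illustrative, in compact-pointer units): page_size_scaled == 256 and
+ // chunk_size_scaled == 80 give pn == 3 and pnc == 240; page 0 holds three
+ // full chunks (at 0, 80 and 160) plus the head of the chunk at 240, so it is
+ // released only if counters.Get(0) == 4.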
+ uptr prev_page_boundary = 0;
+ uptr current_boundary = 0;
+ for (uptr i = 0; i < counters.GetCount(); i++) {
+ uptr page_boundary = prev_page_boundary + page_size_scaled;
+ uptr chunks_per_page = pn;
+ if (current_boundary < page_boundary) {
+ if (current_boundary > prev_page_boundary)
+ chunks_per_page++;
+ current_boundary += pnc;
+ if (current_boundary < page_boundary) {
+ chunks_per_page++;
+ current_boundary += chunk_size_scaled;
+ }
+ }
+ prev_page_boundary = page_boundary;
+
+ range_tracker.NextPage(counters.Get(i) == chunks_per_page);
+ }
+ }
+ range_tracker.Done();
+ }
+
private:
+ friend class MemoryMapper;
+
static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
// FreeArray is the array of free-d chunks (stored as 4-byte offsets).
// In the worst case it may require kRegionSize/SizeClassMap::kMinSize
Swap(a[i], a[RandN(rand_state, i + 1)]);
}
- RegionInfo *GetRegionInfo(uptr class_id) {
+ RegionInfo *GetRegionInfo(uptr class_id) const {
CHECK_LT(class_id, kNumClasses);
RegionInfo *regions =
reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
return &regions[class_id];
}
- uptr GetMetadataEnd(uptr region_beg) {
+ uptr GetMetadataEnd(uptr region_beg) const {
return region_beg + kRegionSize - kFreeArraySize;
}
- uptr GetChunkIdx(uptr chunk, uptr size) {
+ uptr GetChunkIdx(uptr chunk, uptr size) const {
if (!kUsingConstantSpaceBeg)
chunk -= SpaceBeg();
return (u32)offset / (u32)size;
}
- CompactPtrT *GetFreeArray(uptr region_beg) {
- return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
- kFreeArraySize);
+ CompactPtrT *GetFreeArray(uptr region_beg) const {
+ return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
}
bool MapWithCallback(uptr beg, uptr size) {
uptr num_freed_chunks) {
uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
if (region->mapped_free_array < needed_space) {
- CHECK_LE(needed_space, kFreeArraySize);
uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ CHECK_LE(new_mapped_free_array, kFreeArraySize);
uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
region->mapped_free_array;
uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
CHECK_LE(region->allocated_meta, region->mapped_meta);
region->exhausted = false;
+ // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
+ // MaybeReleaseToOS from releasing just allocated pages or protect these
+ // not yet used chunks some other way.
+
return true;
}
- void MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
- CompactPtrT first, CompactPtrT last) {
- uptr beg_ptr = CompactPtrToPointer(region_beg, first);
- uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
- ReleaseMemoryPagesToOS(beg_ptr, end_ptr);
- }
+ class MemoryMapper {
+ public:
+ MemoryMapper(const ThisT& base_allocator, uptr class_id)
+ : allocator(base_allocator),
+ region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
+ released_ranges_count(0) {
+ }
- // Attempts to release some RAM back to OS. The region is expected to be
- // locked.
- // Algorithm:
- // * Sort the chunks.
- // * Find ranges fully covered by free-d chunks
- // * Release them to OS with madvise.
+ uptr GetReleasedRangesCount() const {
+ return released_ranges_count;
+ }
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ // TODO(alekseyshl): The idea to explore is to check if we have enough
+ // space between num_freed_chunks*sizeof(CompactPtrT) and
+ // mapped_free_array to fit buffer_size bytes and use that space instead
+ // of mapping a temporary one.
+ return reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
+ }
+
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
+ UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
+ ReleaseMemoryPagesToOS(
+ allocator.CompactPtrToPointer(region_base, from),
+ allocator.CompactPtrToPointer(region_base, to));
+ released_ranges_count++;
+ }
+
+ private:
+ const ThisT& allocator;
+ const uptr region_base;
+ uptr released_ranges_count;
+ };
+
+ // Attempts to release RAM occupied by freed chunks back to OS. The region is
+ // expected to be locked.
void MaybeReleaseToOS(uptr class_id) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
if (interval_ms < 0)
return;
- u64 now_ns = NanoTime();
- if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
return; // Memory was returned recently.
- region->rtoi.last_release_at_ns = now_ns;
- uptr region_beg = GetRegionBeginBySizeClass(class_id);
- CompactPtrT *free_array = GetFreeArray(region_beg);
- SortArray(free_array, n);
-
- const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
- const uptr kScaledGranularity = page_size >> kCompactPtrScale;
-
- uptr range_beg = free_array[0];
- uptr prev = free_array[0];
- for (uptr i = 1; i < n; i++) {
- uptr chunk = free_array[i];
- CHECK_GT(chunk, prev);
- if (chunk - prev != scaled_chunk_size) {
- CHECK_GT(chunk - prev, scaled_chunk_size);
- if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
- MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
- region->rtoi.n_freed_at_last_release = region->stats.n_freed;
- region->rtoi.num_releases++;
- }
- range_beg = chunk;
- }
- prev = chunk;
+ MemoryMapper memory_mapper(*this, class_id);
+
+ ReleaseFreeMemoryToOS<MemoryMapper>(
+ GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
+ RoundUpTo(region->allocated_user, page_size) / page_size,
+ &memory_mapper);
+
+ if (memory_mapper.GetReleasedRangesCount() > 0) {
+ region->rtoi.n_freed_at_last_release = region->stats.n_freed;
+ region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
}
+ region->rtoi.last_release_at_ns = NanoTime();
}
};
#include "gtest/gtest.h"
+#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <vector>
#endif
+#if SANITIZER_CAN_USE_ALLOCATOR64
+
+class NoMemoryMapper {
+ public:
+ uptr last_request_buffer_size;
+
+ NoMemoryMapper() : last_request_buffer_size(0) {}
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ last_request_buffer_size = buffer_size;
+ return 0;
+ }
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
+};
+
+class RedZoneMemoryMapper {
+ public:
+ RedZoneMemoryMapper() {
+ const auto page_size = GetPageSize();
+ buffer = MmapOrDie(3ULL * page_size, "");
+ MprotectNoAccess(reinterpret_cast<uptr>(buffer), page_size);
+ MprotectNoAccess(reinterpret_cast<uptr>(buffer) + page_size * 2, page_size);
+ }
+ ~RedZoneMemoryMapper() {
+ UnmapOrDie(buffer, 3 * GetPageSize());
+ }
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ const auto page_size = GetPageSize();
+ CHECK_EQ(buffer_size, page_size);
+ memset(reinterpret_cast<void*>(reinterpret_cast<uptr>(buffer) + page_size),
+ 0, page_size);
+ return reinterpret_cast<uptr>(buffer) + page_size;
+ }
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {}
+
+ private:
+ void *buffer;
+};
+
+TEST(SanitizerCommon, SizeClassAllocator64PackedCounterArray) {
+ NoMemoryMapper no_memory_mapper;
+ typedef Allocator64::PackedCounterArray<NoMemoryMapper>
+ NoMemoryPackedCounterArray;
+
+ for (int i = 0; i < 64; i++) {
+ // Various valid counter max values packed into one word.
+ NoMemoryPackedCounterArray counters_2n(1, 1ULL << i, &no_memory_mapper);
+ EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
+
+ // Check the "all bits set" values too.
+ NoMemoryPackedCounterArray counters_2n1_1(1, ~0ULL >> i, &no_memory_mapper);
+ EXPECT_EQ(8ULL, no_memory_mapper.last_request_buffer_size);
+
+ // Verify the packing ratio; each counter is expected to be packed into the
+ // closest power-of-two number of bits.
+ NoMemoryPackedCounterArray counters(64, 1ULL << i, &no_memory_mapper);
+ EXPECT_EQ(8ULL * RoundUpToPowerOfTwo(i + 1),
+ no_memory_mapper.last_request_buffer_size);
+ }
+
+ RedZoneMemoryMapper memory_mapper;
+ typedef Allocator64::PackedCounterArray<RedZoneMemoryMapper>
+ RedZonePackedCounterArray;
+ // Go through 1, 2, 4, 8, .. 64 bits per counter.
+ for (int i = 0; i < 7; i++) {
+ // Make sure counters request one memory page for the buffer.
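+ // Each counter needs (1 << i) bits, so 64 >> i counters fit into one u64
+ // word and kNumCounters counters occupy exactly GetPageSize() / 8 words,
+ // i.e. one page.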
+ const u64 kNumCounters = (GetPageSize() / 8) * (64 >> i);
+ RedZonePackedCounterArray counters(kNumCounters,
+ 1ULL << ((1 << i) - 1),
+ &memory_mapper);
+ counters.Inc(0);
+ for (u64 c = 1; c < kNumCounters - 1; c++) {
+ ASSERT_EQ(0ULL, counters.Get(c));
+ counters.Inc(c);
+ ASSERT_EQ(1ULL, counters.Get(c - 1));
+ }
+ ASSERT_EQ(0ULL, counters.Get(kNumCounters - 1));
+ counters.Inc(kNumCounters - 1);
+
+ if (i > 0) {
+ counters.IncRange(0, kNumCounters - 1);
+ for (u64 c = 0; c < kNumCounters; c++)
+ ASSERT_EQ(2ULL, counters.Get(c));
+ }
+ }
+}
+
+class RangeRecorder {
+ public:
+ std::string reported_pages;
+
+ RangeRecorder()
+ : page_size_scaled_log(
+ Log2(GetPageSizeCached() >> Allocator64::kCompactPtrScale)),
+ last_page_reported(0) {}
+
+ void ReleasePageRangeToOS(u32 from, u32 to) {
+ from >>= page_size_scaled_log;
+ to >>= page_size_scaled_log;
+ ASSERT_LT(from, to);
+ if (!reported_pages.empty())
+ ASSERT_LT(last_page_reported, from);
+ reported_pages.append(from - last_page_reported, '.');
+ reported_pages.append(to - from, 'x');
+ last_page_reported = to;
+ }
+ private:
+ const uptr page_size_scaled_log;
+ u32 last_page_reported;
+};
+
+TEST(SanitizerCommon, SizeClassAllocator64FreePagesRangeTracker) {
+ typedef Allocator64::FreePagesRangeTracker<RangeRecorder> RangeTracker;
+
+ // 'x' denotes a page to be released, '.' denotes a page to be kept around.
+ const char* test_cases[] = {
+ "",
+ ".",
+ "x",
+ "........",
+ "xxxxxxxxxxx",
+ "..............xxxxx",
+ "xxxxxxxxxxxxxxxxxx.....",
+ "......xxxxxxxx........",
+ "xxx..........xxxxxxxxxxxxxxx",
+ "......xxxx....xxxx........",
+ "xxx..........xxxxxxxx....xxxxxxx",
+ "x.x.x.x.x.x.x.x.x.x.x.x.",
+ ".x.x.x.x.x.x.x.x.x.x.x.x",
+ ".x.x.x.x.x.x.x.x.x.x.x.x.",
+ "x.x.x.x.x.x.x.x.x.x.x.x.x",
+ };
+
+ for (auto test_case : test_cases) {
+ RangeRecorder range_recorder;
+ RangeTracker tracker(&range_recorder);
+ for (int i = 0; test_case[i] != 0; i++)
+ tracker.NextPage(test_case[i] == 'x');
+ tracker.Done();
+ // Strip trailing '.'-pages before comparing the results as they are not
+ // going to be reported to range_recorder anyway.
+ const char* last_x = strrchr(test_case, 'x');
+ std::string expected(
+ test_case,
+ last_x == nullptr ? 0 : (last_x - test_case + 1));
+ EXPECT_STREQ(expected.c_str(), range_recorder.reported_pages.c_str());
+ }
+}
+
+class ReleasedPagesTrackingMemoryMapper {
+ public:
+ std::set<u32> reported_pages;
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ reported_pages.clear();
+ return reinterpret_cast<uptr>(calloc(1, buffer_size));
+ }
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
+ free(reinterpret_cast<void*>(buffer));
+ }
+
+ void ReleasePageRangeToOS(u32 from, u32 to) {
+ uptr page_size_scaled =
+ GetPageSizeCached() >> Allocator64::kCompactPtrScale;
+ for (u32 i = from; i < to; i += page_size_scaled)
+ reported_pages.insert(i);
+ }
+};
+
+template <class Allocator>
+void TestReleaseFreeMemoryToOS() {
+ ReleasedPagesTrackingMemoryMapper memory_mapper;
+ const uptr kAllocatedPagesCount = 1024;
+ const uptr page_size = GetPageSizeCached();
+ const uptr page_size_scaled = page_size >> Allocator::kCompactPtrScale;
+ std::mt19937 r;
+ uint32_t rnd_state = 42;
+
+ for (uptr class_id = 1; class_id <= Allocator::SizeClassMapT::kLargestClassID;
+ class_id++) {
+ const uptr chunk_size = Allocator::SizeClassMapT::Size(class_id);
+ const uptr chunk_size_scaled = chunk_size >> Allocator::kCompactPtrScale;
+ const uptr max_chunks =
+ kAllocatedPagesCount * GetPageSizeCached() / chunk_size;
+
+ // Generate the random free list.
+ std::vector<u32> free_array;
+ bool in_free_range = false;
+ uptr current_range_end = 0;
+ for (uptr i = 0; i < max_chunks; i++) {
+ if (i == current_range_end) {
+ in_free_range = (my_rand_r(&rnd_state) & 1U) == 1;
+ current_range_end += my_rand_r(&rnd_state) % 100 + 1;
+ }
+ if (in_free_range)
+ free_array.push_back(i * chunk_size_scaled);
+ }
+ if (free_array.empty())
+ continue;
+ // Shuffle free_array to verify that ReleaseFreeMemoryToOS does not depend
+ // on the list ordering.
+ std::shuffle(free_array.begin(), free_array.end(), r);
+
+ Allocator::ReleaseFreeMemoryToOS(&free_array[0], free_array.size(),
+ chunk_size, kAllocatedPagesCount,
+ &memory_mapper);
+
+ // Verify that no released page is touched by a used chunk and that every
+ // range of free chunks big enough to contain entire memory pages had those
+ // pages released.
+ uptr verified_released_pages = 0;
+ std::set<u32> free_chunks(free_array.begin(), free_array.end());
+
+ u32 current_chunk = 0;
+ in_free_range = false;
+ u32 current_free_range_start = 0;
+ for (uptr i = 0; i <= max_chunks; i++) {
+ bool is_free_chunk = free_chunks.find(current_chunk) != free_chunks.end();
+
+ if (is_free_chunk) {
+ if (!in_free_range) {
+ in_free_range = true;
+ current_free_range_start = current_chunk;
+ }
+ } else {
+ // Verify that this used chunk does not touch any released page.
+ for (uptr i_page = current_chunk / page_size_scaled;
+ i_page <= (current_chunk + chunk_size_scaled - 1) /
+ page_size_scaled;
+ i_page++) {
+ bool page_released =
+ memory_mapper.reported_pages.find(i_page * page_size_scaled) !=
+ memory_mapper.reported_pages.end();
+ ASSERT_EQ(false, page_released);
+ }
+
+ if (in_free_range) {
+ in_free_range = false;
+ // Verify that all memory pages entirely covered by this range of free
+ // chunks were released.
+ u32 page = RoundUpTo(current_free_range_start, page_size_scaled);
+ while (page + page_size_scaled <= current_chunk) {
+ bool page_released =
+ memory_mapper.reported_pages.find(page) !=
+ memory_mapper.reported_pages.end();
+ ASSERT_EQ(true, page_released);
+ verified_released_pages++;
+ page += page_size_scaled;
+ }
+ }
+ }
+
+ current_chunk += chunk_size_scaled;
+ }
+
+ ASSERT_EQ(memory_mapper.reported_pages.size(), verified_released_pages);
+ }
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
+ TestReleaseFreeMemoryToOS<Allocator64>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
+ TestReleaseFreeMemoryToOS<Allocator64Compact>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
+ TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
+}
+
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
TEST(SanitizerCommon, TwoLevelByteMap) {
const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
const u64 n = kSize1 * kSize2;