}
void Drain(SizeClassAllocator *allocator) {
+ MemoryMapperT memory_mapper(*allocator);
for (uptr i = 1; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
- while (c->count > 0) Drain(c, allocator, i, c->count);
+ while (c->count > 0) Drain(&memory_mapper, c, allocator, i, c->count);
}
}
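
The hunk above hoists the MemoryMapperT construction out of the per-class loop, so one scratch mapping serves every size class instead of being created and torn down per class. A minimal standalone sketch of that pattern, not part of the patch — ScratchMapper and DrainClass are hypothetical stand-ins, not sanitizer APIs:

#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for MemoryMapperT: one scratch buffer that
// outlives the whole loop instead of being remapped for every class.
struct ScratchMapper {
  std::vector<unsigned char> buf;
};

static void DrainClass(ScratchMapper *m, int class_id, std::size_t need) {
  if (m->buf.size() < need)
    m->buf.resize(need);  // grows at most a handful of times overall
  std::printf("class %d drained via %zu-byte scratch\n", class_id,
              m->buf.size());
}

int main() {
  ScratchMapper mapper;  // constructed once, as in Drain() above
  for (int c = 1; c < 5; c++)
    DrainClass(&mapper, c, 64u * c);
}
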
c->count = num_requested_chunks;
return true;
}
+
NOINLINE void DrainHalfMax(PerClass *c, SizeClassAllocator *allocator,
uptr class_id) {
- Drain(c, allocator, class_id, c->max_count / 2);
+ MemoryMapperT memory_mapper(*allocator);
+ Drain(&memory_mapper, c, allocator, class_id, c->max_count / 2);
}
- NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
- uptr count) {
+ void Drain(MemoryMapperT *memory_mapper, PerClass *c,
+ SizeClassAllocator *allocator, uptr class_id, uptr count) {
CHECK_GE(c->count, count);
const uptr first_idx_to_drain = c->count - count;
c->count -= count;
- allocator->ReturnToAllocator(&stats_, class_id,
+ allocator->ReturnToAllocator(memory_mapper, &stats_, class_id,
&c->chunks[first_idx_to_drain], count);
}
};
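
DrainHalfMax keeps NOINLINE so the cold drain path stays out of the inlined allocation fast path, while the inner Drain drops NOINLINE and becomes the shared worker that only borrows a caller-owned mapper. A hedged sketch of that hot/cold split — names are hypothetical, and the noinline spelling is the GCC/Clang attribute:

#include <cstdio>

// The cold entry point owns the helper's lifetime; the shared worker
// borrows it through a parameter.
struct Mapper {};

static void DrainCount(Mapper *m, int *count, int n) {  // shared worker
  (void)m;  // a real worker would release memory through the mapper
  *count -= n;
}

__attribute__((noinline)) static void DrainHalf(int *count) {
  Mapper mapper;  // slow path builds its own short-lived mapper
  DrainCount(&mapper, count, *count / 2);
}

int main() {
  int count = 8;
  DrainHalf(&count);
  std::printf("count=%d\n", count);  // prints count=4
}
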
explicit MemoryMapper(const Allocator &allocator) : allocator_(allocator) {}
- uptr GetReleasedRangesCount() const { return released_ranges_count_; }
+ ~MemoryMapper() {
+ if (buffer_)
+ UnmapOrDie(buffer_, buffer_size_);
+ }
- uptr GetReleasedBytes() const { return released_bytes_; }
+ bool GetAndResetStats(uptr &ranges, uptr &bytes) {
+ ranges = released_ranges_count_;
+ released_ranges_count_ = 0;
+ bytes = released_bytes_;
+ released_bytes_ = 0;
+ return ranges != 0;
+ }
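
Because one mapper now spans several release operations, the pair of cumulative getters is replaced by a read-and-reset accessor, so each caller observes only the releases it triggered. A standalone sketch of the intended call pattern — it mirrors GetAndResetStats above, but everything else is a hypothetical stand-in:

#include <cstdio>

typedef unsigned long uptr;  // stand-in for the sanitizer typedef

struct Mapper {
  uptr released_ranges_count_ = 0, released_bytes_ = 0;
  void Release(uptr bytes) {  // hypothetical release hook
    released_ranges_count_++;
    released_bytes_ += bytes;
  }
  // Mirrors GetAndResetStats: true iff anything was released since the
  // previous call; both counters are consumed in the same call.
  bool GetAndResetStats(uptr &ranges, uptr &bytes) {
    ranges = released_ranges_count_;
    released_ranges_count_ = 0;
    bytes = released_bytes_;
    released_bytes_ = 0;
    return ranges != 0;
  }
};

int main() {
  Mapper m;
  m.Release(4096);
  uptr ranges, bytes;
  if (m.GetAndResetStats(ranges, bytes))   // 1 range, 4096 bytes
    std::printf("%lu ranges, %lu bytes\n", ranges, bytes);
  if (!m.GetAndResetStats(ranges, bytes))  // counters were consumed above
    std::printf("nothing new released\n");
}
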
void *MapPackedCounterArrayBuffer(uptr buffer_size) {
// TODO(alekseyshl): The idea to explore is to check whether there is
// enough space between num_freed_chunks*sizeof(CompactPtrT) and
// mapped_free_array to fit buffer_size bytes, and to use that space
// instead of mapping a temporary buffer.
- return MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
- }
-
- void UnmapPackedCounterArrayBuffer(void *buffer, uptr buffer_size) {
- UnmapOrDie(buffer, buffer_size);
+ if (buffer_size_ < buffer_size) {
+ if (buffer_)
+ UnmapOrDie(buffer_, buffer_size_);
+ buffer_ = MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters");
+ buffer_size_ = buffer_size;
+ } else {
+ internal_memset(buffer_, 0, buffer_size);
+ }
+ return buffer_;
}
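
The counter buffer thus becomes a grow-only cache: remapped only when a larger request arrives, merely zeroed on reuse. The asymmetry is deliberate — a fresh mapping from the OS is already zero-filled, so only the reuse path needs the memset. A sketch of the same policy under stated assumptions, with malloc/calloc/free standing in for MmapOrDieOnFatalError/UnmapOrDie:

#include <cstddef>
#include <cstdlib>
#include <cstring>

struct Scratch {
  void *buffer_ = nullptr;
  size_t buffer_size_ = 0;

  void *Get(size_t size) {
    if (buffer_size_ < size) {
      free(buffer_);              // drop the smaller buffer, if any
      buffer_ = calloc(1, size);  // fresh allocation arrives zeroed
      buffer_size_ = size;
    } else {
      memset(buffer_, 0, size);   // reuse path: zero only what is asked for
    }
    return buffer_;
  }
  ~Scratch() { free(buffer_); }   // single teardown, as in ~MemoryMapper
};

int main() {
  Scratch s;
  s.Get(128);  // allocates
  s.Get(64);   // reuses the 128-byte buffer, zeroing the first 64 bytes
  s.Get(256);  // grows: free + fresh zeroed allocation
}
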
// Releases the [from, to) range of pages back to the OS.
const Allocator &allocator_;
uptr released_ranges_count_ = 0;
uptr released_bytes_ = 0;
+ void *buffer_ = nullptr;
+ uptr buffer_size_ = 0;
};
template <class Params>
}
void ForceReleaseToOS() {
+ MemoryMapperT memory_mapper(*this);
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
- MaybeReleaseToOS(class_id, true /*force*/);
+ MaybeReleaseToOS(&memory_mapper, class_id, true /*force*/);
}
}
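
ForceReleaseToOS builds the mapper once, outside the per-class region locks, so every locked MaybeReleaseToOS call reuses the same scratch. A hypothetical sketch of that shape, with a std::vector playing the role of the mapper's buffer:

#include <mutex>
#include <vector>

// The lock scope is per class, but the scratch spans all critical sections.
constexpr int kNumClasses = 4;
std::mutex region_mutex[kNumClasses];

static void MaybeRelease(std::vector<char> *scratch, int class_id) {
  scratch->assign(64u * class_id, 0);  // reallocates only when it must grow
}

int main() {
  std::vector<char> scratch;  // plays the role of memory_mapper above
  for (int class_id = 1; class_id < kNumClasses; class_id++) {
    std::lock_guard<std::mutex> l(region_mutex[class_id]);
    MaybeRelease(&scratch, class_id);  // reuses capacity across classes
  }
}
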
alignment <= SizeClassMap::kMaxSize;
}
- NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ NOINLINE void ReturnToAllocator(MemoryMapperT *memory_mapper,
+ AllocatorStats *stat, uptr class_id,
const CompactPtrT *chunks, uptr n_chunks) {
RegionInfo *region = GetRegionInfo(class_id);
uptr region_beg = GetRegionBeginBySizeClass(class_id);
region->num_freed_chunks = new_num_freed_chunks;
region->stats.n_freed += n_chunks;
- MaybeReleaseToOS(class_id, false /*force*/);
+ MaybeReleaseToOS(memory_mapper, class_id, false /*force*/);
}
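
ReturnToAllocator now threads the caller-owned mapper down to MaybeReleaseToOS rather than letting the bottom of the call chain construct one per call. A tiny sketch of this injection pattern, all names hypothetical:

// The caller owns the helper; each level of the chain forwards it
// instead of constructing a fresh one at the leaf.
struct Mapper {
  int mappings_created = 0;
};

static void MaybeRelease(Mapper *m) { (void)m; }  // borrows, never maps

static void ReturnChunks(Mapper *m) { MaybeRelease(m); }

int main() {
  Mapper mapper;  // one construction for the whole batch
  for (int i = 0; i < 1000; i++)
    ReturnChunks(&mapper);  // before the patch: one mapper (and mmap) per call
}
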
NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
buffer = reinterpret_cast<u64*>(
memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
}
- ~PackedCounterArray() {
- if (buffer) {
- memory_mapper->UnmapPackedCounterArrayBuffer(buffer, buffer_size);
- }
- }
bool IsAllocated() const {
return !!buffer;
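
With the destructor gone, PackedCounterArray is a pure borrower: the mapper owns the buffer and unmaps it exactly once, in its own destructor. A hypothetical sketch of that ownership shift, not the sanitizer types themselves:

#include <cstddef>
#include <vector>

// The long-lived mapper owns the storage, so the short-lived counter
// array no longer needs a destructor at all.
struct Mapper {
  std::vector<unsigned long> buf;  // owner: freed exactly once, in ~Mapper
  unsigned long *GetBuffer(std::size_t n) {
    buf.assign(n, 0);  // zeroed on every reuse
    return buf.data();
  }
};

struct CounterArray {  // borrower: no ~CounterArray required
  unsigned long *buffer;
  CounterArray(Mapper *m, std::size_t n) : buffer(m->GetBuffer(n)) {}
};

int main() {
  Mapper m;
  for (int i = 0; i < 3; i++) {
    CounterArray a(&m, 128);  // each round reuses the mapper's storage
  }
}  // storage released once, here
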
//
// TODO(morehouse): Support a callback on memory release so HWASan can release
// aliases as well.
- void MaybeReleaseToOS(uptr class_id, bool force) {
+ void MaybeReleaseToOS(MemoryMapperT *memory_mapper, uptr class_id,
+ bool force) {
RegionInfo *region = GetRegionInfo(class_id);
const uptr chunk_size = ClassIdToSize(class_id);
const uptr page_size = GetPageSizeCached();
}
}
- MemoryMapper<ThisT> memory_mapper(*this);
-
ReleaseFreeMemoryToOS(
GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
- RoundUpTo(region->allocated_user, page_size) / page_size,
- &memory_mapper, class_id);
+ RoundUpTo(region->allocated_user, page_size) / page_size, memory_mapper,
+ class_id);
- if (memory_mapper.GetReleasedRangesCount() > 0) {
+ uptr ranges, bytes;
+ if (memory_mapper->GetAndResetStats(ranges, bytes)) {
region->rtoi.n_freed_at_last_release = region->stats.n_freed;
- region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
- region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ region->rtoi.num_releases += ranges;
+ region->rtoi.last_released_bytes = bytes;
}
region->rtoi.last_release_at_ns = MonotonicNanoTime();
}
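
One subtlety the reset-on-read accessor guards against: with a mapper shared across classes, cumulative counters would attribute earlier classes' releases to later ones. A small sketch of the hazard — hypothetical except for the GetAndResetStats shape:

#include <cassert>

typedef unsigned long uptr;  // stand-in for the sanitizer typedef

struct Mapper {
  uptr bytes_ = 0;
  void Release(uptr b) { bytes_ += b; }  // hypothetical release hook
  bool GetAndResetStats(uptr &bytes) {
    bytes = bytes_;
    bytes_ = 0;
    return bytes != 0;
  }
};

int main() {
  Mapper m;  // shared across all classes, as in ForceReleaseToOS
  uptr per_class[3] = {};
  for (int c = 0; c < 3; c++) {
    m.Release(100);  // this class releases 100 bytes
    uptr b;
    if (m.GetAndResetStats(b))
      per_class[c] = b;
  }
  assert(per_class[2] == 100);  // would be 300 with cumulative counters
}
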