uptr idx = IdToOffset(id);
uptr block_idx = GetBlockIdx(idx);
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
- const uptr *stack_trace = blocks_[block_idx].GetOrUnpack();
+ const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
if (!stack_trace)
return {};
stack_trace += GetInBlockIdx(idx);
}
uptr StackStore::Allocated() const {
- uptr next_block = GetBlockIdx(
- RoundUpTo(atomic_load_relaxed(&total_frames_), kBlockSizeFrames));
- uptr res = 0;
- for (uptr i = 0; i < next_block; ++i) res += blocks_[i].Allocated();
- return res + sizeof(*this);
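+  // allocated_ is maintained by Map() and Unmap(), so a single relaxed load
+  // is sufficient here.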
+ return atomic_load_relaxed(&allocated_) + sizeof(*this);
}
uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
// Fits into a single block.
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
*idx = start;
- return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
+ return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
}
// Retry. We can't use a range allocated in two different blocks.
}
}
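+// Accounting wrappers: every block mapping and unmapping goes through these,
+// so allocated_ reflects the bytes currently mapped by the store.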
+void *StackStore::Map(uptr size, const char *mem_type) {
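+  // The counter is only a statistic consumed by Allocated(), so relaxed
+  // ordering is enough.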
+ atomic_fetch_add(&allocated_, size, memory_order_relaxed);
+ return MmapNoReserveOrDie(size, mem_type);
+}
+
+void StackStore::Unmap(void *addr, uptr size) {
+ atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
+ UnmapOrDie(addr, size);
+}
+
uptr StackStore::Pack(Compression type) {
uptr res = 0;
- for (BlockInfo &b : blocks_) res += b.Pack(type);
+ for (BlockInfo &b : blocks_) res += b.Pack(type, this);
return res;
}
void StackStore::TestOnlyUnmap() {
- for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
+ for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
internal_memset(this, 0, sizeof(*this));
}
return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
}
-uptr *StackStore::BlockInfo::Create() {
+uptr *StackStore::BlockInfo::Create(StackStore *store) {
SpinMutexLock l(&mtx_);
uptr *ptr = Get();
if (!ptr) {
- ptr = reinterpret_cast<uptr *>(
- MmapNoReserveOrDie(kBlockSizeBytes, "StackStore"));
+ ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
}
return ptr;
}
-uptr *StackStore::BlockInfo::GetOrCreate() {
+uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
uptr *ptr = Get();
if (LIKELY(ptr))
return ptr;
- return Create();
+ return Create(store);
}
class SLeb128Encoder {
};
} // namespace
-uptr *StackStore::BlockInfo::GetOrUnpack() {
+uptr *StackStore::BlockInfo::GetOrUnpack(StackStore *store) {
SpinMutexLock l(&mtx_);
switch (state) {
case State::Storing:
uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
- uptr *unpacked = reinterpret_cast<uptr *>(
- MmapNoReserveOrDie(kBlockSizeBytes, "StackStoreUnpack"));
+ uptr *unpacked =
+ reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));
uptr *unpacked_end;
switch (header->type) {
MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
- UnmapOrDie(ptr, packed_size_aligned);
+ store->Unmap(ptr, packed_size_aligned);
state = State::Unpacked;
return Get();
}
-uptr StackStore::BlockInfo::Pack(Compression type) {
+uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
if (type == Compression::None)
return 0;
if (!ptr || !Stored(0))
return 0;
- u8 *packed = reinterpret_cast<u8 *>(
- MmapNoReserveOrDie(kBlockSizeBytes, "StackStorePack"));
+ u8 *packed =
+ reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
u8 *alloc_end = packed + kBlockSizeBytes;
if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
VPrintf(1, "Undo and keep block unpacked\n");
MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
- UnmapOrDie(packed, kBlockSizeBytes);
+ store->Unmap(packed, kBlockSizeBytes);
state = State::Unpacked;
return 0;
}
uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
- UnmapOrDie(packed + packed_size_aligned,
- kBlockSizeBytes - packed_size_aligned);
+ store->Unmap(packed + packed_size_aligned,
+ kBlockSizeBytes - packed_size_aligned);
MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);
atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
- UnmapOrDie(ptr, kBlockSizeBytes);
+ store->Unmap(ptr, kBlockSizeBytes);
state = State::Packed;
return kBlockSizeBytes - packed_size_aligned;
}
-uptr StackStore::BlockInfo::Allocated() const {
- SpinMutexLock l(&mtx_);
- switch (state) {
- case State::Packed: {
- const PackedHeader *ptr = reinterpret_cast<const PackedHeader *>(Get());
- CHECK_NE(nullptr, ptr);
- return RoundUpTo(ptr->size, GetPageSizeCached());
- }
- case State::Unpacked:
- case State::Storing:
- return kBlockSizeBytes;
- }
-}
-
-void StackStore::BlockInfo::TestOnlyUnmap() {
+void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
if (uptr *ptr = Get())
- UnmapOrDie(ptr, kBlockSizeBytes);
+ store->Unmap(ptr, kBlockSizeBytes);
}
bool StackStore::BlockInfo::Stored(uptr n) {