void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
          MapPlatformData *Data) {
-  DCHECK_EQ(Size % PAGE_SIZE, 0);
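+  // Use the runtime page size, queried once and then cached, rather than
+  // the compile-time PAGE_SIZE macro, which may not match the actual page
+  // size of the system.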
+  DCHECK_EQ(Size % getPageSizeCached(), 0);
  const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
  // For MAP_NOACCESS, just allocate a Vmar and return.
  // No need to track the Vmo if we don't intend on resizing it. Close it.
  if (Flags & MAP_RESIZABLE) {
    DCHECK(Data);
-    DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
-    Data->Vmo = Vmo;
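+    // A resizable mapping can be mapped again as it grows, so the Vmo may
+    // already be tracked; if it is, it must be the same handle.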
+    if (Data->Vmo == ZX_HANDLE_INVALID)
+      Data->Vmo = Vmo;
+    else
+      DCHECK_EQ(Data->Vmo, Vmo);
  } else {
    CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
  }
  static uptr getSizeByClassId(uptr ClassId) {
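+    // Batch blocks are referenced through compact pointers, which encode
+    // offsets in units of (1 << CompactPtrScale), so the size of the batch
+    // class has to be a multiple of that granularity.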
    return (ClassId == SizeClassMap::BatchClassId)
-               ? sizeof(TransferBatch)
+               ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
               : SizeClassMap::getSizeByClassId(ClassId);
  }
      memset(Buffer, 0, BufferSize);
    } else {
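+      // map() operates on whole pages, so round the buffer size up to the
+      // cached page size; the destructor unmaps with the same rounded size.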
      Buffer = reinterpret_cast<uptr *>(
-          map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+          map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
+              "scudo:counters", MAP_ALLOWNOMEM));
    }
  }
  ~PackedCounterArray() {
    if (Buffer == &StaticBuffer[0])
      Mutex.unlock();
    else
-      unmap(reinterpret_cast<void *>(Buffer), BufferSize);
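+      // Mirror the page-size rounding applied when the buffer was mapped so
+      // that the entire mapping is released.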
+      unmap(reinterpret_cast<void *>(Buffer),
+            roundUpTo(BufferSize, getPageSizeCached()));
  }
  bool isAllocated() const { return !!Buffer; }
                UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
    return false;
  }
-  void store(UNUSED Options Options, UNUSED LargeBlock::Header *H) { unmap(H); }
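+  // Without a cache, store() releases the block immediately; H is consumed
+  // by unmap() and is therefore no longer UNUSED.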
+  void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
  bool canCache(UNUSED uptr Size) { return false; }
  void disable() {}
  void enable() {}