#include "chunk.h"
#include "common.h"
#include "list.h"
+#include "mem_map.h"
#include "memtag.h"
#include "mutex.h"
#include "options.h"
LargeBlock::Header *Next;
uptr CommitBase;
uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
- [[no_unique_address]] MapPlatformData Data;
+ MemMapT MemMap;
};
static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
} // namespace LargeBlock
static void unmap(LargeBlock::Header *H) {
- MapPlatformData Data = H->Data;
- unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
+ // Note that `H->MemMap` is stored on the pages managed by itself. Take
+ // over the ownership before unmap() so that any operation along with
+ // unmap() won't touch the inaccessible pages.
+ MemMapT MemMap = H->MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
}
class MapAllocatorNoCache {
template <typename Config>
void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
- uptr AllocPos, uptr Flags, MapPlatformData *Data) {
+ uptr AllocPos, uptr Flags, MemMapT &MemMap) {
const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
- map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
- "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
- map(reinterpret_cast<void *>(UntaggedPos),
- CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
- MAP_RESIZABLE | Flags, Data);
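+ // Only the range below `UntaggedPos` is remapped with MAP_MEMTAG; the
+ // tail of the committed region stays untagged.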
+ MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+ MAP_RESIZABLE | MAP_MEMTAG | Flags);
+ MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+ "scudo:secondary", MAP_RESIZABLE | Flags);
} else {
- map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+ const uptr RemapFlags =
MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
- Flags,
- Data);
+ Flags;
+ MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
}
}
CachedBlock Entry;
Entry.CommitBase = H->CommitBase;
Entry.CommitSize = H->CommitSize;
- Entry.MapBase = H->MapBase;
- Entry.MapSize = H->MapSize;
Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
- Entry.Data = H->Data;
+ Entry.MemMap = H->MemMap;
Entry.Time = Time;
if (useMemoryTagging<Config>(Options)) {
if (Interval == 0 && !SCUDO_FUCHSIA) {
// Release the memory and make it inaccessible at the same time by
// creating a new MAP_NOACCESS mapping on top of the existing mapping.
// Fuchsia does not support replacing mappings by creating a new mapping
// on top so we just do the two syscalls there.
Entry.Time = 0;
mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
- Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
+ Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
} else {
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
- &Entry.Data);
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+ MAP_NOACCESS);
}
} else if (Interval == 0) {
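+ // Release the backing pages right away; released pages read back as
+ // zero, which retrieve() reports through `Time == 0`.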
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
do {
else if (Interval >= 0)
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
if (!EntryCached)
- unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
- &Entry.Data);
+ Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
bool retrieve(Options Options, uptr Size, uptr Alignment,
LargeBlock::addHeaderTag<Config>(HeaderPos));
*Zeroed = Entry.Time == 0;
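+ // With memory tagging, cached entries are stored MAP_NOACCESS; restore
+ // access before handing the block back.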
if (useMemoryTagging<Config>(Options))
- setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
if (useMemoryTagging<Config>(Options)) {
if (*Zeroed) {
}
(*H)->CommitBase = Entry.CommitBase;
(*H)->CommitSize = Entry.CommitSize;
- (*H)->MapBase = Entry.MapBase;
- (*H)->MapSize = Entry.MapSize;
- (*H)->Data = Entry.Data;
+ (*H)->MemMap = Entry.MemMap;
return true;
}
ScopedLock L(Mutex);
for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
if (Quarantine[I].CommitBase) {
- unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
- Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
+ MemMapT &MemMap = Quarantine[I].MemMap;
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
Quarantine[I].CommitBase = 0;
}
}
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
- for (u32 I = 0; I < MaxCount; I++)
- if (Entries[I].CommitBase)
- setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
- &Entries[I].Data);
+ for (u32 I = 0; I < MaxCount; I++) {
+ if (Entries[I].CommitBase) {
+ Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+ Entries[I].CommitSize, 0);
+ }
+ }
QuarantinePos = -1U;
}
private:
void empty() {
- struct {
- void *MapBase;
- uptr MapSize;
- MapPlatformData Data;
- } MapInfo[Config::SecondaryCacheEntriesArraySize];
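+ // Collect the owning MemMaps under the lock; the actual unmapping is done
+ // after the lock is released.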
+ MemMapT MapInfo[Config::SecondaryCacheEntriesArraySize];
uptr N = 0;
{
ScopedLock L(Mutex);
for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
if (!Entries[I].CommitBase)
continue;
- MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
- MapInfo[N].MapSize = Entries[I].MapSize;
- MapInfo[N].Data = Entries[I].Data;
+ MapInfo[N] = Entries[I].MemMap;
Entries[I].CommitBase = 0;
N++;
}
EntriesCount = 0;
IsFullEvents = 0;
}
- for (uptr I = 0; I < N; I++)
- unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
- &MapInfo[I].Data);
+ for (uptr I = 0; I < N; I++) {
+ MemMapT &MemMap = MapInfo[I];
+ MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ }
}
struct CachedBlock {
uptr CommitBase;
uptr CommitSize;
- uptr MapBase;
- uptr MapSize;
uptr BlockBegin;
- [[no_unique_address]] MapPlatformData Data;
+ MemMapT MemMap;
u64 Time;
};
OldestTime = Entry.Time;
return;
}
- releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+ Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
Entry.Time = 0;
}
AllocatedBytes += BlockSize;
NumberOfAllocs++;
Stats.add(StatAllocated, BlockSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return Ptr;
}
}
- MapPlatformData Data = {};
+ ReservedMemoryT ReservedMemory;
const uptr MapSize = RoundedSize + 2 * PageSize;
- uptr MapBase = reinterpret_cast<uptr>(
- map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
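+ // With MAP_ALLOWNOMEM, a failed reservation shows up as a null base and is
+ // caught below.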
+
+ // Take over the entire ownership of the reserved region.
+ MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+ ReservedMemory.getCapacity());
+ uptr MapBase = MemMap.getBase();
if (UNLIKELY(!MapBase))
return nullptr;
uptr CommitBase = MapBase + PageSize;
// We only trim the extra memory on 32-bit platforms: 64-bit platforms
// are less constrained memory wise, and that saves us two syscalls.
if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
- unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MemMap.unmap(MapBase, NewMapBase - MapBase);
MapBase = NewMapBase;
}
const uptr NewMapEnd =
CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
DCHECK_LE(NewMapEnd, MapEnd);
if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
- unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
}
}
const uptr CommitSize = MapEnd - PageSize - CommitBase;
const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
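+ // Commit only the pages the allocation will use; the guard page on either
+ // side of the reservation stays inaccessible.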
- mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
+ mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
const uptr HeaderPos =
AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
if (useMemoryTagging<Config>(Options))
storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
reinterpret_cast<uptr>(H + 1));
- H->MapBase = MapBase;
- H->MapSize = MapEnd - MapBase;
H->CommitBase = CommitBase;
H->CommitSize = CommitSize;
- H->Data = Data;
+ H->MemMap = MemMap;
if (BlockEndPtr)
*BlockEndPtr = CommitBase + CommitSize;
{
LargestSize = CommitSize;
NumberOfAllocs++;
Stats.add(StatAllocated, CommitSize);
- Stats.add(StatMapped, H->MapSize);
+ Stats.add(StatMapped, H->MemMap.getCapacity());
}
return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
}
FreedBytes += CommitSize;
NumberOfFrees++;
Stats.sub(StatAllocated, CommitSize);
- Stats.sub(StatMapped, H->MapSize);
+ Stats.sub(StatMapped, H->MemMap.getCapacity());
}
Cache.store(Options, H);
}