}
static u8 *MmapNewPagesAndPoisonShadow(uptr size) {
- CHECK(IsAligned(size, kPageSize));
+ CHECK(IsAligned(size, GetPageSizeCached()));
u8 *res = (u8*)MmapOrDie(size, __FUNCTION__);
PoisonShadow((uptr)res, size, kAsanHeapLeftRedzoneMagic);
if (flags()->debug) {
uptr mmap_size = Max(size, kMinMmapSize);
uptr n_chunks = mmap_size / size;
CHECK(n_chunks * size == mmap_size);
- if (size < kPageSize) {
+ uptr PageSize = GetPageSizeCached();
+ if (size < PageSize) {
// Size is small, just poison the last chunk.
n_chunks--;
} else {
// Size is large; allocate an extra page on the right and poison it.
- mmap_size += kPageSize;
+ mmap_size += PageSize;
}
CHECK(n_chunks > 0);
u8 *mem = MmapNewPagesAndPoisonShadow(mmap_size);
}
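To make the debug-mode chunk arithmetic concrete, a worked example with hypothetical values (this excerpt does not show kMinMmapSize; assume 1 << 16 and a 4K page): with size = 1 << 10 (< PageSize), mmap_size = Max(1 << 10, 1 << 16) = 1 << 16 and n_chunks = 64; the decrement drops it to 63, leaving the last chunk permanently poisoned as a redzone. With size = 1 << 13 (>= PageSize), mmap_size = 1 << 16 and n_chunks = 8, and one extra poisoned guard page is appended on the right.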
void *asan_valloc(uptr size, StackTrace *stack) {
- void *ptr = (void*)Allocate(kPageSize, size, stack);
+ void *ptr = (void*)Allocate(GetPageSizeCached(), size, stack);
__asan_malloc_hook(ptr, size);
return ptr;
}
void *asan_pvalloc(uptr size, StackTrace *stack) {
- size = RoundUpTo(size, kPageSize);
+ uptr PageSize = GetPageSizeCached();
+ size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
- size = kPageSize;
+ size = PageSize;
}
- void *ptr = (void*)Allocate(kPageSize, size, stack);
+ void *ptr = (void*)Allocate(PageSize, size, stack);
__asan_malloc_hook(ptr, size);
return ptr;
}
}
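For reference, the semantics of the two entry points above, as a hedged usage sketch (sizes assume a 4K page):

  void *a = valloc(100);   // page-aligned; requested size stays 100
  void *b = pvalloc(100);  // page-aligned; size rounded up to 4096
  void *c = pvalloc(0);    // special-cased above: allocates one full page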
void FakeStack::AllocateOneSizeClass(uptr size_class) {
- CHECK(ClassMmapSize(size_class) >= kPageSize);
+ CHECK(ClassMmapSize(size_class) >= GetPageSizeCached());
uptr new_mem = (uptr)MmapOrDie(
ClassMmapSize(size_class), __FUNCTION__);
// Printf("T%d new_mem[%zu]: %p-%p mmap %zu\n",
uptr sp = (uptr)ucp->uc_stack.ss_sp;
uptr size = ucp->uc_stack.ss_size;
// Align to page size.
- uptr bottom = sp & ~(kPageSize - 1);
+ uptr PageSize = GetPageSizeCached();
+ uptr bottom = sp & ~(PageSize - 1);
size += sp - bottom;
- size = RoundUpTo(size, kPageSize);
+ size = RoundUpTo(size, PageSize);
PoisonShadow(bottom, size, 0);
}
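The mask trick above relies on the page size being a power of two. A minimal standalone illustration (hypothetical addresses, not from the source):

  #include <cassert>
  #include <cstdint>
  int main() {
    uintptr_t page = 0x1000;              // assume a 4K page
    uintptr_t sp = 0x7ffee3a4;            // hypothetical stack pointer
    uintptr_t bottom = sp & ~(page - 1);  // 0x7ffee000: aligned down
    assert(bottom % page == 0 && bottom <= sp);
    assert(sp - bottom < page);           // at most one page of slack
    return 0;
  }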
#else
return malloc_zone_valloc(system_malloc_zone, size);
}
GET_STACK_TRACE_HERE_FOR_MALLOC;
- return asan_memalign(kPageSize, size, &stack);
+ return asan_memalign(GetPageSizeCached(), size, &stack);
}
#define GET_ZONE_FOR_PTR(ptr) \
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
static void ReserveShadowMemoryRange(uptr beg, uptr end) {
- CHECK((beg % kPageSize) == 0);
- CHECK(((end + 1) % kPageSize) == 0);
+ uptr PageSize = GetPageSizeCached();
+ CHECK((beg % PageSize) == 0);
+ CHECK(((end + 1) % PageSize) == 0);
uptr size = end - beg + 1;
void *res = MmapFixedNoReserve(beg, size);
if (res != (void*)beg) {
int local_stack;
AsanThread *curr_thread = asanThreadRegistry().GetCurrent();
CHECK(curr_thread);
+ uptr PageSize = GetPageSizeCached();
uptr top = curr_thread->stack_top();
- uptr bottom = ((uptr)&local_stack - kPageSize) & ~(kPageSize-1);
+ uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1);
PoisonShadow(bottom, top - bottom, 0);
}
Printf("Stats: %zuM really freed by %zu calls\n",
really_freed>>20, real_frees);
Printf("Stats: %zuM (%zu full pages) mmaped in %zu calls\n",
- mmaped>>20, mmaped / kPageSize, mmaps);
+ mmaped>>20, mmaped / GetPageSizeCached(), mmaps);
PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size);
PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
AsanThread *AsanThread::Create(u32 parent_tid, thread_callback_t start_routine,
void *arg, StackTrace *stack) {
- uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
AsanThread *thread = (AsanThread*)MmapOrDie(size, __FUNCTION__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
- const uptr kSummaryAllocSize = kPageSize;
+ const uptr kSummaryAllocSize = PageSize;
CHECK_LE(sizeof(AsanThreadSummary), kSummaryAllocSize);
AsanThreadSummary *summary =
- (AsanThreadSummary*)MmapOrDie(kPageSize, "AsanThreadSummary");
+ (AsanThreadSummary*)MmapOrDie(PageSize, "AsanThreadSummary");
summary->Init(parent_tid, stack);
summary->set_thread(thread);
thread->set_summary(summary);
// and we don't want it to have any poisoned stack.
ClearShadowForThreadStack();
fake_stack().Cleanup();
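+ // Recompute the page-rounded size that MmapOrDie used in Create(), so
+ // UnmapOrDie below releases the entire mapping.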
- uptr size = RoundUpTo(sizeof(AsanThread), kPageSize);
+ uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
UnmapOrDie(this, size);
}
namespace __sanitizer {
+uptr GetPageSizeCached() {
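+ // Lazily cache the first GetPageSize() result; if two threads race on
+ // the first call they simply compute and store the same value.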
+ static uptr PageSize;
+ if (!PageSize)
+ PageSize = GetPageSize();
+ return PageSize;
+}
+
// By default, dump to stderr. If report_fd is kInvalidFd, try to obtain file
// descriptor by opening file in report_path.
static fd_t report_fd = kStderrFd;
uptr ReadFileToBuffer(const char *file_name, char **buff,
uptr *buff_size, uptr max_len) {
- const uptr kMinFileLen = kPageSize;
+ uptr PageSize = GetPageSizeCached();
+ const uptr kMinFileLen = PageSize;
uptr read_len = 0;
*buff = 0;
*buff_size = 0;
// Read up to one page at a time.
read_len = 0;
bool reached_eof = false;
- while (read_len + kPageSize <= size) {
- uptr just_read = internal_read(fd, *buff + read_len, kPageSize);
+ while (read_len + PageSize <= size) {
+ uptr just_read = internal_read(fd, *buff + read_len, PageSize);
if (just_read == 0) {
reached_eof = true;
break;
const uptr kMmapGranularity = 1UL << 16;
#endif
+uptr GetPageSize();
+uptr GetPageSizeCached();
+uptr GetMmapGranularity();
// Threads
int GetPid();
uptr GetTid();
namespace __sanitizer {
// ------------- sanitizer_common.h
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+uptr GetMmapGranularity() {
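+ // POSIX lets mmap place a mapping at any page boundary, so the mapping
+ // granularity is just the page size.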
+ return GetPageSize();
+}
int GetPid() {
return getpid();
}
void *MmapOrDie(uptr size, const char *mem_type) {
- size = RoundUpTo(size, kPageSize);
+ size = RoundUpTo(size, GetPageSizeCached());
void *res = internal_mmap(0, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
}
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
- void *p = internal_mmap((void*)(fixed_addr & ~(kPageSize - 1)),
- RoundUpTo(size, kPageSize),
+ uptr PageSize = GetPageSizeCached();
+ void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
+ RoundUpTo(size, PageSize),
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
-1, 0);
uptr fsize = internal_filesize(fd);
CHECK_NE(fsize, (uptr)-1);
CHECK_GT(fsize, 0);
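+ // mmap works in whole pages, so report the page-rounded length back to
+ // the caller (e.g. for a later unmap of the full mapping).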
- *buff_size = RoundUpTo(fsize, kPageSize);
+ *buff_size = RoundUpTo(fsize, GetPageSizeCached());
void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
return (map == MAP_FAILED) ? 0 : map;
}
namespace __sanitizer {
// --------------------- sanitizer_common.h
+uptr GetPageSize() {
+ return 1U << 14; // FIXME: is this configurable?
+}
+
+uptr GetMmapGranularity() {
+ return 1U << 16; // FIXME: is this configurable?
+}
+
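The two FIXMEs above could plausibly be answered by querying the OS instead of hard-coding shifts. A hedged sketch, not part of the patch (assumes <windows.h> is included in this file):

  uptr GetPageSize() {
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    return si.dwPageSize;               // 4K on typical x86/x64 Windows
  }

  uptr GetMmapGranularity() {
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    return si.dwAllocationGranularity;  // 64K on typical Windows
  }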
bool FileExists(const char *filename) {
UNIMPLEMENTED();
}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(valloc, sz);
- return user_alloc(thr, pc, sz, kPageSize);
+ return user_alloc(thr, pc, sz, GetPageSizeCached());
}
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(pvalloc, sz);
- sz = RoundUp(sz, kPageSize);
- return user_alloc(thr, pc, sz, kPageSize);
+ uptr PageSize = GetPageSizeCached();
+ sz = RoundUp(sz, PageSize);
+ return user_alloc(thr, pc, sz, PageSize);
}
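Like the ASan versions earlier in the patch, both interceptors return page-aligned blocks. One difference worth noting: assuming RoundUp behaves like RoundUpTo, pvalloc(0) here stays at 0 bytes rather than being promoted to one page as in asan_pvalloc. A hedged usage sketch (4K page assumed):

  void *a = valloc(100);   // page-aligned, 100 bytes requested
  void *b = pvalloc(100);  // page-aligned, size rounded up to 4096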
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {