Add a flag to skip the cache update in cases where that's unacceptable (e.g. LSan).
Patch by Sergey Matveev (earthdok@google.com)
llvm-svn: 178000
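For orientation, the flag is consumed once, at construction time. A minimal usage
sketch, assuming only the constructor signature introduced by the hunks below
(the variable names here are illustrative, not part of the patch):

  // Default mode: read /proc/self/maps, fall back to the cached copy if the
  // read fails, and refresh the global cache with the fresh snapshot.
  MemoryMappingLayout maps(/*cache_enabled*/true);

  // Cache-averse mode (e.g. LSan): insist on a fresh read and leave the
  // global cache untouched.
  MemoryMappingLayout fresh_maps(/*cache_enabled*/false);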
static bool IsRunningUnderDr() {
bool result = false;
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
const sptr kBufSize = 4095;
char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
while (proc_maps.Next(/* start */0, /* end */0, /* file_offset */0,
CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
// Find the mapping that contains a stack variable.
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset;
uptr prev_end = 0;
while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_;
StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized.
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
proc_self_maps_.len =
ReadFileToBuffer("/proc/self/maps", &proc_self_maps_.data,
&proc_self_maps_.mmaped_size, 1 << 26);
- if (proc_self_maps_.mmaped_size == 0) {
- LoadFromCache();
- CHECK_GT(proc_self_maps_.len, 0);
+ if (cache_enabled) {
+ if (proc_self_maps_.mmaped_size == 0) {
+ LoadFromCache();
+ CHECK_GT(proc_self_maps_.len, 0);
+ }
+ } else {
+ CHECK_GT(proc_self_maps_.mmaped_size, 0);
}
- // internal_write(2, proc_self_maps_.data, proc_self_maps_.len);
Reset();
// FIXME: in the future we may want to cache the mappings on demand only.
- CacheMemoryMappings();
+ if (cache_enabled)
+ CacheMemoryMappings();
}
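The constructor above relies on two helpers that this excerpt does not show. A
rough sketch of their likely shape, reusing the cached_proc_self_maps_ and
cache_lock_ statics declared earlier; the bodies are an assumption for
illustration, not the patch's actual code:

  void MemoryMappingLayout::CacheMemoryMappings() {
    SpinMutexLock l(&cache_lock_);
    // Keep the last known-good snapshot if the fresh read failed.
    if (proc_self_maps_.mmaped_size == 0) return;
    // Illustrative only: a complete implementation must also release the
    // previously cached buffer once no reader can still reference it.
    cached_proc_self_maps_ = proc_self_maps_;
  }

  void MemoryMappingLayout::LoadFromCache() {
    SpinMutexLock l(&cache_lock_);
    if (cached_proc_self_maps_.data != 0)
      proc_self_maps_ = cached_proc_self_maps_;
  }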
MemoryMappingLayout::~MemoryMappingLayout() {
return syscall(__NR_sigaltstack, ss, oss);
}
-
// ThreadLister implementation.
ThreadLister::ThreadLister(int pid)
: pid_(pid),
// ----------------- sanitizer_procmaps.h
-MemoryMappingLayout::MemoryMappingLayout() {
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
Reset();
}
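This second definition appears to be the Mac implementation (the section marker
above matches the style used in sanitizer_mac.cc). There the mappings are
obtained from dyld rather than /proc/self/maps, so there is nothing to cache;
the flag is accepted only to keep the two platforms' constructor signatures in
sync.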
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
- MemoryMappingLayout procmaps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
- while (procmaps.Next(&start, &end,
- /*offset*/0, /*filename*/0, /*filename_size*/0,
- /*protection*/0)) {
+ while (proc_maps.Next(&start, &end,
+ /*offset*/0, /*filename*/0, /*filename_size*/0,
+ /*protection*/0)) {
if (!IntervalsAreSeparate(start, end, range_start, range_end))
return false;
}
return true;
}
void DumpProcessMap() {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
const sptr kBufSize = 4095;
char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
class MemoryMappingLayout {
public:
- MemoryMappingLayout();
+ explicit MemoryMappingLayout(bool cache_enabled);
bool Next(uptr *start, uptr *end, uptr *offset,
char filename[], uptr filename_size, uptr *protection);
void Reset();
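A note on the explicit keyword in the declaration above: it rules out implicit
conversion from bool, so every call site has to spell the construction out.

  MemoryMappingLayout maps(/*cache_enabled*/true);  // OK
  // MemoryMappingLayout maps = true;               // error: constructor is explicit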
void StackTrace::PrintStack(const uptr *addr, uptr size,
bool symbolize, const char *strip_file_prefix,
SymbolizeCallback symbolize_callback) {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
InternalScopedBuffer<char> buff(GetPageSizeCached() * 2);
InternalScopedBuffer<AddressInfo> addr_frames(64);
uptr frame_num = 0;
return;
}
// Map the file into shadow of .rodata sections.
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
char name[128];
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
#ifndef TSAN_GO
static void CheckPIE() {
// Ensure that the binary is indeed compiled with -pie.
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end;
if (proc_maps.Next(&start, &end,
/*offset*/0, /*filename*/0, /*filename_size*/0,
}
static void InitDataSeg() {
- MemoryMappingLayout proc_maps;
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset;
char name[128];
bool prev_is_data = false;