From: Alexander Potapenko <glider@google.com>
Date: Sat, 1 Dec 2012 02:39:45 +0000 (+0000)
Subject: Add caching to the MemoryMappingLayout class on Linux. This is necessary for the...
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=7811425843ab7fb2381a669064d67e45471365bf;p=platform%2Fupstream%2Fllvm.git

Add caching to the MemoryMappingLayout class on Linux.
This is necessary for the cases when a sandbox prevents ASan from reading
the mappings from /proc/self/maps. The mappings are currently being cached
on each access to /proc/self/maps. In the future we'll need to add an API
that allows the client to notify ASan about the sandbox.

llvm-svn: 169076
---

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
index ebeeaba..e779f1d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
@@ -16,6 +16,7 @@
 #include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
 #include "sanitizer_placement_new.h"
 #include "sanitizer_procmaps.h"
 
@@ -217,13 +218,23 @@ void ReExec() {
 }
 
 // ----------------- sanitizer_procmaps.h
+char *MemoryMappingLayout::cached_proc_self_maps_buff_ = NULL;
+uptr MemoryMappingLayout::cached_proc_self_maps_buff_mmaped_size_ = 0;
+uptr MemoryMappingLayout::cached_proc_self_maps_buff_len_ = 0;
+StaticSpinMutex MemoryMappingLayout::cache_lock_;
+
 MemoryMappingLayout::MemoryMappingLayout() {
   proc_self_maps_buff_len_ =
       ReadFileToBuffer("/proc/self/maps", &proc_self_maps_buff_,
                        &proc_self_maps_buff_mmaped_size_, 1 << 26);
-  CHECK_GT(proc_self_maps_buff_len_, 0);
+  if (proc_self_maps_buff_mmaped_size_ == 0) {
+    LoadFromCache();
+    CHECK_GT(proc_self_maps_buff_len_, 0);
+  }
   // internal_write(2, proc_self_maps_buff_, proc_self_maps_buff_len_);
   Reset();
+  // FIXME: in the future we may want to cache the mappings on demand only.
+  CacheMemoryMappings();
 }
 
 MemoryMappingLayout::~MemoryMappingLayout() {
@@ -234,6 +245,39 @@ void MemoryMappingLayout::Reset() {
   current_ = proc_self_maps_buff_;
 }
 
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  SpinMutexLock l(&cache_lock_);
+  // Don't invalidate the cache if the mappings are unavailable.
+  char *old_proc_self_maps_buff_ = cached_proc_self_maps_buff_;
+  uptr old_proc_self_maps_buff_mmaped_size_ =
+      cached_proc_self_maps_buff_mmaped_size_;
+  uptr old_proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
+  cached_proc_self_maps_buff_len_ =
+      ReadFileToBuffer("/proc/self/maps", &cached_proc_self_maps_buff_,
+                       &cached_proc_self_maps_buff_mmaped_size_, 1 << 26);
+  if (cached_proc_self_maps_buff_mmaped_size_ == 0) {
+    cached_proc_self_maps_buff_ = old_proc_self_maps_buff_;
+    cached_proc_self_maps_buff_mmaped_size_ =
+        old_proc_self_maps_buff_mmaped_size_;
+    cached_proc_self_maps_buff_len_ = old_proc_self_maps_buff_len_;
+  } else {
+    if (old_proc_self_maps_buff_mmaped_size_) {
+      UnmapOrDie(old_proc_self_maps_buff_,
+                 old_proc_self_maps_buff_mmaped_size_);
+    }
+  }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  SpinMutexLock l(&cache_lock_);
+  if (cached_proc_self_maps_buff_) {
+    proc_self_maps_buff_ = cached_proc_self_maps_buff_;
+    proc_self_maps_buff_len_ = cached_proc_self_maps_buff_len_;
+    proc_self_maps_buff_mmaped_size_ = cached_proc_self_maps_buff_mmaped_size_;
+  }
+}
+
 // Parse a hex value in str and update str.
 static uptr ParseHex(char **str) {
   uptr x = 0;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc
index 3c30da3..189d997 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cc
@@ -162,6 +162,15 @@ void MemoryMappingLayout::Reset() {
   current_filetype_ = 0;
 }
 
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+  // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  // No-op on Mac for now.
+}
+
 // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
 // Google Perftools, http://code.google.com/p/google-perftools.
 
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
index f4f6241..d8b3a0e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_procmaps.h
@@ -15,6 +15,7 @@
 #define SANITIZER_PROCMAPS_H
 
 #include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
 
 namespace __sanitizer {
 
@@ -39,9 +40,14 @@ class MemoryMappingLayout {
   // address 'addr'. Returns true on success.
   bool GetObjectNameAndOffset(uptr addr, uptr *offset,
                               char filename[], uptr filename_size);
+  // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+  // to obtain the memory mappings. It should fall back to pre-cached data
+  // instead of aborting.
+  static void CacheMemoryMappings();
   ~MemoryMappingLayout();
 
  private:
+  void LoadFromCache();
   // Default implementation of GetObjectNameAndOffset.
   // Quite slow, because it iterates through the whole process map for each
   // lookup.
@@ -77,6 +83,12 @@ class MemoryMappingLayout {
   uptr proc_self_maps_buff_mmaped_size_;
   uptr proc_self_maps_buff_len_;
   char *current_;
+
+  // Static mappings cache.
+  static char *cached_proc_self_maps_buff_;
+  static uptr cached_proc_self_maps_buff_mmaped_size_;
+  static uptr cached_proc_self_maps_buff_len_;
+  static StaticSpinMutex cache_lock_;  // protects the cache contents.
 # elif defined __APPLE__
   template<u32 kLCSegmentCommandID, u32 kLCSegmentCommandSize>
   bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset,