void *AsanDlSymNext(const char *sym);
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
-
// Returns `true` iff most of ASan init process should be skipped due to the
// ASan library being loaded via `dlopen()`. Platforms may perform any
// `dlopen()` specific initialization inside this function.
return &_DYNAMIC; // defined in link.h
}
-static void UnmapFromTo(uptr from, uptr to) {
- CHECK(to >= from);
- if (to == from) return;
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report(
- "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
- "%p\n",
- to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
-}
-
#if ASAN_PREMAP_SHADOW
-uptr FindPremappedShadowStart() {
+uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
#endif
uptr FindDynamicShadowStart() {
+ uptr shadow_size_bytes = MemToShadowSize(kHighMemEnd);
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
- return FindPremappedShadowStart();
+ return FindPremappedShadowStart(shadow_size_bytes);
#endif
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
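+  // MapDynamicShadow() reserves the shadow with an alignment of
+  // GetMmapGranularity() << SHADOW_SCALE and one granule of no-access
+  // padding on its left; ASan needs no extra base alignment, hence the 0.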
+ return MapDynamicShadow(shadow_size_bytes, SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
-
- uptr largest_gap_found = 0;
- uptr max_occupied_addr = 0;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- uptr shadow_start =
- FindAvailableMemoryRange(space_size, alignment, granularity,
- &largest_gap_found, &max_occupied_addr);
- // If the shadow doesn't fit, restrict the address space to make it fit.
- if (shadow_start == 0) {
- VReport(
- 2,
- "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
- largest_gap_found, max_occupied_addr);
- uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
- if (new_max_vm < max_occupied_addr) {
- Report("Unable to find a memory range for dynamic shadow.\n");
- Report(
- "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
- "new_max_vm = %p\n",
- space_size, largest_gap_found, max_occupied_addr, new_max_vm);
- CHECK(0 && "cannot place shadow");
- }
- RestrictMemoryToMaxAddress(new_max_vm);
- kHighMemEnd = new_max_vm - 1;
- space_size = kHighShadowEnd + left_padding;
- VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
- shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
- nullptr, nullptr);
- if (shadow_start == 0) {
- Report("Unable to find a memory range after restricting VM.\n");
- CHECK(0 && "cannot place shadow after restricting vm");
- }
- }
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
// No-op. Mac does not support static linkage anyway.
namespace __asan {
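+// Returns the number of shadow bytes needed to describe a `size`-byte
+// application region; with the default SHADOW_SCALE of 3 that is size / 8.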
+static inline uptr MemToShadowSize(uptr size) { return size >> SHADOW_SCALE; }
static inline bool AddrIsInLowMem(uptr a) {
PROFILE_ASAN_MAPPING();
return a <= kLowMemEnd;
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = granularity * 8;
- uptr left_padding = granularity;
- uptr shadow_size = PremapShadowSize();
- uptr map_size = shadow_size + left_padding + alignment;
-
- uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
- uptr shadow_end = shadow_start + shadow_size;
- internal_munmap(reinterpret_cast<void *>(map_start),
- shadow_start - left_padding - map_start);
- internal_munmap(reinterpret_cast<void *>(shadow_end),
- map_start + map_size - shadow_end);
- return shadow_start;
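+  // The scale argument only affects the mmap alignment here
+  // (GetMmapGranularity() << 3, i.e. the historical 8-page alignment); the
+  // premapped shadow needs no additional base alignment.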
+ return MapDynamicShadow(PremapShadowSize(), /*mmap_alignment_scale*/ 3,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
bool PremapShadowFailed() {
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+ kHighMemEnd |= (GetMmapGranularity() << SHADOW_SCALE) - 1;
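+  // E.g. with a 4 KiB mmap granularity and SHADOW_SCALE == 3 this ORs in
+  // 0x7fff, so kHighMemEnd + 1 is a multiple of 32 KiB and its shadow ends
+  // on an mmap-granularity boundary.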
#endif // !ASAN_FIXED_MAPPING
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
#endif // !SANITIZER_MYRIAD2
namespace __asan {
-// ---------------------- mmap -------------------- {{{1
-// Reserve memory range [beg, end].
-// We need to use inclusive range because end+1 may not be representable.
-void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedSuperNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
- if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
-}
-
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
"unprotected gap shadow");
return;
}
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == kZeroBaseShadowStart) {
- uptr step = GetMmapGranularity();
- while (size > step && addr < kZeroBaseMaxShadowStart) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res) return;
- }
- }
-
- Report(
- "ERROR: Failed to protect the shadow gap. "
- "ASan cannot proceed correctly. ABORTING.\n");
- DumpProcessMap();
- Die();
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
static void MaybeReportLinuxPIEBug() {
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
- __asan_shadow_memory_dynamic_address = 0;
- CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}
}
uptr FindDynamicShadowStart() {
- uptr granularity = GetMmapGranularity();
- uptr alignment = 8 * granularity;
- uptr left_padding = granularity;
- uptr space_size = kHighShadowEnd + left_padding;
- uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
- granularity, nullptr, nullptr);
- CHECK_NE((uptr)0, shadow_start);
- CHECK(IsAligned(shadow_start, alignment));
- return shadow_start;
+ return MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+ /*min_shadow_base_alignment*/ 0, kHighMemEnd);
}
void AsanCheckDynamicRTPrereqs() {}
// initialized when InitInstrumentation() was called.
GetCurrentThread()->InitRandomState();
- MadviseShadow();
-
SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
// This may call libc -> needs initialized shadow.
AndroidLogInit();
bool InitShadow();
void InitPrctl();
void InitThreads();
-void MadviseShadow();
void InitializeInterceptors();
void HwasanAllocatorInit();
// The code in this file needs to run in an unrelocated binary. It should not
// access any external symbol, including its own non-hidden globals.
-namespace __hwasan {
-
-static void UnmapFromTo(uptr from, uptr to) {
- if (to == from)
- return;
- CHECK(to >= from);
- uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
- if (UNLIKELY(internal_iserror(res))) {
- Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
- SanitizerToolName, to - from, to - from, from);
- CHECK("unable to unmap" && 0);
- }
-}
-
-// Returns an address aligned to kShadowBaseAlignment, such that
-// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right
-// of it are mapped no access.
-static uptr MapDynamicShadow(uptr shadow_size_bytes) {
- const uptr granularity = GetMmapGranularity();
- const uptr min_alignment = granularity << kShadowScale;
- const uptr alignment = 1ULL << kShadowBaseAlignment;
- CHECK_GE(alignment, min_alignment);
-
- const uptr left_padding = 1ULL << kShadowBaseAlignment;
- const uptr shadow_size =
- RoundUpTo(shadow_size_bytes, granularity);
- const uptr map_size = shadow_size + left_padding + alignment;
-
- const uptr map_start = (uptr)MmapNoAccess(map_size);
- CHECK_NE(map_start, ~(uptr)0);
-
- const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
-
- UnmapFromTo(map_start, shadow_start - left_padding);
- UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
-
- return shadow_start;
-}
-
-} // namespace __hwasan
-
#if SANITIZER_ANDROID
extern "C" {
}
static uptr PremapShadow() {
- return MapDynamicShadow(PremapShadowSize());
+ return MapDynamicShadow(PremapShadowSize(), kShadowScale,
+ kShadowBaseAlignment, kHighMemEnd);
}
static bool IsPremapShadowAvailable() {
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
if (IsPremapShadowAvailable())
return FindPremappedShadowStart(shadow_size_bytes);
- return MapDynamicShadow(shadow_size_bytes);
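+  // The HWASan shadow base must be aligned to 2^kShadowBaseAlignment; the
+  // common MapDynamicShadow() provides this via min_shadow_base_alignment.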
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd);
}
} // namespace __hwasan
void InitShadowGOT() {}
uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
- return MapDynamicShadow(shadow_size_bytes);
+ return MapDynamicShadow(shadow_size_bytes, kShadowScale, kShadowBaseAlignment,
+ kHighMemEnd);
}
} // namespace __hwasan
namespace __hwasan {
-static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetMmapGranularity()), 0);
- CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
- uptr size = end - beg + 1;
- DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
- if (!MmapFixedNoReserve(beg, size, name)) {
- Report(
- "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
- "Perhaps you're using ulimit -v\n",
- size);
- Abort();
- }
-}
+// With a zero shadow base the gap to protect starts at address 0, but the
+// first few pages there cannot actually be mapped. The limit below on how
+// far ProtectGap() may skip forward is somewhat arbitrary.
+constexpr uptr kZeroBaseShadowStart = 0;
+constexpr uptr kZeroBaseMaxShadowStart = 1 << 18;
static void ProtectGap(uptr addr, uptr size) {
- if (!size)
- return;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- // A few pages at the start of the address space can not be protected.
- // But we really want to protect as much as possible, to prevent this memory
- // being returned as a result of a non-FIXED mmap().
- if (addr == 0) {
- uptr step = GetMmapGranularity();
- while (size > step) {
- addr += step;
- size -= step;
- void *res = MmapFixedNoAccess(addr, size, "shadow gap");
- if (addr == (uptr)res)
- return;
- }
- }
-
- Report(
- "ERROR: Failed to protect shadow gap [%p, %p]. "
- "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
- (void *)(addr + size));
- DumpProcessMap();
- Die();
+ __sanitizer::ProtectGap(addr, size, kZeroBaseShadowStart,
+ kZeroBaseMaxShadowStart);
}
-static uptr kLowMemStart;
-static uptr kLowMemEnd;
-static uptr kLowShadowEnd;
-static uptr kLowShadowStart;
-static uptr kHighShadowStart;
-static uptr kHighShadowEnd;
-static uptr kHighMemStart;
-static uptr kHighMemEnd;
+uptr kLowMemStart;
+uptr kLowMemEnd;
+uptr kLowShadowEnd;
+uptr kLowShadowStart;
+uptr kHighShadowStart;
+uptr kHighShadowEnd;
+uptr kHighMemStart;
+uptr kHighMemEnd;
static void PrintRange(uptr start, uptr end, const char *name) {
Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
uptr thread_space_end =
__hwasan_shadow_memory_dynamic_address - guard_page_size;
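+  // This range holds hwasan thread state rather than shadow, so the shadow
+  // madvise hints (huge pages, dont-dump) are deliberately not requested.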
ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
- "hwasan threads");
+ "hwasan threads", /*madvise_shadow*/ false);
ProtectGap(thread_space_end,
__hwasan_shadow_memory_dynamic_address - thread_space_end);
InitThreadList(thread_space_start, thread_space_end - thread_space_start);
}
-static void MadviseShadowRegion(uptr beg, uptr end) {
- uptr size = end - beg + 1;
- SetShadowRegionHugePageMode(beg, size);
- if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(beg, size);
-}
-
-void MadviseShadow() {
- MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
- MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
-}
-
bool MemIsApp(uptr p) {
CHECK(GetTagFromPointer(p) == 0);
return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
namespace __hwasan {
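+// Defined in the platform-specific code; exposed here so that kHighMemEnd
+// can be passed by reference to MapDynamicShadow() from other translation
+// units.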
+extern uptr kLowMemStart;
+extern uptr kLowMemEnd;
+extern uptr kLowShadowEnd;
+extern uptr kLowShadowStart;
+extern uptr kHighShadowStart;
+extern uptr kHighShadowEnd;
+extern uptr kHighMemStart;
+extern uptr kHighMemEnd;
+
inline uptr MemToShadow(uptr untagged_addr) {
return (untagged_addr >> kShadowScale) +
__hwasan_shadow_memory_dynamic_address;
void MprotectMallocZones(void *addr, int prot);
+// Returns the maximum user address, adjusted so that it is suitably aligned
+// for the mmap granularity and the given shadow scale.
+uptr GetHighMemEnd(uptr shadow_scale);
+
+// Maps shadow_size_bytes of shadow memory and returns the shadow address.
+// The address is aligned to the mmap granularity * 2^shadow_scale, or to
+// 2^min_shadow_base_alignment if that is larger. The shadow_size_bytes at
+// the returned address and max(2^min_shadow_base_alignment, mmap
+// granularity) bytes to its left are mapped no access.
+// high_mem_end may be updated if the requested shadow does not fit.
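+// For example, ASan uses
+//   MapDynamicShadow(MemToShadowSize(kHighMemEnd), SHADOW_SCALE,
+//                    /*min_shadow_base_alignment*/ 0, kHighMemEnd);
+// while HWASan passes kShadowScale and kShadowBaseAlignment.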
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end);
+
+// Reserve memory range [beg, end]. If madvise_shadow is true then apply
+// madvise (e.g. hugepages, core dumping) requested by options.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow = true);
+
+// Make the range [addr, addr + size) inaccessible. If addr equals
+// zero_base_shadow_start, the first few pages may not be mappable; in that
+// case skip forward one mmap granule at a time, up to
+// zero_base_max_shadow_start, and protect the rest.
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start);
+
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found, uptr *max_occupied_addr);
return start;
}
+// Reserve memory range [beg, end].
+// We need to use inclusive range because end+1 may not be representable.
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
+ bool madvise_shadow) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ if (madvise_shadow ? !MmapFixedSuperNoReserve(beg, size, name)
+ : !MmapFixedNoReserve(beg, size, name)) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+ if (madvise_shadow && common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
+}
+
+void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
+ uptr zero_base_max_shadow_start) {
+ if (!size)
+ return;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ // A few pages at the start of the address space can not be protected.
+ // But we really want to protect as much as possible, to prevent this memory
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == zero_base_shadow_start) {
+ uptr step = GetMmapGranularity();
+ while (size > step && addr < zero_base_max_shadow_start) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect the shadow gap. "
+ "%s cannot proceed correctly. ABORTING.\n",
+ SanitizerToolName);
+ DumpProcessMap();
+ Die();
+}
+
} // namespace __sanitizer
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
}
#endif // !SANITIZER_OPENBSD
+static void UnmapFromTo(uptr from, uptr to) {
+ if (to == from)
+ return;
+ CHECK(to >= from);
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, to - from, to - from, (void *)from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
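+// A worked example with illustrative numbers: for a 4 KiB mmap granularity,
+// shadow_scale == 3 and min_shadow_base_alignment == 0, alignment is 32 KiB
+// and left_padding is 4 KiB. Reserving 1 GiB of shadow therefore maps
+// 1 GiB + 4 KiB + 32 KiB of PROT_NONE memory, rounds map_start + left_padding
+// up to a 32 KiB boundary, and returns the slack on both sides to the OS via
+// UnmapFromTo().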
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+ const uptr map_size = shadow_size + left_padding + alignment;
+
+ const uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
+
} // namespace __sanitizer
#endif
return GetMaxUserVirtualAddress();
}
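+// On Darwin the shadow is placed by searching for a free gap in the existing
+// VM map rather than by over-allocating and trimming; if no gap is big
+// enough, the process' maximum VM address is lowered until the shadow fits.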
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment, uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+
+ uptr space_size = shadow_size_bytes + left_padding;
+
+ uptr largest_gap_found = 0;
+ uptr max_occupied_addr = 0;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ uptr shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity,
+ &largest_gap_found, &max_occupied_addr);
+ // If the shadow doesn't fit, restrict the address space to make it fit.
+ if (shadow_start == 0) {
+ VReport(
+ 2,
+ "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
+ largest_gap_found, max_occupied_addr);
+ uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
+ if (new_max_vm < max_occupied_addr) {
+ Report("Unable to find a memory range for dynamic shadow.\n");
+ Report(
+ "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
+ "new_max_vm = %p\n",
+ space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ CHECK(0 && "cannot place shadow");
+ }
+ RestrictMemoryToMaxAddress(new_max_vm);
+ high_mem_end = new_max_vm - 1;
+ space_size = (high_mem_end >> shadow_scale) + left_padding;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+ nullptr, nullptr);
+ if (shadow_start == 0) {
+ Report("Unable to find a memory range after restricting VM.\n");
+ CHECK(0 && "cannot place shadow after restricting vm");
+ }
+ }
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {
return true;
}
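+// The Windows version has no over-allocate-and-trim step either: it simply
+// asks FindAvailableMemoryRange() for a suitably aligned start address of
+// the required size.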
+uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
+ uptr min_shadow_base_alignment,
+ UNUSED uptr &high_mem_end) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr alignment =
+ Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
+ const uptr left_padding =
+ Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);
+ uptr space_size = shadow_size_bytes + left_padding;
+ uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+ granularity, nullptr, nullptr);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
uptr *largest_gap_found,
uptr *max_occupied_addr) {