// will co-exist in the source base for a while. The actual allocator is chosen
// at build time by redefining this macro.
#ifndef ASAN_ALLOCATOR_VERSION
-# if (ASAN_LINUX && !ASAN_ANDROID) || ASAN_MAC
+# if (ASAN_LINUX && !ASAN_ANDROID) || ASAN_MAC || ASAN_WINDOWS
# define ASAN_ALLOCATOR_VERSION 2
# else
# define ASAN_ALLOCATOR_VERSION 1
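Since the macro is only defaulted under the #ifndef guard, a build can still pin either allocator explicitly. A minimal sketch of how the choice is typically consumed, assuming the usual -D override; the dispatch below is illustrative and not taken from this patch:

// Build with e.g. -DASAN_ALLOCATOR_VERSION=1 to keep the old allocator.
#if ASAN_ALLOCATOR_VERSION == 2
// ... compile the version-2 allocator implementation ...
#elif ASAN_ALLOCATOR_VERSION == 1
// ... compile the version-1 allocator implementation ...
#else
# error "unexpected ASAN_ALLOCATOR_VERSION"
#endif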
CHECK_GE(rz_size, 16);
CHECK_LE(rz_size, 2048);
CHECK(IsPowerOfTwo(rz_size));
- u32 res = __builtin_ctz(rz_size) - 4;
+ u32 res = Log2(rz_size) - 4;
CHECK_EQ(rz_size, RZLog2Size(res));
return res;
}
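The switch away from __builtin_ctz presumably buys MSVC portability. A sketch of what the Log2 helper could look like, assuming it sits on top of the MostSignificantSetBitIndex wrapper used further down; the body is an assumption, only the call site above is from this patch:

// Sketch: for a power of two, log2 is the index of its single set bit,
// e.g. rz_size == 32 gives Log2(32) == 5, so RZSize2Log returns 1.
static inline uptr Log2Sketch(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return MostSignificantSetBitIndex(x);
}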
if (size <= kMidSize)
return (size + kMinSize - 1) >> kMinSizeLog;
if (size > kMaxSize) return 0;
- uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(size);
+ uptr l = MostSignificantSetBitIndex(size);
uptr hbits = (size >> (l - S)) & M;
uptr lbits = size & ((1 << (l - S)) - 1);
uptr l1 = l - kMidSizeLog;
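MostSignificantSetBitIndex itself has to hide the compiler intrinsics for this to build with MSVC. A hedged sketch of one way to write it (the MSVC branch needs <intrin.h>; the helper name and body here are assumptions, not this patch's implementation):

static inline uptr MsbIndexSketch(uptr x) {
  CHECK_NE(x, 0);
#if defined(_MSC_VER) && !defined(__clang__)
  unsigned long up;
# ifdef _WIN64
  _BitScanReverse64(&up, x);  // MSVC has no __builtin_clzl
# else
  _BitScanReverse(&up, x);
# endif
  return up;
#else
  return SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
#endif
}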
Printf("\n");
uptr d = s - prev_s;
uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = SANITIZER_WORDSIZE - 1 - __builtin_clzl(s);
+ uptr l = MostSignificantSetBitIndex(s);
uptr cached = MaxCached(i) * s;
Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
"cached: %zd %zd; id %zd\n",
h->size = size;
h->map_beg = map_beg;
h->map_size = map_size;
- uptr size_log = SANITIZER_WORDSIZE - __builtin_clzl(map_size) - 1;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
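A quick worked check of the bucket math, with an illustrative size only:

// Illustration: a 1 MiB secondary mapping (map_size == 1 << 20) has its
// most significant set bit at index 20, so size_log == 20, which indexes
// the by_size_log histogram checked above.
CHECK_EQ(MostSignificantSetBitIndex(1 << 20), 20);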
{
SpinMutexLock l(&mutex_);
atomic_store(&size_, Size() + add, memory_order_relaxed);
}
- QuarantineBatch *NOINLINE AllocBatch(Callback cb) {
+ NOINLINE QuarantineBatch* AllocBatch(Callback cb) {
QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
b->count = 0;
b->size = 0;
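Moving NOINLINE in front of the return type is presumably for MSVC, where the attribute expands to __declspec(noinline) and is only accepted in the leading position. A compile-only sketch of that assumption; the macro and function names are made up for illustration:

#if defined(_MSC_VER) && !defined(__clang__)
# define NOINLINE_SKETCH __declspec(noinline)
#else
# define NOINLINE_SKETCH __attribute__((noinline))
#endif
NOINLINE_SKETCH static void *SketchAlloc(void);  // parses with both spellings
// static void *NOINLINE_SKETCH SketchAlloc(void);  // the old trailing
// placement only parses with the GCC-style attribute spelling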
MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
}
+void FlushUnneededShadowMemory(uptr addr, uptr size) {
+ // This is almost useless on 32-bit platforms.
+ // FIXME: add a madvise analog when we move to 64 bits.
+}
+
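For the FIXME just above: on 64-bit Windows, arguably the closest analog to madvise(MADV_DONTNEED) is VirtualAlloc with MEM_RESET. A hedged sketch of what that could eventually look like, offered only as an illustration of the idea:

void FlushUnneededShadowMemory64Sketch(uptr addr, uptr size) {
  // MEM_RESET tells the kernel the page contents are no longer needed;
  // the protection argument is ignored for MEM_RESET but must be valid.
  if (VirtualAlloc((LPVOID)addr, size, MEM_RESET, PAGE_NOACCESS) == 0)
    Report("WARNING: failed to reset shadow memory\n");
}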
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
// FIXME: shall we do anything here on Windows?
return true;