1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 // Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
9 // * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
15 // * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 // Author: Sanjay Ghemawat <opensource@google.com>
34 // A malloc that uses a per-thread cache to satisfy small malloc requests.
35 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
37 // See doc/tcmalloc.html for a high-level
38 // description of how this malloc works.
41 // 1. The thread-specific lists are accessed without acquiring any locks.
42 // This is safe because each such list is only accessed by one thread.
43 // 2. We have a lock per central free-list, and hold it while manipulating
44 // the central free list for a particular size.
45 // 3. The central page allocator is protected by "pageheap_lock".
46 // 4. The pagemap (which maps from page-number to descriptor),
47 // can be read without holding any locks, and written while holding
48 // the "pageheap_lock".
49 // 5. To improve performance, a subset of the information one can get
50 // from the pagemap is cached in a data structure, pagemap_cache_,
51 // that atomically reads and writes its entries. This cache can be
52 // read and written without locking.
54 // This multi-threaded access to the pagemap is safe for fairly
55 // subtle reasons. We basically assume that when an object X is
56 // allocated by thread A and deallocated by thread B, there must
57 // have been appropriate synchronization in the handoff of object
58 // X from thread A to thread B. The same logic applies to pagemap_cache_.
60 // THE PAGEID-TO-SIZECLASS CACHE
61 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
62 // returns 0 for a particular PageID then that means "no information," not that
63 // the sizeclass is 0. The cache may have stale information for pages that do
64 // not hold the beginning of any free()'able object. Staleness is eliminated
65 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
66 // do_memalign() for all other relevant pages.
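// Illustrative sketch (not in the original comment) of how a caller is expected to treat a
// cache miss; "pageHeap" is a hypothetical pointer to the TCMalloc_PageHeap defined below:
//
//   size_t cl = pageHeap->GetSizeClassIfCached(p);   // 0 means "no information", not sizeclass 0
//   if (!cl) {
//       Span* span = pageHeap->GetDescriptor(p);     // authoritative answer from the pagemap
//       cl = span->sizeclass;                        // 0 here really does mean "large object"
//   }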
68 // TODO: Bias reclamation to larger addresses
69 // TODO: implement mallinfo/mallopt
70 // TODO: Better testing
72 // 9/28/2003 (new page-level allocator replaces ptmalloc2):
73 // * malloc/free of small objects goes from ~300 ns to ~50 ns.
74 // * allocation of a reasonably complicated struct
75 // goes from about 1100 ns to about 300 ns.
78 #include "wtf/FastMalloc.h"
80 #include "wtf/Assertions.h"
82 #include "wtf/StdLibExtras.h"
83 #include "wtf/UnusedParam.h"
86 #include <AvailabilityMacros.h>
98 #ifndef NO_TCMALLOC_SAMPLES
99 #define NO_TCMALLOC_SAMPLES
102 #if !USE(SYSTEM_MALLOC) && defined(NDEBUG)
103 #define FORCE_SYSTEM_MALLOC 0
105 #define FORCE_SYSTEM_MALLOC 1
108 // Harden the pointers stored in the TCMalloc linked lists
110 #define ENABLE_TCMALLOC_HARDENING 1
113 // Use a background thread to periodically scavenge memory to release back to the system
114 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
121 // TLS_OUT_OF_INDEXES is not defined on WinCE.
122 #ifndef TLS_OUT_OF_INDEXES
123 #define TLS_OUT_OF_INDEXES 0xffffffff
126 static DWORD isForbiddenTlsIndex = TLS_OUT_OF_INDEXES;
127 static const LPVOID kTlsAllowValue = reinterpret_cast<LPVOID>(0); // Must be zero.
128 static const LPVOID kTlsForbiddenValue = reinterpret_cast<LPVOID>(1);
131 static bool isForbidden()
133 // By default, fastMalloc is allowed so we don't allocate the
134 // tls index unless we're asked to make it forbidden. If TlsSetValue
135 // has not been called on a thread, the value returned by TlsGetValue is 0.
136 return (isForbiddenTlsIndex != TLS_OUT_OF_INDEXES) && (TlsGetValue(isForbiddenTlsIndex) == kTlsForbiddenValue);
140 void fastMallocForbid()
142 if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES)
143 isForbiddenTlsIndex = TlsAlloc(); // a little racy, but close enough for debug only
144 TlsSetValue(isForbiddenTlsIndex, kTlsForbiddenValue);
147 void fastMallocAllow()
149 if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES)
151 TlsSetValue(isForbiddenTlsIndex, kTlsAllowValue);
156 static pthread_key_t isForbiddenKey;
157 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
158 static void initializeIsForbiddenKey()
160 pthread_key_create(&isForbiddenKey, 0);
164 static bool isForbidden()
166 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
167 return !!pthread_getspecific(isForbiddenKey);
171 void fastMallocForbid()
173 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
174 pthread_setspecific(isForbiddenKey, &isForbiddenKey);
177 void fastMallocAllow()
179 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
180 pthread_setspecific(isForbiddenKey, 0);
189 void* fastZeroedMalloc(size_t n)
191 void* result = fastMalloc(n);
192 memset(result, 0, n);
196 char* fastStrDup(const char* src)
198 size_t len = strlen(src) + 1;
199 char* dup = static_cast<char*>(fastMalloc(len));
200 memcpy(dup, src, len);
206 #if FORCE_SYSTEM_MALLOC
209 #include <malloc/malloc.h>
216 void* fastMalloc(size_t n)
218 ASSERT(!isForbidden());
220 void* result = malloc(n);
221 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here.
226 void* fastCalloc(size_t n_elements, size_t element_size)
228 ASSERT(!isForbidden());
230 void* result = calloc(n_elements, element_size);
231 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here.
236 void fastFree(void* p)
238 ASSERT(!isForbidden());
243 void* fastRealloc(void* p, size_t n)
245 ASSERT(!isForbidden());
247 void* result = realloc(p, n);
248 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here.
253 void releaseFastMallocFreeMemory() { }
255 FastMallocStatistics fastMallocStatistics()
257 FastMallocStatistics statistics = { 0, 0, 0 };
264 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
265 // It will never be used in this case, so its type and value are less interesting than its presence.
266 extern "C" const int jscore_fastmalloc_introspection = 0;
269 #else // FORCE_SYSTEM_MALLOC
271 #include "Compiler.h"
272 #include "TCPackedCache.h"
273 #include "TCPageMap.h"
274 #include "TCSpinLock.h"
275 #include "TCSystemAlloc.h"
285 #ifndef WIN32_LEAN_AND_MEAN
286 #define WIN32_LEAN_AND_MEAN
292 #include "MallocZoneSupport.h"
293 #include "wtf/HashSet.h"
294 #include "wtf/Vector.h"
296 #include "wtf/CurrentTime.h"
300 #include <dispatch/dispatch.h>
307 // Calling pthread_getspecific through a global function pointer is faster than a normal
308 // call to the function on Mac OS X, and it's used in performance-critical code. So we
309 // use a function pointer. But that's not necessarily faster on other platforms, and we had
310 // problems with this technique on Windows, so we'll do this only on Mac OS X.
312 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
313 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
316 #define DEFINE_VARIABLE(type, name, value, meaning) \
317 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
318 type FLAGS_##name(value); \
319 char FLAGS_no##name; \
321 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
323 #define DEFINE_int64(name, value, meaning) \
324 DEFINE_VARIABLE(int64_t, name, value, meaning)
326 #define DEFINE_double(name, value, meaning) \
327 DEFINE_VARIABLE(double, name, value, meaning)
331 #define malloc fastMalloc
332 #define calloc fastCalloc
333 #define free fastFree
334 #define realloc fastRealloc
336 #define MESSAGE LOG_ERROR
337 #define CHECK_CONDITION ASSERT
339 static const char kLLHardeningMask = 0;
340 template <unsigned> struct EntropySource;
341 template <> struct EntropySource<4> {
342 static uint32_t value()
347 return static_cast<uint32_t>(static_cast<uintptr_t>(currentTime() * 10000) ^ reinterpret_cast<uintptr_t>(&kLLHardeningMask));
352 template <> struct EntropySource<8> {
353 static uint64_t value()
355 return EntropySource<4>::value() | (static_cast<uint64_t>(EntropySource<4>::value()) << 32);
359 #if ENABLE(TCMALLOC_HARDENING)
361 * To make use-after-free style exploits harder to carry out,
362 * we mask the addresses we put into our linked lists with the
363 * address of kLLHardeningMask. Due to ASLR the address of
364 * kLLHardeningMask should be sufficiently randomized to make direct
365 * freelist manipulation much more difficult.
371 static ALWAYS_INLINE uintptr_t internalEntropyValue()
373 static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value() | 1;
378 #define HARDENING_ENTROPY internalEntropyValue()
379 #define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount))))
380 #define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<typeof(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
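// Illustrative note (not part of the original): XOR with a fixed mask is its own inverse, so
// masking a stored pointer twice with the same key and entropy recovers the original value.
// SLL_SetNext() below stores the masked pointer and SLL_Next() applies the same mask to read it
// back. MaskKeyShift is assumed to be defined elsewhere in this file. For example:
//
//   void* next = ...; void* slot = ...;                                // hypothetical values
//   void* masked = XOR_MASK_PTR_WITH_KEY(next, slot, HARDENING_ENTROPY);
//   ASSERT(XOR_MASK_PTR_WITH_KEY(masked, slot, HARDENING_ENTROPY) == next);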
383 static ALWAYS_INLINE uint32_t freedObjectStartPoison()
385 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
390 static ALWAYS_INLINE uint32_t freedObjectEndPoison()
392 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1;
397 #define PTR_TO_UINT32(ptr) static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr))
398 #define END_POISON_INDEX(allocationSize) (((allocationSize) - sizeof(uint32_t)) / sizeof(uint32_t))
399 #define POISON_ALLOCATION(allocation, allocationSize) do { \
400 ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
401 reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef1; \
402 reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeef3; \
403 if ((allocationSize) < 4 * sizeof(uint32_t)) \
405 reinterpret_cast<uint32_t*>(allocation)[2] = 0xbadbeef5; \
406 reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = 0xbadbeef7; \
409 #define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison) do { \
410 ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \
411 reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef9; \
412 reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeefb; \
413 if ((allocationSize) < 4 * sizeof(uint32_t)) \
415 reinterpret_cast<uint32_t*>(allocation)[2] = (startPoison) ^ PTR_TO_UINT32(allocation); \
416 reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = (endPoison) ^ PTR_TO_UINT32(allocation); \
419 #define POISON_DEALLOCATION(allocation, allocationSize) \
420 POISON_DEALLOCATION_EXPLICIT(allocation, (allocationSize), freedObjectStartPoison(), freedObjectEndPoison())
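// Sketch of the resulting layout (illustrative, not in the original): for a freed 32-byte
// object at address A, END_POISON_INDEX(32) == (32 - 4) / 4 == 7, so the freed memory reads
//   word[0] = 0xbadbeef9, word[1] = 0xbadbeefb,
//   word[2] = freedObjectStartPoison() ^ PTR_TO_UINT32(A),
//   word[7] = freedObjectEndPoison() ^ PTR_TO_UINT32(A).
// Objects smaller than four words only get the first two poison words (the size guard above
// skips the rest).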
422 #define MAY_BE_POISONED(allocation, allocationSize) (((allocationSize) >= 4 * sizeof(uint32_t)) && ( \
423 (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) || \
424 (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
427 #define IS_DEFINITELY_POISONED(allocation, allocationSize) (((allocationSize) < 4 * sizeof(uint32_t)) || ( \
428 (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) && \
429 (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \
434 #define POISON_ALLOCATION(allocation, allocationSize)
435 #define POISON_DEALLOCATION(allocation, allocationSize)
436 #define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison)
437 #define MAY_BE_POISONED(allocation, allocationSize) (false)
438 #define IS_DEFINITELY_POISONED(allocation, allocationSize) (true)
439 #define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (((void)entropy), ((void)key), ptr)
441 #define HARDENING_ENTROPY 0
445 //-------------------------------------------------------------------
447 //-------------------------------------------------------------------
449 // Not all possible combinations of the following parameters make
450 // sense. In particular, if kMaxSize increases, you may have to
451 // increase kNumClasses as well.
452 static const size_t kPageShift = 12;
453 static const size_t kPageSize = 1 << kPageShift;
454 static const size_t kMaxSize = 8u * kPageSize;
455 static const size_t kAlignShift = 3;
456 static const size_t kAlignment = 1 << kAlignShift;
457 static const size_t kNumClasses = 68;
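// For reference (worked out from the constants above): kPageSize = 1 << 12 = 4096 bytes,
// kMaxSize = 8 * 4096 = 32768 bytes (the largest "small" allocation), and kAlignment = 8 bytes.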
459 // Allocates a big block of memory for the pagemap once we reach more than 128MB.
461 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
463 // Minimum number of pages to fetch from system at a time. Must be
464 // significantly bigger than kPageSize to amortize system-call
465 // overhead, and also to reduce external fragmentation. Also, we
466 // should keep this value big because various incarnations of Linux
467 // have small limits on the number of mmap() regions per address space.
469 static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
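// For reference: with kPageShift = 12 this is 1 << 8 = 256 pages, i.e. 1MB per system allocation.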
471 // Number of objects to move between a per-thread list and a central
472 // list in one shot. We want this to be not too small so we can
473 // amortize the lock overhead for accessing the central list. Making
474 // it too big may temporarily cause unnecessary memory wastage in the
475 // per-thread free list until the scavenger cleans up the list.
476 static int num_objects_to_move[kNumClasses];
478 // Maximum length we allow a per-thread free-list to have before we
479 // move objects from it into the corresponding central free-list. We
480 // want this big to avoid locking the central free-list too often. It
481 // should not hurt to make this list somewhat big because the
482 // scavenging code will shrink it down when its contents are not in use.
483 static const int kMaxFreeListLength = 256;
485 // Lower and upper bounds on the per-thread cache sizes
486 static const size_t kMinThreadCacheSize = kMaxSize * 2;
487 static const size_t kMaxThreadCacheSize = 2 << 20;
489 // Default bound on the total amount of thread caches
490 static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
492 // For all span-lengths < kMaxPages we keep an exact-size list.
493 // REQUIRED: kMaxPages >= kMinSystemAlloc;
494 static const size_t kMaxPages = kMinSystemAlloc;
496 /* The smallest prime > 2^n */
497 static int primes_list[] = {
498 // Small values might cause high rates of sampling
499 // and hence commented out.
500 // 2, 5, 11, 17, 37, 67, 131, 257,
501 // 521, 1031, 2053, 4099, 8209, 16411,
502 32771, 65537, 131101, 262147, 524309, 1048583,
503 2097169, 4194319, 8388617, 16777259, 33554467 };
505 // Twice the approximate gap between sampling actions.
506 // I.e., we take one sample approximately once every
507 // tcmalloc_sample_parameter/2
508 // bytes of allocation, i.e., ~ once every 128KB.
509 // Must be a prime number.
510 #ifdef NO_TCMALLOC_SAMPLES
511 DEFINE_int64(tcmalloc_sample_parameter, 0,
512 "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
513 static size_t sample_period = 0;
515 DEFINE_int64(tcmalloc_sample_parameter, 262147,
516 "Twice the approximate gap between sampling actions."
517 " Must be a prime number. Otherwise will be rounded up to a "
518 " larger prime number");
519 static size_t sample_period = 262147;
522 // Protects sample_period above
523 static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
525 // Parameters for controlling how fast memory is returned to the OS.
527 DEFINE_double(tcmalloc_release_rate, 1,
528 "Rate at which we release unused memory to the system. "
529 "Zero means we never release memory back to the system. "
530 "Increase this flag to return memory faster; decrease it "
531 "to return memory slower. Reasonable rates are in the "
534 //-------------------------------------------------------------------
535 // Mapping from size to size_class and vice versa
536 //-------------------------------------------------------------------
538 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
539 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
540 // So for these larger sizes we have an array indexed by ceil(size/128).
542 // We flatten both logical arrays into one physical array and use
543 // arithmetic to compute an appropriate index. The constants used by
544 // ClassIndex() were selected to make the flattening work.
547 // Size Expression Index
548 // -------------------------------------------------------
552 // 1024 (1024 + 7) / 8 128
553 // 1025 (1025 + 127 + (120<<7)) / 128 129
555 // 32768 (32768 + 127 + (120<<7)) / 128 376
556 static const size_t kMaxSmallSize = 1024;
557 static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
558 static const int add_amount[2] = { 7, 127 + (120 << 7) };
559 static unsigned char class_array[377];
561 // Compute index of the class_array[] entry for a given size
562 static inline int ClassIndex(size_t s) {
563 const int i = (s > kMaxSmallSize);
564 return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
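// Worked examples (illustrative, matching the table above):
//   ClassIndex(8)    = (8 + 7) >> 3                   = 1
//   ClassIndex(1024) = (1024 + 7) >> 3                = 128
//   ClassIndex(1025) = (1025 + 127 + (120 << 7)) >> 7 = 16512 >> 7 = 129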
567 // Mapping from size class to max size storable in that class
568 static size_t class_to_size[kNumClasses];
570 // Mapping from size class to number of pages to allocate at a time
571 static size_t class_to_pages[kNumClasses];
573 // Hardened singly linked list. We make this a class to allow the compiler to
574 // statically prevent mismatching hardened and non-hardened list pointers.
577 static ALWAYS_INLINE HardenedSLL create(void* value)
580 result.m_value = value;
584 static ALWAYS_INLINE HardenedSLL null()
591 ALWAYS_INLINE void setValue(void* value) { m_value = value; }
592 ALWAYS_INLINE void* value() const { return m_value; }
593 ALWAYS_INLINE bool operator!() const { return !m_value; }
594 typedef void* (HardenedSLL::*UnspecifiedBoolType);
595 ALWAYS_INLINE operator UnspecifiedBoolType() const { return m_value ? &HardenedSLL::m_value : 0; }
597 bool operator!=(const HardenedSLL& other) const { return m_value != other.m_value; }
598 bool operator==(const HardenedSLL& other) const { return m_value == other.m_value; }
604 // TransferCache is used to cache transfers of num_objects_to_move[size_class]
605 // back and forth between thread caches and the central cache for a given size class.
608 HardenedSLL head; // Head of chain of objects.
609 HardenedSLL tail; // Tail of chain of objects.
611 // A central cache freelist can have anywhere from 0 to kNumTransferEntries
612 // slots to put linked list chains into. To keep memory usage bounded, the total
613 // number of TCEntries across size classes is fixed. Currently each size
614 // class is initially given one TCEntry which also means that the maximum any
615 // one class can have is kNumClasses.
616 static const int kNumTransferEntries = kNumClasses;
618 // Note: the following only works for "n"s that fit in 32-bits, but
619 // that is fine since we only use it for small sizes.
620 static inline int LgFloor(size_t n) {
622 for (int i = 4; i >= 0; --i) {
623 int shift = (1 << i);
624 size_t x = n >> shift;
634 // Functions for using our simple hardened singly linked list
635 static ALWAYS_INLINE HardenedSLL SLL_Next(HardenedSLL t, uintptr_t entropy) {
636 return HardenedSLL::create(XOR_MASK_PTR_WITH_KEY(*(reinterpret_cast<void**>(t.value())), t.value(), entropy));
639 static ALWAYS_INLINE void SLL_SetNext(HardenedSLL t, HardenedSLL n, uintptr_t entropy) {
640 *(reinterpret_cast<void**>(t.value())) = XOR_MASK_PTR_WITH_KEY(n.value(), t.value(), entropy);
643 static ALWAYS_INLINE void SLL_Push(HardenedSLL* list, HardenedSLL element, uintptr_t entropy) {
644 SLL_SetNext(element, *list, entropy);
648 static ALWAYS_INLINE HardenedSLL SLL_Pop(HardenedSLL *list, uintptr_t entropy) {
649 HardenedSLL result = *list;
650 *list = SLL_Next(*list, entropy);
654 // Remove N elements from a linked list to which head points. head will be
655 // modified to point to the new head. start and end will point to the first
656 // and last nodes of the range. Note that the next pointer of the last node (*end)
657 // is set to NULL after this function is called.
659 static ALWAYS_INLINE void SLL_PopRange(HardenedSLL* head, int N, HardenedSLL *start, HardenedSLL *end, uintptr_t entropy) {
661 *start = HardenedSLL::null();
662 *end = HardenedSLL::null();
666 HardenedSLL tmp = *head;
667 for (int i = 1; i < N; ++i) {
668 tmp = SLL_Next(tmp, entropy);
673 *head = SLL_Next(tmp, entropy);
674 // Unlink range from list.
675 SLL_SetNext(tmp, HardenedSLL::null(), entropy);
678 static ALWAYS_INLINE void SLL_PushRange(HardenedSLL *head, HardenedSLL start, HardenedSLL end, uintptr_t entropy) {
680 SLL_SetNext(end, *head, entropy);
684 static ALWAYS_INLINE size_t SLL_Size(HardenedSLL head, uintptr_t entropy) {
688 head = SLL_Next(head, entropy);
693 // Setup helper functions.
695 static ALWAYS_INLINE size_t SizeClass(size_t size) {
696 return class_array[ClassIndex(size)];
699 // Get the byte-size for a specified class
700 static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
701 return class_to_size[cl];
703 static int NumMoveSize(size_t size) {
704 if (size == 0) return 0;
705 // Use approx 64k transfers between thread and central caches.
706 int num = static_cast<int>(64.0 * 1024.0 / size);
707 if (num < 2) num = 2;
708 // Clamp well below kMaxFreeListLength to avoid ping pong between central
709 // and thread caches.
710 if (num > static_cast<int>(0.8 * kMaxFreeListLength))
711 num = static_cast<int>(0.8 * kMaxFreeListLength);
713 // Also, avoid bringing in too many objects into small object free
714 // lists. There are lots of such lists, and if we allow each one to
715 // fetch too many at a time, we end up having to scavenge too often
716 // (especially when there are lots of threads and each thread gets a
717 // small allowance for its thread cache).
719 // TODO: Make thread cache free list sizes dynamic so that we do not
720 // have to equally divide a fixed resource amongst lots of threads.
721 if (num > 32) num = 32;
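// Worked example (illustrative): for a 256-byte class, 64K / 256 = 256 objects, clamped first to
// 0.8 * kMaxFreeListLength = 204 and then to the final cap of 32; for a 16KB class, 64K / 16K = 4
// objects, which no clamp touches.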
726 // Initialize the mapping arrays
727 static void InitSizeClasses() {
728 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
729 if (ClassIndex(0) < 0) {
730 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
733 if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
734 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
738 // Compute the size classes we want to use
739 size_t sc = 1; // Next size class to assign
740 unsigned char alignshift = kAlignShift;
742 for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
743 int lg = LgFloor(size);
745 // Increase alignment every so often.
747 // Since we double the alignment every time size doubles and
748 // size >= 128, this means that space wasted due to alignment is
749 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
750 // bytes, so the space wasted as a percentage starts falling for
752 if ((lg >= 7) && (alignshift < 8)) {
758 // Allocate enough pages so leftover is less than 1/8 of total.
759 // This bounds wasted space to at most 12.5%.
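// Worked example (illustrative): for size = 1536, psize starts at 4096 with a 1024-byte leftover
// (more than 4096/8), so psize grows to 8192; the leftover is then 512 bytes (at most 8192/8),
// giving my_pages = 2 and at most 512/8192 = 6.25% waste for this class.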
760 size_t psize = kPageSize;
761 while ((psize % size) > (psize >> 3)) {
764 const size_t my_pages = psize >> kPageShift;
766 if (sc > 1 && my_pages == class_to_pages[sc-1]) {
767 // See if we can merge this into the previous class without
768 // increasing the fragmentation of the previous class.
769 const size_t my_objects = (my_pages << kPageShift) / size;
770 const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
771 / class_to_size[sc-1];
772 if (my_objects == prev_objects) {
773 // Adjust last class to include this size
774 class_to_size[sc-1] = size;
780 class_to_pages[sc] = my_pages;
781 class_to_size[sc] = size;
784 if (sc != kNumClasses) {
785 MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
786 sc, int(kNumClasses));
790 // Initialize the mapping arrays
792 for (unsigned char c = 1; c < kNumClasses; c++) {
793 const size_t max_size_in_class = class_to_size[c];
794 for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
795 class_array[ClassIndex(s)] = c;
797 next_size = static_cast<int>(max_size_in_class + kAlignment);
800 // Double-check sizes just to be safe
801 for (size_t size = 0; size <= kMaxSize; size++) {
802 const size_t sc = SizeClass(size);
804 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
807 if (sc > 1 && size <= class_to_size[sc-1]) {
808 MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
812 if (sc >= kNumClasses) {
813 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
816 const size_t s = class_to_size[sc];
818 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
822 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
827 // Initialize the num_objects_to_move array.
828 for (size_t cl = 1; cl < kNumClasses; ++cl) {
829 num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
833 // -------------------------------------------------------------------------
834 // Simple allocator for objects of a specified type. External locking
835 // is required before accessing one of these objects.
836 // -------------------------------------------------------------------------
838 // Metadata allocator -- keeps stats about how many bytes allocated
839 static uint64_t metadata_system_bytes = 0;
840 static void* MetaDataAlloc(size_t bytes) {
841 void* result = TCMalloc_SystemAlloc(bytes, 0);
842 if (result != NULL) {
843 metadata_system_bytes += bytes;
849 class PageHeapAllocator {
851 // How much to allocate from system at a time
852 static const size_t kAllocIncrement = 32 << 10;
855 static const size_t kAlignedSize
856 = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
858 // Free area from which to carve new objects
862 // Linked list of all regions allocated by this allocator
863 HardenedSLL allocated_regions_;
865 // Free list of already carved objects
866 HardenedSLL free_list_;
868 // Number of allocated but unfreed objects
873 void Init(uintptr_t entropy) {
874 ASSERT(kAlignedSize <= kAllocIncrement);
876 allocated_regions_ = HardenedSLL::null();
879 free_list_.setValue(NULL);
887 result = free_list_.value();
888 free_list_ = SLL_Next(free_list_, entropy_);
890 if (free_avail_ < kAlignedSize) {
892 char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
896 HardenedSLL new_head = HardenedSLL::create(new_allocation);
897 SLL_SetNext(new_head, allocated_regions_, entropy_);
898 allocated_regions_ = new_head;
899 free_area_ = new_allocation + kAlignedSize;
900 free_avail_ = kAllocIncrement - kAlignedSize;
903 free_area_ += kAlignedSize;
904 free_avail_ -= kAlignedSize;
907 return reinterpret_cast<T*>(result);
911 HardenedSLL new_head = HardenedSLL::create(p);
912 SLL_SetNext(new_head, free_list_, entropy_);
913 free_list_ = new_head;
917 int inuse() const { return inuse_; }
920 template <class Recorder>
921 void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
923 for (HardenedSLL adminAllocation = allocated_regions_; adminAllocation; adminAllocation.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(adminAllocation.value()), entropy_)))
924 recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation.value()), kAllocIncrement);
929 // -------------------------------------------------------------------------
930 // Span - a contiguous run of pages
931 // -------------------------------------------------------------------------
933 // Type that can hold a page number
934 typedef uintptr_t PageID;
936 // Type that can hold the length of a run of pages
937 typedef uintptr_t Length;
939 static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
941 // Convert byte size into pages. This won't overflow, but may return
942 // an unreasonably large value if bytes is huge enough.
943 static inline Length pages(size_t bytes) {
944 return (bytes >> kPageShift) +
945 ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
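// For example (illustrative): pages(5000) = (5000 >> 12) + 1 = 2, since the 904 bytes that do not
// fit in the first 4096-byte page force a second page.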
948 // Convert a user size into the number of bytes that will actually be
950 static size_t AllocationSize(size_t bytes) {
951 if (bytes > kMaxSize) {
952 // Large object: we allocate an integral number of pages
953 ASSERT(bytes <= (kMaxValidPages << kPageShift));
954 return pages(bytes) << kPageShift;
956 // Small object: find the size class to which it belongs
957 return ByteSizeForClass(SizeClass(bytes));
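// Illustrative examples: AllocationSize(100000) rounds up to pages(100000) = 25 pages, i.e.
// 102400 bytes; AllocationSize(100) returns the byte size of the smallest size class that can
// hold 100 bytes, whatever InitSizeClasses() computed for it.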
962 kSpanCookieBits = 10,
963 kSpanCookieMask = (1 << 10) - 1,
967 static uint32_t spanValidationCookie;
968 static uint32_t spanInitializerCookie()
970 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() & kSpanCookieMask;
971 spanValidationCookie = value;
975 // Information kept for a span (a contiguous run of pages).
977 PageID start; // Starting page number
978 Length length; // Number of pages in span
979 Span* next(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, this, entropy); }
980 Span* remoteNext(const Span* remoteSpanPointer, uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, remoteSpanPointer, entropy); }
981 Span* prev(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_prev, this, entropy); }
982 void setNext(Span* next, uintptr_t entropy) { m_next = XOR_MASK_PTR_WITH_KEY(next, this, entropy); }
983 void setPrev(Span* prev, uintptr_t entropy) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this, entropy); }
986 Span* m_next; // Used when in linked list
987 Span* m_prev; // Used when in linked list
989 HardenedSLL objects; // Linked list of free objects
990 unsigned int free : 1; // Is the span free
991 #ifndef NO_TCMALLOC_SAMPLES
992 unsigned int sample : 1; // Sampled object?
994 unsigned int sizeclass : 8; // Size-class for small objects (or 0)
995 unsigned int refcount : 11; // Number of non-free objects
996 bool decommitted : 1;
999 m_cookie = ((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ spanInitializerCookie();
1001 void clearCookie() { m_cookie = 0; }
1002 bool isValid() const
1004 return (((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ m_cookie) == spanValidationCookie;
1007 uint32_t m_cookie : kSpanCookieBits;
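// Illustrative note (not in the original): initCookie() XORs the span's own address bits with
// spanInitializerCookie(), and isValid() XORs the same address bits back in, so a live span
// always yields spanValidationCookie. Once DeleteSpan() clears the cookie to 0, the check fails
// unless the address bits happen to equal the cookie (about a 1 in 2^10 chance), which is what
// lets the RELEASE_ASSERT in DeleteSpan() catch stale or double-deleted spans.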
1011 // For debugging, we can keep a log of events per span
1018 #define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
1021 void Event(Span* span, char op, int v = 0) {
1022 span->history[span->nexthistory] = op;
1023 span->value[span->nexthistory] = v;
1024 span->nexthistory++;
1025 if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
1028 #define Event(s,o,v) ((void) 0)
1031 // Allocator/deallocator for spans
1032 static PageHeapAllocator<Span> span_allocator;
1033 static Span* NewSpan(PageID p, Length len) {
1034 Span* result = span_allocator.New();
1035 memset(result, 0, sizeof(*result));
1037 result->length = len;
1038 result->initCookie();
1040 result->nexthistory = 0;
1045 static inline void DeleteSpan(Span* span) {
1046 RELEASE_ASSERT(span->isValid());
1048 // In debug mode, trash the contents of deleted Spans
1049 memset(span, 0x3f, sizeof(*span));
1051 span->clearCookie();
1052 span_allocator.Delete(span);
1055 // -------------------------------------------------------------------------
1056 // Doubly linked list of spans.
1057 // -------------------------------------------------------------------------
1059 static inline void DLL_Init(Span* list, uintptr_t entropy) {
1060 list->setNext(list, entropy);
1061 list->setPrev(list, entropy);
1064 static inline void DLL_Remove(Span* span, uintptr_t entropy) {
1065 span->prev(entropy)->setNext(span->next(entropy), entropy);
1066 span->next(entropy)->setPrev(span->prev(entropy), entropy);
1067 span->setPrev(NULL, entropy);
1068 span->setNext(NULL, entropy);
1071 static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list, uintptr_t entropy) {
1072 return list->next(entropy) == list;
1075 static int DLL_Length(const Span* list, uintptr_t entropy) {
1077 for (Span* s = list->next(entropy); s != list; s = s->next(entropy)) {
1083 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */
1084 static void DLL_Print(const char* label, const Span* list) {
1085 MESSAGE("%-10s %p:", label, list);
1086 for (const Span* s = list->next; s != list; s = s->next) {
1087 MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
1093 static inline void DLL_Prepend(Span* list, Span* span, uintptr_t entropy) {
1094 span->setNext(list->next(entropy), entropy);
1095 span->setPrev(list, entropy);
1096 list->next(entropy)->setPrev(span, entropy);
1097 list->setNext(span, entropy);
1100 //-------------------------------------------------------------------
1101 // Data kept per size-class in central cache
1102 //-------------------------------------------------------------------
1104 class TCMalloc_Central_FreeList {
1106 void Init(size_t cl, uintptr_t entropy);
1108 // These methods all do internal locking.
1110 // Insert the specified range into the central freelist. N is the number of
1111 // elements in the range.
1112 void InsertRange(HardenedSLL start, HardenedSLL end, int N);
1114 // Stores the actual number of fetched elements into *N.
1115 void RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N);
1117 // Returns the number of free objects in cache.
1119 SpinLockHolder h(&lock_);
1123 // Returns the number of free objects in the transfer cache.
1125 SpinLockHolder h(&lock_);
1126 return used_slots_ * num_objects_to_move[size_class_];
1129 template <class Finder, class Reader>
1130 void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
1133 static const ptrdiff_t emptyOffset = reinterpret_cast<const char*>(&empty_) - reinterpret_cast<const char*>(this);
1134 Span* remoteEmpty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + emptyOffset);
1135 Span* remoteSpan = nonempty_.remoteNext(remoteEmpty, entropy_);
1136 for (Span* span = reader(remoteEmpty); span && span != &empty_; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0))
1137 ASSERT(!span->objects);
1140 ASSERT(!nonempty_.objects);
1141 static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
1143 Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
1144 Span* remoteSpan = nonempty_.remoteNext(remoteNonempty, entropy_);
1146 for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) {
1147 for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_))) {
1148 finder.visit(nextObject.value());
1153 uintptr_t entropy() const { return entropy_; }
1155 // REQUIRES: lock_ is held
1156 // Remove object from cache and return.
1157 // Return NULL if no free entries in cache.
1158 HardenedSLL FetchFromSpans();
1160 // REQUIRES: lock_ is held
1161 // Remove object from cache and return. Fetches
1162 // from pageheap if cache is empty. Only returns
1163 // NULL on allocation failure.
1164 HardenedSLL FetchFromSpansSafe();
1166 // REQUIRES: lock_ is held
1167 // Release a linked list of objects to spans.
1168 // May temporarily release lock_.
1169 void ReleaseListToSpans(HardenedSLL start);
1171 // REQUIRES: lock_ is held
1172 // Release an object to spans.
1173 // May temporarily release lock_.
1174 ALWAYS_INLINE void ReleaseToSpans(HardenedSLL object);
1176 // REQUIRES: lock_ is held
1177 // Populate cache by fetching from the page heap.
1178 // May temporarily release lock_.
1179 ALWAYS_INLINE void Populate();
1181 // REQUIRES: lock is held.
1182 // Tries to make room for a TCEntry. If the cache is full it will try to
1183 // expand it at the cost of some other cache size. Return false if there is
1185 bool MakeCacheSpace();
1187 // REQUIRES: lock_ for locked_size_class is held.
1188 // Picks a "random" size class to steal TCEntry slot from. In reality it
1189 // just iterates over the sizeclasses but does so without taking a lock.
1190 // Returns true on success.
1191 // May temporarily lock a "random" size class.
1192 static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
1194 // REQUIRES: lock_ is *not* held.
1195 // Tries to shrink the cache. If force is true it will release objects to
1196 // spans if that allows it to shrink the cache. Return false if it failed to
1197 // shrink the cache. Decrements cache_size_ on success.
1198 // May temporarily take lock_. If it takes lock_, the locked_size_class
1199 // lock is released to keep the thread from holding two size class locks
1200 // concurrently, which could lead to a deadlock.
1201 bool ShrinkCache(int locked_size_class, bool force);
1203 // This lock protects all the data members. cached_entries and cache_size_
1204 // may be looked at without holding the lock.
1207 // We keep linked lists of empty and non-empty spans.
1208 size_t size_class_; // My size class
1209 Span empty_; // Dummy header for list of empty spans
1210 Span nonempty_; // Dummy header for list of non-empty spans
1211 size_t counter_; // Number of free objects in cache entry
1213 // Here we reserve space for TCEntry cache slots. Since one size class can
1214 // end up getting all the TCEntry quota in the system, we just preallocate
1215 // a sufficient number of entries here.
1216 TCEntry tc_slots_[kNumTransferEntries];
1218 // Number of currently used cached entries in tc_slots_. This variable is
1219 // updated under a lock but can be read without one.
1220 int32_t used_slots_;
1221 // The current number of slots for this size class. This is an
1222 // adaptive value that is increased if there is lots of traffic
1223 // on a given size class.
1224 int32_t cache_size_;
1228 #if COMPILER(CLANG) && defined(__has_warning)
1229 #pragma clang diagnostic push
1230 #if __has_warning("-Wunused-private-field")
1231 #pragma clang diagnostic ignored "-Wunused-private-field"
1235 // Pad each CentralCache object to multiple of 64 bytes
1236 template <size_t SizeToPad>
1237 class TCMalloc_Central_FreeListPadded_Template : public TCMalloc_Central_FreeList {
1239 char pad[64 - SizeToPad];
1242 // Zero-size specialization to avoid a compiler error when TCMalloc_Central_FreeList happens
1243 // to be an exact multiple of 64 bytes.
1244 template <> class TCMalloc_Central_FreeListPadded_Template<0> : public TCMalloc_Central_FreeList {
1247 typedef TCMalloc_Central_FreeListPadded_Template<sizeof(TCMalloc_Central_FreeList) % 64> TCMalloc_Central_FreeListPadded;
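// Illustrative arithmetic: if sizeof(TCMalloc_Central_FreeList) were, say, 200 bytes, then
// SizeToPad = 200 % 64 = 8 and the pad array adds 64 - 8 = 56 bytes, rounding each padded central
// cache up to 256 bytes, presumably so adjacent entries do not share a cache line.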
1249 #if COMPILER(CLANG) && defined(__has_warning)
1250 #pragma clang diagnostic pop
1255 class TCMalloc_PageHeap;
1256 class TCMalloc_ThreadCache;
1257 template <typename T> class PageHeapAllocator;
1259 class FastMallocZone {
1263 static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
1264 static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
1265 static boolean_t check(malloc_zone_t*) { return true; }
1266 static void print(malloc_zone_t*, boolean_t) { }
1267 static void log(malloc_zone_t*, void*) { }
1268 static void forceLock(malloc_zone_t*) { }
1269 static void forceUnlock(malloc_zone_t*) { }
1270 static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
1273 FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
1274 static size_t size(malloc_zone_t*, const void*);
1275 static void* zoneMalloc(malloc_zone_t*, size_t);
1276 static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
1277 static void zoneFree(malloc_zone_t*, void*);
1278 static void* zoneRealloc(malloc_zone_t*, void*, size_t);
1279 static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
1280 static void zoneDestroy(malloc_zone_t*) { }
1282 malloc_zone_t m_zone;
1283 TCMalloc_PageHeap* m_pageHeap;
1284 TCMalloc_ThreadCache** m_threadHeaps;
1285 TCMalloc_Central_FreeListPadded* m_centralCaches;
1286 PageHeapAllocator<Span>* m_spanAllocator;
1287 PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
1292 // Even if we have support for thread-local storage in the compiler
1293 // and linker, the OS may not support it. We need to check that at
1294 // runtime. Right now, we have to keep a manual set of "bad" OSes.
1295 #if defined(HAVE_TLS)
1296 static bool kernel_supports_tls = false; // be conservative
1297 static inline bool KernelSupportsTLS() {
1298 return kernel_supports_tls;
1300 # if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
1301 static void CheckIfKernelSupportsTLS() {
1302 kernel_supports_tls = false;
1305 # include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
1306 static void CheckIfKernelSupportsTLS() {
1308 if (uname(&buf) != 0) { // should be impossible
1309 MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
1310 kernel_supports_tls = false;
1311 } else if (strcasecmp(buf.sysname, "linux") == 0) {
1312 // The linux case: the first kernel to support TLS was 2.6.0
1313 if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
1314 kernel_supports_tls = false;
1315 else if (buf.release[0] == '2' && buf.release[1] == '.' &&
1316 buf.release[2] >= '0' && buf.release[2] < '6' &&
1317 buf.release[3] == '.') // 2.0 - 2.5
1318 kernel_supports_tls = false;
1320 kernel_supports_tls = true;
1321 } else { // some other kernel, we'll be optimistic
1322 kernel_supports_tls = true;
1324 // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
1326 # endif // HAVE_DECL_UNAME
1329 // __THROW is defined in glibc systems. It means, counter-intuitively,
1330 // "This function will never throw an exception." It's an optional
1331 // optimization tool, but we may need to use it to match glibc prototypes.
1332 #ifndef __THROW // I guess we're not on a glibc system
1333 # define __THROW // __THROW is just an optimization, so ok to make it ""
1336 // -------------------------------------------------------------------------
1337 // Stack traces kept for sampled allocations
1338 // The following state is protected by pageheap_lock_.
1339 // -------------------------------------------------------------------------
1341 // size/depth are made the same size as a pointer so that some generic
1342 // code below can conveniently cast them back and forth to void*.
1343 static const int kMaxStackDepth = 31;
1345 uintptr_t size; // Size of object
1346 uintptr_t depth; // Number of PC values stored in array below
1347 void* stack[kMaxStackDepth];
1349 static PageHeapAllocator<StackTrace> stacktrace_allocator;
1350 static Span sampled_objects;
1352 // -------------------------------------------------------------------------
1353 // Map from page-id to per-page data
1354 // -------------------------------------------------------------------------
1356 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
1357 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
1358 // because sometimes the sizeclass is all the information we need.
1360 // Selector class -- general selector uses 3-level map
1361 template <int BITS> class MapSelector {
1363 typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
1364 typedef PackedCache<BITS, uint64_t> CacheType;
1368 // On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1369 // can be excluded from the PageMap key.
1370 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1372 static const size_t kBitsUnusedOn64Bit = 16;
1374 static const size_t kBitsUnusedOn64Bit = 0;
1377 // A three-level map for 64-bit machines
1378 template <> class MapSelector<64> {
1380 typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
1381 typedef PackedCache<64, uint64_t> CacheType;
1384 // A two-level map for 32-bit machines
1385 template <> class MapSelector<32> {
1387 typedef TCMalloc_PageMap2<32 - kPageShift> Type;
1388 typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
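// Illustrative arithmetic: with kPageShift = 12, the 64-bit selector covers 64 - 12 - 16 = 36 bits
// of page number with a three-level radix tree, while the 32-bit selector only needs 32 - 12 = 20
// bits and uses a two-level map plus a cache with 16-bit entries.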
1391 // -------------------------------------------------------------------------
1392 // Page-level allocator
1393 // * Eager coalescing
1395 // Heap for page-level allocation. We allow allocating and freeing
1396 // contiguous runs of pages (called "spans").
1397 // -------------------------------------------------------------------------
1399 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1400 // The page heap maintains a free list for spans that are no longer in use by
1401 // the central cache or any thread caches. We use a background thread to
1402 // periodically scan the free list and release a percentage of it back to the OS.
1404 // If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
1405 // background thread:
1407 // - pauses for kScavengeDelayInSeconds
1408 // - returns to the OS a percentage of the memory that remained unused during
1409 // that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
1410 // The goal of this strategy is to reduce memory pressure in a timely fashion
1411 // while avoiding thrashing the OS allocator.
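// Illustrative example of one pass: if min_free_committed_pages_since_last_scavenge_ is 1000 pages
// and kScavengePercentage is 0.5, scavenge() targets releasing about 500 pages (~2MB with 4KB
// pages), but never shrinks free_committed_pages_ below kMinimumFreeCommittedPageCount.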
1413 // Time delay before the page heap scavenger will consider returning pages to the system.
1415 static const int kScavengeDelayInSeconds = 2;
1417 // Approximate percentage of free committed pages to return to the OS in one scavenge.
1419 static const float kScavengePercentage = .5f;
1421 // Number of span lists to keep spans in when memory is returned.
1422 static const int kMinSpanListsWithSpans = 32;
1424 // Number of free committed pages that we want to keep around. The minimum number of pages used when there
1425 // is 1 span in each of the first kMinSpanListsWithSpans spanlists. Currently 528 pages.
1426 static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
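// For reference: 32 * ((1 + 32) / 2) = 32 * 16.5 = 528, matching the "528 pages" above (roughly
// 2MB of committed memory with 4KB pages).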
1430 static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
1432 class TCMalloc_PageHeap {
1436 // Allocate a run of "n" pages. Returns zero if out of memory.
1437 Span* New(Length n);
1439 // Delete the span "[p, p+n-1]".
1440 // REQUIRES: span was returned by earlier call to New() and
1441 // has not yet been deleted.
1442 void Delete(Span* span);
1444 // Mark an allocated span as being used for small objects of the
1445 // specified size-class.
1446 // REQUIRES: span was returned by an earlier call to New()
1447 // and has not yet been deleted.
1448 void RegisterSizeClass(Span* span, size_t sc);
1450 // Split an allocated span into two spans: one of length "n" pages
1451 // followed by another span of length "span->length - n" pages.
1452 // Modifies "*span" to point to the first span of length "n" pages.
1453 // Returns a pointer to the second span.
1455 // REQUIRES: "0 < n < span->length"
1456 // REQUIRES: !span->free
1457 // REQUIRES: span->sizeclass == 0
1458 Span* Split(Span* span, Length n);
1460 // Return the descriptor for the specified page.
1461 inline Span* GetDescriptor(PageID p) const {
1462 return reinterpret_cast<Span*>(pagemap_.get(p));
1465 inline Span* GetDescriptorEnsureSafe(PageID p)
1467 pagemap_.Ensure(p, 1);
1468 return GetDescriptor(p);
1471 size_t ReturnedBytes() const;
1473 // Return number of bytes allocated from system
1474 inline uint64_t SystemBytes() const { return system_bytes_; }
1476 // Return number of free bytes in heap
1477 uint64_t FreeBytes() const {
1478 return (static_cast<uint64_t>(free_pages_) << kPageShift);
1482 size_t CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted);
1484 // Release all pages on the free list for reuse by the OS:
1485 void ReleaseFreePages();
1486 void ReleaseFreeList(Span*, Span*);
1488 // Return 0 if we have no information, or else the correct sizeclass for p.
1489 // Reads and writes to pagemap_cache_ do not require locking.
1490 // The entries are 64 bits on 64-bit hardware and 16 bits on
1491 // 32-bit hardware, and we don't mind raciness as long as each read of
1492 // an entry yields a valid entry, not a partially updated entry.
1493 size_t GetSizeClassIfCached(PageID p) const {
1494 return pagemap_cache_.GetOrDefault(p, 0);
1496 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
1499 // Pick the appropriate map and cache types based on pointer size
1500 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
1501 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
1503 mutable PageMapCache pagemap_cache_;
1505 // We segregate spans of a given size into two circular linked
1506 // lists: one for normal spans, and one for spans whose memory
1507 // has been returned to the system.
1513 // List of free spans of length >= kMaxPages
1516 // Array mapping from span length to a doubly linked list of free spans
1517 SpanList free_[kMaxPages];
1519 // Number of pages kept in free lists
1520 uintptr_t free_pages_;
1522 // Used for hardening
1525 // Bytes allocated from system
1526 uint64_t system_bytes_;
1528 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1529 // Number of pages kept in free lists that are still committed.
1530 Length free_committed_pages_;
1532 // Minimum number of free committed pages since last scavenge. (Can be 0 if
1533 // we've committed new pages since the last scavenge.)
1534 Length min_free_committed_pages_since_last_scavenge_;
1537 bool GrowHeap(Length n);
1539 // REQUIRES span->length >= n
1540 // Remove span from its free list, and move any leftover part of
1541 // span into appropriate free lists. Also update "span" to have
1542 // length exactly "n" and mark it as non-free so it can be returned
1545 // "released" is true iff "span" was found on a "returned" list.
1546 void Carve(Span* span, Length n, bool released);
1548 void RecordSpan(Span* span) {
1549 pagemap_.set(span->start, span);
1550 if (span->length > 1) {
1551 pagemap_.set(span->start + span->length - 1, span);
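// Illustrative example (not in the original): for a 3-page span starting at page P, RecordSpan()
// maps pages P and P + 2 to the span, so a lookup at either boundary page finds it; this is
// presumably what lets Delete() locate neighbouring spans when coalescing.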
1555 // Allocate a large span of length == n. If successful, returns a
1556 // span of exactly the specified length. Else, returns NULL.
1557 Span* AllocLarge(Length n);
1559 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1560 // Incrementally release some memory to the system.
1561 // IncrementalScavenge(n) is called whenever n pages are freed.
1562 void IncrementalScavenge(Length n);
1565 // Number of pages to deallocate before doing more scavenging
1566 int64_t scavenge_counter_;
1568 // Index of last free list we scavenged
1569 size_t scavenge_index_;
1572 friend class FastMallocZone;
1575 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1576 void initializeScavenger();
1577 ALWAYS_INLINE void signalScavenger();
1579 ALWAYS_INLINE bool shouldScavenge() const;
1581 #if HAVE(DISPATCH_H) || OS(WIN)
1582 void periodicScavenge();
1583 ALWAYS_INLINE bool isScavengerSuspended();
1584 ALWAYS_INLINE void scheduleScavenger();
1585 ALWAYS_INLINE void rescheduleScavenger();
1586 ALWAYS_INLINE void suspendScavenger();
1589 #if HAVE(DISPATCH_H)
1590 dispatch_queue_t m_scavengeQueue;
1591 dispatch_source_t m_scavengeTimer;
1592 bool m_scavengingSuspended;
1594 static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
1595 HANDLE m_scavengeQueueTimer;
1597 static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
1598 NO_RETURN void scavengerThread();
1600 // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds,
1601 // or whether it is blocked waiting for more pages to be deleted.
1602 bool m_scavengeThreadActive;
1604 pthread_mutex_t m_scavengeMutex;
1605 pthread_cond_t m_scavengeCondition;
1608 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1611 void TCMalloc_PageHeap::init()
1613 pagemap_.init(MetaDataAlloc);
1614 pagemap_cache_ = PageMapCache(0);
1617 entropy_ = HARDENING_ENTROPY;
1619 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1620 free_committed_pages_ = 0;
1621 min_free_committed_pages_since_last_scavenge_ = 0;
1622 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1624 scavenge_counter_ = 0;
1625 // Start scavenging at kMaxPages list
1626 scavenge_index_ = kMaxPages-1;
1627 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
1628 DLL_Init(&large_.normal, entropy_);
1629 DLL_Init(&large_.returned, entropy_);
1630 for (size_t i = 0; i < kMaxPages; i++) {
1631 DLL_Init(&free_[i].normal, entropy_);
1632 DLL_Init(&free_[i].returned, entropy_);
1635 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1636 initializeScavenger();
1637 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1640 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1642 #if HAVE(DISPATCH_H)
1644 void TCMalloc_PageHeap::initializeScavenger()
1646 m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocScavenger", NULL);
1647 m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
1648 uint64_t scavengeDelayInNanoseconds = kScavengeDelayInSeconds * NSEC_PER_SEC;
1649 dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, scavengeDelayInNanoseconds);
1650 dispatch_source_set_timer(m_scavengeTimer, startTime, scavengeDelayInNanoseconds, scavengeDelayInNanoseconds / 10);
1651 dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
1652 m_scavengingSuspended = true;
1655 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
1657 ASSERT(pageheap_lock.IsHeld());
1658 return m_scavengingSuspended;
1661 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1663 ASSERT(pageheap_lock.IsHeld());
1664 m_scavengingSuspended = false;
1665 dispatch_resume(m_scavengeTimer);
1668 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1670 // Nothing to do here for libdispatch.
1673 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
1675 ASSERT(pageheap_lock.IsHeld());
1676 m_scavengingSuspended = true;
1677 dispatch_suspend(m_scavengeTimer);
1682 void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
1684 static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
1687 void TCMalloc_PageHeap::initializeScavenger()
1689 m_scavengeQueueTimer = 0;
1692 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
1694 ASSERT(pageheap_lock.IsHeld());
1695 return !m_scavengeQueueTimer;
1698 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1700 // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because
1701 // Windows will fire the timer event even when the function is already running.
1702 ASSERT(pageheap_lock.IsHeld());
1703 CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
1706 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1708 // We must delete the timer and create it again, because it is not possible to retrigger a timer on Windows.
1710 scheduleScavenger();
1713 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
1715 ASSERT(pageheap_lock.IsHeld());
1716 HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
1717 m_scavengeQueueTimer = 0;
1718 DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
1723 void TCMalloc_PageHeap::initializeScavenger()
1725 // Create a non-recursive mutex.
1726 #if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
1727 pthread_mutex_init(&m_scavengeMutex, 0);
1729 pthread_mutexattr_t attr;
1730 pthread_mutexattr_init(&attr);
1731 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
1733 pthread_mutex_init(&m_scavengeMutex, &attr);
1735 pthread_mutexattr_destroy(&attr);
1738 pthread_cond_init(&m_scavengeCondition, 0);
1739 m_scavengeThreadActive = true;
1741 pthread_create(&thread, 0, runScavengerThread, this);
1744 void* TCMalloc_PageHeap::runScavengerThread(void* context)
1746 static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
1748 // Without this, Visual Studio will complain that this method does not return a value.
1753 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1755 // shouldScavenge() may be called only while the pageheap_lock spinlock is held; in addition,
1756 // m_scavengeThreadActive is only set to false while pageheap_lock is held, so the caller must
1757 // acquire pageheap_lock before calling this method. If the scavenger thread is sleeping and
1758 // shouldScavenge() indicates there is memory to free, the scavenger thread is signalled to start.
1759 ASSERT(pageheap_lock.IsHeld());
1760 if (!m_scavengeThreadActive && shouldScavenge())
1761 pthread_cond_signal(&m_scavengeCondition);
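// scavenge() pushes committed-but-unused pages back to the operating system. The target below is
// derived from the low-water mark of free committed pages observed since the last scavenge: release
// roughly kScavengePercentage of that minimum, but never drop below kMinimumFreeCommittedPageCount.
// Illustrative arithmetic (made-up numbers, not the real constants): with 1000 free committed pages,
// a low-water mark of 800 and a scavenge percentage of 0.5, pagesToRelease is 400 and the target is
// max(kMinimumFreeCommittedPageCount, 1000 - 400) = 600, assuming the minimum constant is below 600.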
1766 void TCMalloc_PageHeap::scavenge()
1768 size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
1769 size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
1771 Length lastFreeCommittedPages = free_committed_pages_;
1772 while (free_committed_pages_ > targetPageCount) {
1774 for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
1775 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
1776 // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list;
1777 // otherwise return only half of the list at a time so spans of size 1 are not the only ones left.
1778 size_t length = DLL_Length(&slist->normal, entropy_);
1779 size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
1780 for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++) {
1781 Span* s = slist->normal.prev(entropy_);
1782 DLL_Remove(s, entropy_);
1783 ASSERT(!s->decommitted);
1784 if (!s->decommitted) {
1785 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1786 static_cast<size_t>(s->length << kPageShift));
1787 ASSERT(free_committed_pages_ >= s->length);
1788 free_committed_pages_ -= s->length;
1789 s->decommitted = true;
1791 DLL_Prepend(&slist->returned, s, entropy_);
1795 if (lastFreeCommittedPages == free_committed_pages_)
1797 lastFreeCommittedPages = free_committed_pages_;
1800 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1803 ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
1805 return free_committed_pages_ > kMinimumFreeCommittedPageCount;
1808 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
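// New() is the page-level allocation entry point: it looks for the first size class >= n whose
// normal (committed) or returned (decommitted) list is non-empty and carves the request out of that
// span; if every per-size list up to kMaxPages is empty it falls back to AllocLarge(), and only then
// grows the heap and retries.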
1810 inline Span* TCMalloc_PageHeap::New(Length n) {
1814 // Find first size >= n that has a non-empty list
1815 for (Length s = n; s < kMaxPages; s++) {
1817 bool released = false;
1818 if (!DLL_IsEmpty(&free_[s].normal, entropy_)) {
1819 // Found normal span
1820 ll = &free_[s].normal;
1821 } else if (!DLL_IsEmpty(&free_[s].returned, entropy_)) {
1822 // Found returned span; reallocate it
1823 ll = &free_[s].returned;
1826 // Keep looking in larger classes
1830 Span* result = ll->next(entropy_);
1831 Carve(result, n, released);
1832 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1833 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1834 // free committed pages count.
1835 ASSERT(free_committed_pages_ >= n);
1836 free_committed_pages_ -= n;
1837 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1838 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1839 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1845 Span* result = AllocLarge(n);
1846 if (result != NULL) {
1847 ASSERT_SPAN_COMMITTED(result);
1851 // Grow the heap and try again
1860 Span* TCMalloc_PageHeap::AllocLarge(Length n) {
1861 // find the best span (closest to n in size).
1862 // The following loops implement address-ordered best-fit: prefer the shortest span of at least n pages, breaking ties by lower start address.
1863 bool from_released = false;
1866 // Search through normal list
1867 for (Span* span = large_.normal.next(entropy_);
1868 span != &large_.normal;
1869 span = span->next(entropy_)) {
1870 if (span->length >= n) {
1872 || (span->length < best->length)
1873 || ((span->length == best->length) && (span->start < best->start))) {
1875 from_released = false;
1880 // Search through released list in case it has a better fit
1881 for (Span* span = large_.returned.next(entropy_);
1882 span != &large_.returned;
1883 span = span->next(entropy_)) {
1884 if (span->length >= n) {
1886 || (span->length < best->length)
1887 || ((span->length == best->length) && (span->start < best->start))) {
1889 from_released = true;
1895 Carve(best, n, from_released);
1896 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1897 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1898 // free committed pages count.
1899 ASSERT(free_committed_pages_ >= n);
1900 free_committed_pages_ -= n;
1901 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1902 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1903 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1911 Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
1913 ASSERT(n < span->length);
1914 ASSERT(!span->free);
1915 ASSERT(span->sizeclass == 0);
1916 Event(span, 'T', n);
1918 const Length extra = span->length - n;
1919 Span* leftover = NewSpan(span->start + n, extra);
1920 Event(leftover, 'U', extra);
1921 RecordSpan(leftover);
1922 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
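// Carve() takes a span off its free list and turns its first n pages into the allocation result:
// if the span was decommitted it is recommitted with a single TCMalloc_SystemCommit call, any
// leftover pages become a new span on the appropriate normal free list, and the pagemap entry for
// the last carved page is updated so later coalescing still finds the right descriptor.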
1928 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
1930 DLL_Remove(span, entropy_);
1932 Event(span, 'A', n);
1935 // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time.
1936 ASSERT(span->decommitted);
1937 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
1938 span->decommitted = false;
1939 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1940 free_committed_pages_ += span->length;
1944 const int extra = static_cast<int>(span->length - n);
1947 Span* leftover = NewSpan(span->start + n, extra);
1949 leftover->decommitted = false;
1950 Event(leftover, 'S', extra);
1951 RecordSpan(leftover);
1953 // Place leftover span on appropriate free list
1954 SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
1955 Span* dst = &listpair->normal;
1956 DLL_Prepend(dst, leftover, entropy_);
1959 pagemap_.set(span->start + n - 1, span);
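// When two adjacent free spans with different commit states are coalesced, the merged span must end
// up in one consistent state. mergeDecommittedStates() resolves the mismatch by releasing whichever
// half is still committed, so the combined span is treated as decommitted.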
1963 static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
1965 if (destination->decommitted && !other->decommitted) {
1966 TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
1967 static_cast<size_t>(other->length << kPageShift));
1968 } else if (other->decommitted && !destination->decommitted) {
1969 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
1970 static_cast<size_t>(destination->length << kPageShift));
1971 destination->decommitted = true;
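// Delete() returns a span to the page heap: it coalesces with free neighbors located through the
// pagemap boundary entries, then prepends the merged span to the normal or returned list depending
// on whether it ended up decommitted, and finally fixes up the free-committed-pages accounting used
// by the background scavenger (or triggers IncrementalScavenge when there is no background thread).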
1975 inline void TCMalloc_PageHeap::Delete(Span* span) {
1977 ASSERT(!span->free);
1978 ASSERT(span->length > 0);
1979 ASSERT(GetDescriptor(span->start) == span);
1980 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
1981 span->sizeclass = 0;
1982 #ifndef NO_TCMALLOC_SAMPLES
1986 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1987 // necessary. We do not bother resetting the stale pagemap
1988 // entries for the pieces we are merging together because we only
1989 // care about the pagemap entries for the boundaries.
1990 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1991 // Track the total size of the neighboring free spans that are committed.
1992 Length neighboringCommittedSpansLength = 0;
1994 const PageID p = span->start;
1995 const Length n = span->length;
1996 Span* prev = GetDescriptor(p-1);
1997 if (prev != NULL && prev->free) {
1998 // Merge preceding span into this span
1999 ASSERT(prev->start + prev->length == p);
2000 const Length len = prev->length;
2001 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2002 if (!prev->decommitted)
2003 neighboringCommittedSpansLength += len;
2005 mergeDecommittedStates(span, prev);
2006 DLL_Remove(prev, entropy_);
2009 span->length += len;
2010 pagemap_.set(span->start, span);
2011 Event(span, 'L', len);
2013 Span* next = GetDescriptor(p+n);
2014 if (next != NULL && next->free) {
2015 // Merge next span into this span
2016 ASSERT(next->start == p+n);
2017 const Length len = next->length;
2018 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2019 if (!next->decommitted)
2020 neighboringCommittedSpansLength += len;
2022 mergeDecommittedStates(span, next);
2023 DLL_Remove(next, entropy_);
2025 span->length += len;
2026 pagemap_.set(span->start + span->length - 1, span);
2027 Event(span, 'R', len);
2030 Event(span, 'D', span->length);
2032 if (span->decommitted) {
2033 if (span->length < kMaxPages)
2034 DLL_Prepend(&free_[span->length].returned, span, entropy_);
2036 DLL_Prepend(&large_.returned, span, entropy_);
2038 if (span->length < kMaxPages)
2039 DLL_Prepend(&free_[span->length].normal, span, entropy_);
2041 DLL_Prepend(&large_.normal, span, entropy_);
2045 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2046 if (span->decommitted) {
2047 // If the merged span is decommitted, that means we decommitted any neighboring spans that were
2048 // committed. Update the free committed pages count.
2049 free_committed_pages_ -= neighboringCommittedSpansLength;
2050 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
2051 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
2053 // If the merged span remains committed, add the deleted span's size to the free committed pages count.
2054 free_committed_pages_ += n;
2057 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
2060 IncrementalScavenge(n);
2066 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
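// IncrementalScavenge() is the foreground alternative to the background scavenger thread: every
// Delete() feeds the number of freed pages into scavenge_counter_, and once the counter goes
// negative, one span from the normal lists (scanned round-robin via scavenge_index_) is released
// back to the system and moved to the corresponding returned list.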
2067 void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
2068 // Fast path; not yet time to release memory
2069 scavenge_counter_ -= n;
2070 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
2072 // If there is nothing to release, wait until this many pages have been freed before
2073 // scavenging again. With 4K pages and a delay of 1 << 8, this comes to 1MB of memory.
2074 static const size_t kDefaultReleaseDelay = 1 << 8;
2076 // Find index of free list to scavenge
2077 size_t index = scavenge_index_ + 1;
2078 uintptr_t entropy = entropy_;
2079 for (size_t i = 0; i < kMaxPages+1; i++) {
2080 if (index > kMaxPages) index = 0;
2081 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
2082 if (!DLL_IsEmpty(&slist->normal, entropy)) {
2083 // Release the last span on the normal portion of this list
2084 Span* s = slist->normal.prev(entropy);
2085 DLL_Remove(s, entropy_);
2086 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2087 static_cast<size_t>(s->length << kPageShift));
2088 s->decommitted = true;
2089 DLL_Prepend(&slist->returned, s, entropy);
2091 scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
2093 if (index == kMaxPages && !DLL_IsEmpty(&slist->normal, entropy))
2094 scavenge_index_ = index - 1;
2096 scavenge_index_ = index;
2102 // Nothing to scavenge, delay for a while
2103 scavenge_counter_ = kDefaultReleaseDelay;
2107 void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
2108 // Associate span object with all interior pages as well
2109 ASSERT(!span->free);
2110 ASSERT(GetDescriptor(span->start) == span);
2111 ASSERT(GetDescriptor(span->start+span->length-1) == span);
2112 Event(span, 'C', sc);
2113 span->sizeclass = static_cast<unsigned int>(sc);
2114 for (Length i = 1; i < span->length-1; i++) {
2115 pagemap_.set(span->start+i, span);
2119 size_t TCMalloc_PageHeap::ReturnedBytes() const {
2121 for (unsigned s = 0; s < kMaxPages; s++) {
2122 const int r_length = DLL_Length(&free_[s].returned, entropy_);
2123 unsigned r_pages = s * r_length;
2124 result += r_pages << kPageShift;
2127 for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s->next(entropy_))
2128 result += s->length << kPageShift;
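// GrowHeap() asks the system for more address space. It requests at least kMinSystemAlloc pages
// (retrying with exactly n pages if the larger request fails), records the growth in system_bytes_,
// makes sure the pagemap covers the new range plus one page on each side, and then builds a span
// for the new memory and Delete()s it so normal coalescing threads it onto the free lists.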
2132 bool TCMalloc_PageHeap::GrowHeap(Length n) {
2133 ASSERT(kMaxPages >= kMinSystemAlloc);
2134 if (n > kMaxValidPages) return false;
2135 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
2137 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2140 // Try growing just "n" pages
2142 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2144 if (ptr == NULL) return false;
2146 ask = actual_size >> kPageShift;
2148 uint64_t old_system_bytes = system_bytes_;
2149 system_bytes_ += (ask << kPageShift);
2150 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
2153 // If we already have a lot of pages allocated, just preallocate a bunch of
2154 // memory for the page map. This prevents fragmentation by pagemap metadata
2155 // when a program keeps allocating and freeing large blocks.
2157 if (old_system_bytes < kPageMapBigAllocationThreshold
2158 && system_bytes_ >= kPageMapBigAllocationThreshold) {
2159 pagemap_.PreallocateMoreMemory();
2162 // Make sure pagemap_ has entries for all of the new pages.
2163 // Plus ensure one before and one after so coalescing code
2164 // does not need bounds-checking.
2165 if (pagemap_.Ensure(p-1, ask+2)) {
2166 // Pretend the new area is allocated and then Delete() it to
2167 // cause any necessary coalescing to occur.
2169 // We do not adjust free_pages_ here since Delete() will do it for us.
2170 Span* span = NewSpan(p, ask);
2176 // We could not allocate memory within "pagemap_"
2177 // TODO: Once we can return memory to the system, return the new span
2182 bool TCMalloc_PageHeap::Check() {
2183 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2184 size_t totalFreeCommitted = 0;
2186 ASSERT(free_[0].normal.next(entropy_) == &free_[0].normal);
2187 ASSERT(free_[0].returned.next(entropy_) == &free_[0].returned);
2188 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2189 totalFreeCommitted = CheckList(&large_.normal, kMaxPages, 1000000000, false);
2191 CheckList(&large_.normal, kMaxPages, 1000000000, false);
2193 CheckList(&large_.returned, kMaxPages, 1000000000, true);
2194 for (Length s = 1; s < kMaxPages; s++) {
2195 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2196 totalFreeCommitted += CheckList(&free_[s].normal, s, s, false);
2198 CheckList(&free_[s].normal, s, s, false);
2200 CheckList(&free_[s].returned, s, s, true);
2202 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2203 ASSERT(totalFreeCommitted == free_committed_pages_);
2209 size_t TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) {
2213 size_t TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
2214 size_t freeCount = 0;
2215 for (Span* s = list->next(entropy_); s != list; s = s->next(entropy_)) {
2216 CHECK_CONDITION(s->free);
2217 CHECK_CONDITION(s->length >= min_pages);
2218 CHECK_CONDITION(s->length <= max_pages);
2219 CHECK_CONDITION(GetDescriptor(s->start) == s);
2220 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
2221 CHECK_CONDITION(s->decommitted == decommitted);
2222 freeCount += s->length;
2228 void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) {
2229 // Walk backwards through list so that when we push these
2230 // spans on the "returned" list, we preserve the order.
2231 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2232 size_t freePageReduction = 0;
2235 while (!DLL_IsEmpty(list, entropy_)) {
2236 Span* s = list->prev(entropy_);
2238 DLL_Remove(s, entropy_);
2239 s->decommitted = true;
2240 DLL_Prepend(returned, s, entropy_);
2241 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2242 static_cast<size_t>(s->length << kPageShift));
2243 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2244 freePageReduction += s->length;
2248 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2249 free_committed_pages_ -= freePageReduction;
2250 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
2251 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
2255 void TCMalloc_PageHeap::ReleaseFreePages() {
2256 for (Length s = 0; s < kMaxPages; s++) {
2257 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
2259 ReleaseFreeList(&large_.normal, &large_.returned);
2263 //-------------------------------------------------------------------
// Data kept per size-class in thread cache
2265 //-------------------------------------------------------------------
2267 class TCMalloc_ThreadCache_FreeList {
2269 HardenedSLL list_; // Linked list of nodes
2270 uint16_t length_; // Current length
2271 uint16_t lowater_; // Low water mark for list length
2272 uintptr_t entropy_; // Entropy source for hardening
2275 void Init(uintptr_t entropy) {
2276 list_.setValue(NULL);
2280 #if ENABLE(TCMALLOC_HARDENING)
2285 // Return current length of list
2286 int length() const {
2291 bool empty() const {
2295 // Low-water mark management
2296 int lowwatermark() const { return lowater_; }
2297 void clear_lowwatermark() { lowater_ = length_; }
2299 ALWAYS_INLINE void Push(HardenedSLL ptr) {
2300 SLL_Push(&list_, ptr, entropy_);
2304 void PushRange(int N, HardenedSLL start, HardenedSLL end) {
2305 SLL_PushRange(&list_, start, end, entropy_);
2306 length_ = length_ + static_cast<uint16_t>(N);
2309 void PopRange(int N, HardenedSLL* start, HardenedSLL* end) {
2310 SLL_PopRange(&list_, N, start, end, entropy_);
2311 ASSERT(length_ >= N);
2312 length_ = length_ - static_cast<uint16_t>(N);
2313 if (length_ < lowater_) lowater_ = length_;
2316 ALWAYS_INLINE void* Pop() {
2319 if (length_ < lowater_) lowater_ = length_;
2320 return SLL_Pop(&list_, entropy_).value();
2323 // Runs through the linked list to check that it can be walked safely, that every
2324 // node is still poisoned, and that 'missing' does not appear anywhere in the list.
2326 NEVER_INLINE void Validate(HardenedSLL missing, size_t size) {
2327 HardenedSLL node = list_;
2330 RELEASE_ASSERT(node != missing);
2331 RELEASE_ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
2332 node = SLL_Next(node, entropy_);
2336 template <class Finder, class Reader>
2337 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2339 for (HardenedSLL nextObject = list_; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_)))
2340 finder.visit(nextObject.value());
2344 //-------------------------------------------------------------------
2345 // Data kept per thread
2346 //-------------------------------------------------------------------
2348 class TCMalloc_ThreadCache {
2350 typedef TCMalloc_ThreadCache_FreeList FreeList;
2352 typedef DWORD ThreadIdentifier;
2354 typedef pthread_t ThreadIdentifier;
2357 size_t size_; // Combined size of data
2358 ThreadIdentifier tid_; // Which thread owns it
2359 bool in_setspecific_; // Called pthread_setspecific?
2360 FreeList list_[kNumClasses]; // Array indexed by size-class
2362 // We sample allocations, biased by the size of the allocation
2363 uint32_t rnd_; // Cheap random number generator
2364 size_t bytes_until_sample_; // Bytes until we sample next
2366 uintptr_t entropy_; // Entropy value used for hardening
2368 // Allocate a new heap. REQUIRES: pageheap_lock is held.
2369 static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid, uintptr_t entropy);
2371 // Use only as pthread thread-specific destructor function.
2372 static void DestroyThreadCache(void* ptr);
2374 // All ThreadCache objects are kept in a linked list (for stats collection)
2375 TCMalloc_ThreadCache* next_;
2376 TCMalloc_ThreadCache* prev_;
2378 void Init(ThreadIdentifier tid, uintptr_t entropy);
2381 // Accessors (mostly just for printing stats)
2382 int freelist_length(size_t cl) const { return list_[cl].length(); }
2384 // Total byte size in cache
2385 size_t Size() const { return size_; }
2387 ALWAYS_INLINE void* Allocate(size_t size);
2388 void Deallocate(HardenedSLL ptr, size_t size_class);
2390 ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
2391 void ReleaseToCentralCache(size_t cl, int N);
2395 // Record allocation of "k" bytes. Return true iff allocation
2396 // should be sampled
2397 bool SampleAllocation(size_t k);
2399 // Pick next sampling point
2400 void PickNextSample(size_t k);
2402 static void InitModule();
2403 static void InitTSD();
2404 static TCMalloc_ThreadCache* GetThreadHeap();
2405 static TCMalloc_ThreadCache* GetCache();
2406 static TCMalloc_ThreadCache* GetCacheIfPresent();
2407 static TCMalloc_ThreadCache* CreateCacheIfNecessary();
2408 static void DeleteCache(TCMalloc_ThreadCache* heap);
2409 static void BecomeIdle();
2410 static void RecomputeThreadCacheSize();
2412 template <class Finder, class Reader>
2413 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2415 for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
2416 list_[sizeClass].enumerateFreeObjects(finder, reader);
2420 //-------------------------------------------------------------------
// Global variables
2422 //-------------------------------------------------------------------
2424 // Central cache -- a collection of free-lists, one per size-class.
2425 // We have a separate lock per free-list to reduce contention.
2426 static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
2428 // Page-level allocator
2429 static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
2430 static bool phinited = false;
2432 // Avoid extra level of indirection by making "pageheap" be just an alias
2433 // of pageheap_memory.
2436 TCMalloc_PageHeap* m_pageHeap;
2439 static inline TCMalloc_PageHeap* getPageHeap()
2441 PageHeapUnion u = { &pageheap_memory[0] };
2442 return u.m_pageHeap;
2445 #define pageheap getPageHeap()
2447 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2449 #if HAVE(DISPATCH_H) || OS(WIN)
2451 void TCMalloc_PageHeap::periodicScavenge()
2453 SpinLockHolder h(&pageheap_lock);
2454 pageheap->scavenge();
2456 if (shouldScavenge()) {
2457 rescheduleScavenger();
2464 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
2466 ASSERT(pageheap_lock.IsHeld());
2467 if (isScavengerSuspended() && shouldScavenge())
2468 scheduleScavenger();
2473 void TCMalloc_PageHeap::scavengerThread()
2475 #if HAVE(PTHREAD_SETNAME_NP)
2476 pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
2480 pageheap_lock.Lock();
2481 if (!shouldScavenge()) {
2482 // Set to false so that signalScavenger() will check whether we need to be signalled.
2483 m_scavengeThreadActive = false;
2485 // We need to unlock now, as this thread will block on the condvar until scavenging is required.
2486 pageheap_lock.Unlock();
2488 // Block until there are enough free committed pages to release back to the system.
2489 pthread_mutex_lock(&m_scavengeMutex);
2490 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
2491 // After exiting the pthread_cond_wait, we hold the lock on m_scavengeMutex. Unlock it to prevent
2492 // deadlock next time round the loop.
2493 pthread_mutex_unlock(&m_scavengeMutex);
2495 // Set to true to prevent unnecessary signalling of the condvar.
2496 m_scavengeThreadActive = true;
2498 pageheap_lock.Unlock();
2500 // Wait for a while to calculate how much memory remains unused during this pause.
2501 sleep(kScavengeDelayInSeconds);
2504 SpinLockHolder h(&pageheap_lock);
2505 pageheap->scavenge();
2514 // If TLS is available, we also store a copy
2515 // of the per-thread object in a __thread variable
2516 // since __thread variables are faster to read
2517 // than pthread_getspecific(). We still need
2518 // pthread_setspecific() because __thread
2519 // variables provide no way to run cleanup
2520 // code when a thread is destroyed.
2522 static __thread TCMalloc_ThreadCache *threadlocal_heap;
2524 // Thread-specific key. Initialization here is somewhat tricky
2525 // because some Linux startup code invokes malloc() before it
2526 // is in a good enough state to handle pthread_key_create().
2527 // Therefore, we use TSD keys only after tsd_inited is set to true.
2528 // Until then, we use a slow path to get the heap object.
2529 static bool tsd_inited = false;
2530 static pthread_key_t heap_key;
2532 DWORD tlsIndex = TLS_OUT_OF_INDEXES;
2535 static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
2537 // Still do pthread_setspecific even if there's an alternate form
2538 // of thread-local storage in use, to benefit from the delete callback.
2539 pthread_setspecific(heap_key, heap);
2542 TlsSetValue(tlsIndex, heap);
2546 // Allocator for thread heaps
2547 static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
2549 // Linked list of heap objects. Protected by pageheap_lock.
2550 static TCMalloc_ThreadCache* thread_heaps = NULL;
2551 static int thread_heap_count = 0;
2553 // Overall thread cache size. Protected by pageheap_lock.
2554 static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
2556 // Global per-thread cache size. Writes are protected by
2557 // pageheap_lock. Reads are done without any locking, which should be
2558 // fine as long as size_t can be written atomically and we don't place
2559 // invariants between this variable and other pieces of state.
2560 static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
2562 //-------------------------------------------------------------------
2563 // Central cache implementation
2564 //-------------------------------------------------------------------
2566 void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
2570 #if ENABLE(TCMALLOC_HARDENING)
2573 DLL_Init(&empty_, entropy_);
2574 DLL_Init(&nonempty_, entropy_);
2579 ASSERT(cache_size_ <= kNumTransferEntries);
2582 void TCMalloc_Central_FreeList::ReleaseListToSpans(HardenedSLL start) {
2584 HardenedSLL next = SLL_Next(start, entropy_);
2585 ReleaseToSpans(start);
2590 ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
2591 const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
2592 Span* span = pageheap->GetDescriptor(p);
2593 ASSERT(span != NULL);
2594 ASSERT(span->refcount > 0);
2596 // If the span currently has no free objects (it is on the empty_ list), move it to the non-empty list, since we are about to add one.
2597 if (!span->objects) {
2598 DLL_Remove(span, entropy_);
2599 DLL_Prepend(&nonempty_, span, entropy_);
2600 Event(span, 'N', 0);
2603 // The following check is expensive, so it is disabled by default
2605 // Check that object does not occur in list
2607 for (HardenedSLL p = span->objects; p; p = SLL_Next(p, entropy_)) {
2608 ASSERT(p.value() != object.value());
2611 ASSERT(got + span->refcount ==
2612 (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
2617 if (span->refcount == 0) {
2618 Event(span, '#', 0);
2619 counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
2620 DLL_Remove(span, entropy_);
2622 // Release central list lock while operating on pageheap
2625 SpinLockHolder h(&pageheap_lock);
2626 pageheap->Delete(span);
2630 SLL_SetNext(object, span->objects, entropy_);
2631 span->objects.setValue(object.value());
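// To grow its transfer cache, a central free list steals a cache slot from some other size class.
// EvictRandomSizeClass() picks the victim by advancing a process-wide counter round-robin (updated
// racily on purpose); the caller first asks without force and, if nobody has a spare slot, asks
// again with force == true.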
2635 ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
2636 size_t locked_size_class, bool force) {
2637 static int race_counter = 0;
2638 int t = race_counter++; // Updated without a lock, but who cares.
2639 if (t >= static_cast<int>(kNumClasses)) {
2640 while (t >= static_cast<int>(kNumClasses)) {
2646 ASSERT(t < static_cast<int>(kNumClasses));
2647 if (t == static_cast<int>(locked_size_class)) return false;
2648 return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
2651 bool TCMalloc_Central_FreeList::MakeCacheSpace() {
2652 // Is there room in the cache?
2653 if (used_slots_ < cache_size_) return true;
2654 // Check if we can expand this cache?
2655 if (cache_size_ == kNumTransferEntries) return false;
2656 // Ok, we'll try to grab an entry from some other size class.
2657 if (EvictRandomSizeClass(size_class_, false) ||
2658 EvictRandomSizeClass(size_class_, true)) {
2659 // Succeeded in evicting, we're going to make our cache larger.
2668 class LockInverter {
2670 SpinLock *held_, *temp_;
2672 inline explicit LockInverter(SpinLock* held, SpinLock *temp)
2673 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
2674 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
2678 bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
2679 // Start with a quick check without taking a lock.
2680 if (cache_size_ == 0) return false;
2681 // We don't evict from a full cache unless we are 'forcing'.
2682 if (force == false && used_slots_ == cache_size_) return false;
2684 // Grab lock, but first release the other lock held by this thread. We use
2685 // the lock inverter to ensure that we never hold two size class locks
2686 // concurrently. That can create a deadlock because there is no well
2687 // defined nesting order.
2688 LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
2689 ASSERT(used_slots_ <= cache_size_);
2690 ASSERT(0 <= cache_size_);
2691 if (cache_size_ == 0) return false;
2692 if (used_slots_ == cache_size_) {
2693 if (force == false) return false;
2694 // ReleaseListToSpans releases the lock, so we have to make all the
2695 // updates to the central list before calling it.
2698 ReleaseListToSpans(tc_slots_[used_slots_].head);
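// InsertRange()/RemoveRange() implement the transfer cache: batches of exactly
// num_objects_to_move[size_class_] objects are parked in tc_slots_ so that one thread cache can hand
// a batch back and another can take it later without touching any span free lists. Batches of any
// other size, or overflow when the slots are full, fall through to the span-based paths.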
2705 void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
2706 SpinLockHolder h(&lock_);
2707 if (N == num_objects_to_move[size_class_] &&
2709 int slot = used_slots_++;
2711 ASSERT(slot < kNumTransferEntries);
2712 TCEntry *entry = &tc_slots_[slot];
2713 entry->head = start;
2717 ReleaseListToSpans(start);
2720 void TCMalloc_Central_FreeList::RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N) {
2724 SpinLockHolder h(&lock_);
2725 if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
2726 int slot = --used_slots_;
2728 TCEntry *entry = &tc_slots_[slot];
2729 *start = entry->head;
2734 // TODO: Prefetch multiple TCEntries?
2735 HardenedSLL tail = FetchFromSpansSafe();
2737 // We are completely out of memory.
2738 *start = *end = HardenedSLL::null();
2743 SLL_SetNext(tail, HardenedSLL::null(), entropy_);
2744 HardenedSLL head = tail;
2746 while (count < num) {
2747 HardenedSLL t = FetchFromSpans();
2749 SLL_Push(&head, t, entropy_);
2758 HardenedSLL TCMalloc_Central_FreeList::FetchFromSpansSafe() {
2759 HardenedSLL t = FetchFromSpans();
2762 t = FetchFromSpans();
2767 HardenedSLL TCMalloc_Central_FreeList::FetchFromSpans() {
2768 if (DLL_IsEmpty(&nonempty_, entropy_)) return HardenedSLL::null();
2769 Span* span = nonempty_.next(entropy_);
2771 ASSERT(span->objects);
2772 ASSERT_SPAN_COMMITTED(span);
2774 HardenedSLL result = span->objects;
2775 span->objects = SLL_Next(result, entropy_);
2776 if (!span->objects) {
2777 // Move to empty list
2778 DLL_Remove(span, entropy_);
2779 DLL_Prepend(&empty_, span, entropy_);
2780 Event(span, 'E', 0);
2786 // Fetch memory from the system and add to the central cache freelist.
2787 ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
2788 // Release central list lock while operating on pageheap
2790 const size_t npages = class_to_pages[size_class_];
2794 SpinLockHolder h(&pageheap_lock);
2795 span = pageheap->New(npages);
2796 if (span) pageheap->RegisterSizeClass(span, size_class_);
2800 MESSAGE("allocation failed: %d\n", ::GetLastError());
2802 MESSAGE("allocation failed: %d\n", errno);
2807 ASSERT_SPAN_COMMITTED(span);
2808 ASSERT(span->length == npages);
2809 // Cache sizeclass info eagerly. Locking is not necessary.
2810 // (Instead of being eager, we could just replace any stale info
2811 // about this span, but that seems to be no better in practice.)
2812 for (size_t i = 0; i < npages; i++) {
2813 pageheap->CacheSizeClass(span->start + i, size_class_);
2816 // Split the block into pieces and add to the free-list
2817 // TODO: coloring of objects to avoid cache conflicts?
2818 HardenedSLL head = HardenedSLL::null();
2819 char* start = reinterpret_cast<char*>(span->start << kPageShift);
2820 const size_t size = ByteSizeForClass(size_class_);
2821 char* ptr = start + (npages << kPageShift) - ((npages << kPageShift) % size);
2823 #if ENABLE(TCMALLOC_HARDENING)
2824 uint32_t startPoison = freedObjectStartPoison();
2825 uint32_t endPoison = freedObjectEndPoison();
2828 while (ptr > start) {
2830 HardenedSLL node = HardenedSLL::create(ptr);
2831 POISON_DEALLOCATION_EXPLICIT(ptr, size, startPoison, endPoison);
2832 SLL_SetNext(node, head, entropy_);
2836 ASSERT(ptr == start);
2837 ASSERT(ptr == head.value());
2840 HardenedSLL node = head;
2842 ASSERT(IS_DEFINITELY_POISONED(node.value(), size));
2843 node = SLL_Next(node, entropy_);
2847 span->objects = head;
2848 ASSERT(span->objects.value() == head.value());
2849 span->refcount = 0; // No sub-object in use yet
2851 // Add span to list of non-empty spans
2853 DLL_Prepend(&nonempty_, span, entropy_);
2857 //-------------------------------------------------------------------
2858 // TCMalloc_ThreadCache implementation
2859 //-------------------------------------------------------------------
2861 inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
2862 if (bytes_until_sample_ < k) {
2866 bytes_until_sample_ -= k;
2871 void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
2876 in_setspecific_ = false;
2878 #if ENABLE(TCMALLOC_HARDENING)
2881 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2882 list_[cl].Init(entropy_);
2885 // Initialize RNG -- run it for a bit to get to good values
2886 bytes_until_sample_ = 0;
2887 rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
2888 for (int i = 0; i < 100; i++) {
2889 PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
2893 void TCMalloc_ThreadCache::Cleanup() {
2894 // Put unused memory back into central cache
2895 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2896 if (list_[cl].length() > 0) {
2897 ReleaseToCentralCache(cl, list_[cl].length());
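// Allocate() serves a small request straight from this thread's free list for the size class,
// pulling a fresh batch from the central cache only when the list is empty. size_ tracks the bytes
// currently held in this thread cache, so it shrinks on Allocate() and grows on Deallocate().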
2902 ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
2903 ASSERT(size <= kMaxSize);
2904 const size_t cl = SizeClass(size);
2905 FreeList* list = &list_[cl];
2906 size_t allocationSize = ByteSizeForClass(cl);
2907 if (list->empty()) {
2908 FetchFromCentralCache(cl, allocationSize);
2909 if (list->empty()) return NULL;
2911 size_ -= allocationSize;
2912 void* result = list->Pop();
2915 RELEASE_ASSERT(IS_DEFINITELY_POISONED(result, allocationSize));
2916 POISON_ALLOCATION(result, allocationSize);
2920 inline void TCMalloc_ThreadCache::Deallocate(HardenedSLL ptr, size_t cl) {
2921 size_t allocationSize = ByteSizeForClass(cl);
2922 size_ += allocationSize;
2923 FreeList* list = &list_[cl];
2924 if (MAY_BE_POISONED(ptr.value(), allocationSize))
2925 list->Validate(ptr, allocationSize);
2927 POISON_DEALLOCATION(ptr.value(), allocationSize);
2929 // If enough data is free, put back into central cache
2930 if (list->length() > kMaxFreeListLength) {
2931 ReleaseToCentralCache(cl, num_objects_to_move[cl]);
2933 if (size_ >= per_thread_cache_size) Scavenge();
2936 // Remove some objects of class "cl" from central cache and add to thread heap
2937 ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
2938 int fetch_count = num_objects_to_move[cl];
2939 HardenedSLL start, end;
2940 central_cache[cl].RemoveRange(&start, &end, &fetch_count);
2941 list_[cl].PushRange(fetch_count, start, end);
2942 size_ += allocationSize * fetch_count;
2945 // Remove some objects of class "cl" from thread heap and add to central cache
2946 inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
2948 FreeList* src = &list_[cl];
2949 if (N > src->length()) N = src->length();
2950 size_ -= N*ByteSizeForClass(cl);
2952 // We return prepackaged chains of the correct size to the central cache.
2953 // TODO: Use the same format internally in the thread caches?
2954 int batch_size = num_objects_to_move[cl];
2955 while (N > batch_size) {
2956 HardenedSLL tail, head;
2957 src->PopRange(batch_size, &head, &tail);
2958 central_cache[cl].InsertRange(head, tail, batch_size);
2961 HardenedSLL tail, head;
2962 src->PopRange(N, &head, &tail);
2963 central_cache[cl].InsertRange(head, tail, N);
2966 // Release idle memory to the central cache
2967 inline void TCMalloc_ThreadCache::Scavenge() {
2968 // If the low-water mark for the free list is L, it means we would
2969 // not have had to allocate anything from the central cache even if
2970 // we had reduced the free list size by L. We aim to get closer to
2971 // that situation by dropping L/2 nodes from the free list. This
2972 // may not release much memory, but if so we will call scavenge again
2973 // pretty soon and the low-water marks will be high on that call.
2974 //int64 start = CycleClock::Now();
2976 for (size_t cl = 0; cl < kNumClasses; cl++) {
2977 FreeList* list = &list_[cl];
2978 const int lowmark = list->lowwatermark();
2980 const int drop = (lowmark > 1) ? lowmark/2 : 1;
2981 ReleaseToCentralCache(cl, drop);
2983 list->clear_lowwatermark();
2986 //int64 finish = CycleClock::Now();
2988 //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
2991 void TCMalloc_ThreadCache::PickNextSample(size_t k) {
2992 // Make next "random" number
2993 // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
2994 static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
2996 rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
2998 // Next point is "rnd_ % (sample_period)". I.e., average
2999 // increment is "sample_period/2".
3000 const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
3001 static int last_flag_value = -1;
3003 if (flag_value != last_flag_value) {
3004 SpinLockHolder h(&sample_period_lock);
3006 for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
3007 if (primes_list[i] >= flag_value) {
3011 sample_period = primes_list[i];
3012 last_flag_value = flag_value;
3015 bytes_until_sample_ += rnd_ % sample_period;
3017 if (k > (static_cast<size_t>(-1) >> 2)) {
3018 // If the user has asked for a huge allocation then it is possible
3019 // for the code below to loop infinitely. Just return (note that
3020 // this throws off the sampling accuracy somewhat, but a user who
3021 // is allocating more than 1G of memory at a time can live with a
3022 // minor inaccuracy in profiling of small allocations, and also
3023 // would rather not wait for the loop below to terminate).
3027 while (bytes_until_sample_ < k) {
3028 // Increase bytes_until_sample_ by enough average sampling periods
3029 // (sample_period >> 1) to allow us to sample past the current allocation.
3031 bytes_until_sample_ += (sample_period >> 1);
3034 bytes_until_sample_ -= k;
3037 void TCMalloc_ThreadCache::InitModule() {
3038 // There is a slight potential race here because of double-checked
3039 // locking idiom. However, as long as the program does a small
3040 // allocation before switching to multi-threaded mode, we will be
3041 // fine. We increase the chances of doing such a small allocation
3042 // by doing one in the constructor of the module_enter_exit_hook
3043 // object declared below.
3044 SpinLockHolder h(&pageheap_lock);
3046 uintptr_t entropy = HARDENING_ENTROPY;
3049 threadheap_allocator.Init(entropy);
3050 span_allocator.Init(entropy);
3051 span_allocator.New(); // Reduce cache conflicts
3052 span_allocator.New(); // Reduce cache conflicts
3053 stacktrace_allocator.Init(entropy);
3054 DLL_Init(&sampled_objects, entropy);
3055 for (size_t i = 0; i < kNumClasses; ++i) {
3056 central_cache[i].Init(i, entropy);
3061 FastMallocZone::init();
3066 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid, uintptr_t entropy) {
3067 // Create the heap and add it to the linked list
3068 TCMalloc_ThreadCache *heap = threadheap_allocator.New();
3069 heap->Init(tid, entropy);
3070 heap->next_ = thread_heaps;
3072 if (thread_heaps != NULL) thread_heaps->prev_ = heap;
3073 thread_heaps = heap;
3074 thread_heap_count++;
3075 RecomputeThreadCacheSize();
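// GetThreadHeap() has three lookup paths depending on the platform: the __thread variable when the
// kernel supports fast TLS, TlsGetValue() on Windows, and pthread_getspecific() everywhere else.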
3079 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
3081 // __thread is faster, but only when the kernel supports it
3082 if (KernelSupportsTLS())
3083 return threadlocal_heap;
3085 return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
3087 return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
3091 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
3092 TCMalloc_ThreadCache* ptr = NULL;
3096 ptr = GetThreadHeap();
3098 if (ptr == NULL) ptr = CreateCacheIfNecessary();
3102 // In deletion paths, we do not try to create a thread-cache. This is
3103 // because we may be in the thread destruction code and may have
3104 // already cleaned up the cache for this thread.
3105 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
3106 if (!tsd_inited) return NULL;
3107 void* const p = GetThreadHeap();
3108 return reinterpret_cast<TCMalloc_ThreadCache*>(p);
3111 void TCMalloc_ThreadCache::InitTSD() {
3112 ASSERT(!tsd_inited);
3113 pthread_key_create(&heap_key, DestroyThreadCache);
3115 tlsIndex = TlsAlloc();
3120 // We may have used a fake pthread_t for the main thread. Fix it.
3122 memset(&zero, 0, sizeof(zero));
3124 ASSERT(pageheap_lock.IsHeld());
3125 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3128 h->tid_ = GetCurrentThreadId();
3131 if (pthread_equal(h->tid_, zero)) {
3132 h->tid_ = pthread_self();
3138 TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
3139 // Initialize per-thread data if necessary
3140 TCMalloc_ThreadCache* heap = NULL;
3142 SpinLockHolder h(&pageheap_lock);
3149 me = GetCurrentThreadId();
3152 // Early on in glibc's life, we cannot even call pthread_self()
3155 memset(&me, 0, sizeof(me));
3157 me = pthread_self();
3161 // This may be a recursive malloc call from pthread_setspecific()
3162 // In that case, the heap for this thread has already been created
3163 // and added to the linked list. So we search for that first.
3164 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3166 if (h->tid_ == me) {
3168 if (pthread_equal(h->tid_, me)) {
3175 if (heap == NULL) heap = NewHeap(me, HARDENING_ENTROPY);
3178 // We call pthread_setspecific() outside the lock because it may
3179 // call malloc() recursively. The recursive call will never get
3180 // here again because it will find the already allocated heap in the
3181 // linked list of heaps.
3182 if (!heap->in_setspecific_ && tsd_inited) {
3183 heap->in_setspecific_ = true;
3184 setThreadHeap(heap);
3189 void TCMalloc_ThreadCache::BecomeIdle() {
3190 if (!tsd_inited) return; // No caches yet
3191 TCMalloc_ThreadCache* heap = GetThreadHeap();
3192 if (heap == NULL) return; // No thread cache to remove
3193 if (heap->in_setspecific_) return; // Do not disturb the active caller
3195 heap->in_setspecific_ = true;
3196 setThreadHeap(NULL);
3198 // Also update the copy in __thread
3199 threadlocal_heap = NULL;
3201 heap->in_setspecific_ = false;
3202 if (GetThreadHeap() == heap) {
3203 // Somehow heap got reinstated by a recursive call to malloc
3204 // from pthread_setspecific. We give up in this case.
3208 // We can now get rid of the heap
3212 void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
3213 // Note that "ptr" cannot be NULL since pthread promises not
3214 // to invoke the destructor on NULL values, but for safety we check anyway.
3216 if (ptr == NULL) return;
3218 // Prevent fast path of GetThreadHeap() from returning heap.
3219 threadlocal_heap = NULL;
3221 DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
3224 void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
3225 // Remove all memory from heap
3228 // Remove from linked list
3229 SpinLockHolder h(&pageheap_lock);
3230 if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
3231 if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
3232 if (thread_heaps == heap) thread_heaps = heap->next_;
3233 thread_heap_count--;
3234 RecomputeThreadCacheSize();
3236 threadheap_allocator.Delete(heap);
3239 void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
3240 // Divide available space across threads
3241 int n = thread_heap_count > 0 ? thread_heap_count : 1;
3242 size_t space = overall_thread_cache_size / n;
3244 // Limit to allowed range
3245 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
3246 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
3248 per_thread_cache_size = space;
3251 void TCMalloc_ThreadCache::Print() const {
3252 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3253 MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
3254 ByteSizeForClass(cl),
3256 list_[cl].lowwatermark());
3260 // Extract interesting stats
3261 struct TCMallocStats {
3262 uint64_t system_bytes; // Bytes alloced from system
3263 uint64_t thread_bytes; // Bytes in thread caches
3264 uint64_t central_bytes; // Bytes in central cache
3265 uint64_t transfer_bytes; // Bytes in central transfer cache
3266 uint64_t pageheap_bytes; // Bytes in page heap
3267 uint64_t metadata_bytes; // Bytes alloced for metadata
3270 // The constructor allocates an object to ensure that initialization
3271 // runs before main(), and therefore we do not have a chance to become
3272 // multi-threaded before initialization. We also create the TSD key
3273 // here. Presumably by the time this constructor runs, glibc is in
3274 // good enough shape to handle pthread_key_create().
3276 // The constructor also takes the opportunity to tell STL to use
3277 // tcmalloc. We want to do this early, before construct time, so
3278 // all user STL allocations go through tcmalloc (which works really well for STL).
3281 // The destructor prints stats when the program exits.
3282 class TCMallocGuard {
3286 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
3287 // Check whether the kernel also supports TLS (needs to happen at runtime)
3288 CheckIfKernelSupportsTLS();
3291 TCMalloc_ThreadCache::InitTSD();
3296 //-------------------------------------------------------------------
3297 // Helpers for the exported routines below
3298 //-------------------------------------------------------------------
3300 static inline bool CheckCachedSizeClass(void *ptr) {
3301 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3302 size_t cached_value = pageheap->GetSizeClassIfCached(p);
3303 return cached_value == 0 ||
3304 cached_value == pageheap->GetDescriptor(p)->sizeclass;
3307 static inline void* CheckedMallocResult(void *result)
3309 ASSERT(result == 0 || CheckCachedSizeClass(result));
3313 static inline void* SpanToMallocResult(Span *span) {
3314 ASSERT_SPAN_COMMITTED(span);
3315 pageheap->CacheSizeClass(span->start, 0);
3316 void* result = reinterpret_cast<void*>(span->start << kPageShift);
3317 POISON_ALLOCATION(result, span->length << kPageShift);
3318 return CheckedMallocResult(result);
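// do_malloc() is the common allocation path: requests larger than kMaxSize go straight to the page
// heap as whole spans (under pageheap_lock), while everything else is popped from the calling
// thread's cache for the matching size class.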
3321 static ALWAYS_INLINE void* do_malloc(size_t size) {
3324 ASSERT(!isForbidden());
3326 // The following call forces module initialization
3327 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3328 if (size > kMaxSize) {
3329 // Use page-level allocator
3330 SpinLockHolder h(&pageheap_lock);
3331 Span* span = pageheap->New(pages(size));
3333 ret = SpanToMallocResult(span);
3335 // The common case, and also the simplest. This just pops the
3336 // size-appropriate freelist, after replenishing it if it's empty.
3337 ret = CheckedMallocResult(heap->Allocate(size));
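// do_free() recovers the size class from the pagemap cache, falling back to the span descriptor on
// a cache miss. Small objects are handed to the current thread cache if one exists, or pushed
// directly into the central free list otherwise; a size class of zero means a large allocation, and
// the whole span goes back to the page heap under pageheap_lock.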
3344 static ALWAYS_INLINE void do_free(void* ptr) {
3345 if (ptr == NULL) return;
3346 ASSERT(pageheap != NULL); // Should not call free() before malloc()
3347 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3349 size_t cl = pageheap->GetSizeClassIfCached(p);
3352 span = pageheap->GetDescriptor(p);
3353 RELEASE_ASSERT(span->isValid());
3354 cl = span->sizeclass;
3355 pageheap->CacheSizeClass(p, cl);
3358 #ifndef NO_TCMALLOC_SAMPLES
3359 ASSERT(!pageheap->GetDescriptor(p)->sample);
3361 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
3363 heap->Deallocate(HardenedSLL::create(ptr), cl);
3365 // Delete directly into central cache
3366 POISON_DEALLOCATION(ptr, ByteSizeForClass(cl));
3367 SLL_SetNext(HardenedSLL::create(ptr), HardenedSLL::null(), central_cache[cl].entropy());
3368 central_cache[cl].InsertRange(HardenedSLL::create(ptr), HardenedSLL::create(ptr), 1);
3371 SpinLockHolder h(&pageheap_lock);
3372 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
3373 ASSERT(span != NULL && span->start == p);
3374 #ifndef NO_TCMALLOC_SAMPLES
3377 stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
3378 span->objects = NULL;
3382 POISON_DEALLOCATION(ptr, span->length << kPageShift);
3383 pageheap->Delete(span);
3387 // Helpers for use by exported routines below:
3389 static inline int do_mallopt(int, int) {
3390 return 1; // Indicates error
3393 #ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
3394 static inline struct mallinfo do_mallinfo() {
3395 TCMallocStats stats;
3396 ExtractStats(&stats, NULL);
3398 // Just some of the fields are filled in.
3399 struct mallinfo info;
3400 memset(&info, 0, sizeof(info));
3402 // Unfortunately, the struct contains "int" field, so some of the
3403 // size values will be truncated.
3404 info.arena = static_cast<int>(stats.system_bytes);
3405 info.fsmblks = static_cast<int>(stats.thread_bytes
3406 + stats.central_bytes
3407 + stats.transfer_bytes);
3408 info.fordblks = static_cast<int>(stats.pageheap_bytes);
3409 info.uordblks = static_cast<int>(stats.system_bytes
3410 - stats.thread_bytes
3411 - stats.central_bytes
3412 - stats.transfer_bytes
3413 - stats.pageheap_bytes);
3419 //-------------------------------------------------------------------
3420 // Exported routines
3421 //-------------------------------------------------------------------
3423 // CAVEAT: The code structure below ensures that MallocHook methods are always
3424 // called from the stack frame of the invoked allocation function.
3425 // heap-checker.cc depends on this to start a stack trace from
3426 // the call to the (de)allocation function.
3428 void* fastMalloc(size_t size)
3430 return do_malloc(size);
3433 void fastFree(void* ptr)
3438 void* fastCalloc(size_t n, size_t elem_size)
3440 size_t totalBytes = n * elem_size;
3442 // Protect against overflow
3443 if (n > 1 && elem_size && (totalBytes / elem_size) != n)
3446 void* result = do_malloc(totalBytes);
3447 memset(result, 0, totalBytes);
3452 void* fastRealloc(void* old_ptr, size_t new_size)
3454 if (old_ptr == NULL) {
3455 return do_malloc(new_size);
3457 if (new_size == 0) {
3462 // Get the size of the old entry
3463 const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
3464 size_t cl = pageheap->GetSizeClassIfCached(p);
3468 span = pageheap->GetDescriptor(p);
3469 cl = span->sizeclass;
3470 pageheap->CacheSizeClass(p, cl);
3473 old_size = ByteSizeForClass(cl);
3475 ASSERT(span != NULL);
3476 old_size = span->length << kPageShift;
3479 // Reallocate if the new size is larger than the old size,
3480 // or if the new size is significantly smaller than the old size.
3481 if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
3482 // Need to reallocate
3483 void* new_ptr = do_malloc(new_size);
3484 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
3485 // We could use a variant of do_free() that leverages the fact
3486 // that we already know the sizeclass of old_ptr. The benefit
3487 // would be small, so don't bother.
3495 void releaseFastMallocFreeMemory()
3497 // Flush free pages in the current thread cache back to the page heap.
3498 if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent())
3499 threadCache->Cleanup();
3501 SpinLockHolder h(&pageheap_lock);
3502 pageheap->ReleaseFreePages();
3505 FastMallocStatistics fastMallocStatistics()
3507 FastMallocStatistics statistics;
3509 SpinLockHolder lockHolder(&pageheap_lock);
3510 statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
3511 statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
3513 statistics.freeListBytes = 0;
3514 for (unsigned cl = 0; cl < kNumClasses; ++cl) {
3515 const int length = central_cache[cl].length();
3516 const int tc_length = central_cache[cl].tc_length();
3518 statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
3520 for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
3521 statistics.freeListBytes += threadCache->Size();
3528 template <typename T>
3529 T* RemoteMemoryReader::nextEntryInHardenedLinkedList(T** remoteAddress, uintptr_t entropy) const
3531 T** localAddress = (*this)(remoteAddress);
3534 T* hardenedNext = *localAddress;
3535 if (!hardenedNext || hardenedNext == (void*)entropy)
3537 return XOR_MASK_PTR_WITH_KEY(hardenedNext, remoteAddress, entropy);
3540 class FreeObjectFinder {
3541 const RemoteMemoryReader& m_reader;
3542 HashSet<void*> m_freeObjects;
3545 FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
3547 void visit(void* ptr) { m_freeObjects.add(ptr); }
3548 bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
3549 bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
3550 size_t freeObjectCount() const { return m_freeObjects.size(); }
3552 void findFreeObjects(TCMalloc_ThreadCache* threadCache)
3554 for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
3555 threadCache->enumerateFreeObjects(*this, m_reader);
3558 void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
3560 for (unsigned i = 0; i < numSizes; i++)
3561 centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
3565 class PageMapFreeObjectFinder {
3566 const RemoteMemoryReader& m_reader;
3567 FreeObjectFinder& m_freeObjectFinder;
3568 uintptr_t m_entropy;
3571 PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder, uintptr_t entropy)
3573 , m_freeObjectFinder(freeObjectFinder)
3574 , m_entropy(entropy)
3576 #if ENABLE(TCMALLOC_HARDENING)
3581 int visit(void* ptr) const
3586 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
3591 void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
3592 m_freeObjectFinder.visit(ptr);
3593 } else if (span->sizeclass) {
3594 // Walk the free list of the small-object span, keeping track of each object seen
3595 for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(m_reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), m_entropy)))
3596 m_freeObjectFinder.visit(nextObject.value());
3598 return span->length;
3602 class PageMapMemoryUsageRecorder {
3605 unsigned m_typeMask;
3606 vm_range_recorder_t* m_recorder;
3607 const RemoteMemoryReader& m_reader;
3608 const FreeObjectFinder& m_freeObjectFinder;
3610 HashSet<void*> m_seenPointers;
3611 Vector<Span*> m_coalescedSpans;
3614 PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
3616 , m_context(context)
3617 , m_typeMask(typeMask)
3618 , m_recorder(recorder)
3620 , m_freeObjectFinder(freeObjectFinder)
3623 ~PageMapMemoryUsageRecorder()
3625 ASSERT(!m_coalescedSpans.size());
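// recordPendingRegions() flushes the run of adjacent spans collected so far: a large-object span is
// reported as one in-use range unless the FreeObjectFinder saw it on a free list, while a
// small-object span is reported object by object at ByteSizeForClass() strides, skipping objects
// known to be free.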
3628 void recordPendingRegions()
3630 if (!(m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))) {
3631 m_coalescedSpans.clear();
3635 Vector<vm_range_t, 1024> allocatedPointers;
3636 for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
3637 Span *theSpan = m_coalescedSpans[i];
3641 vm_address_t spanStartAddress = theSpan->start << kPageShift;
3642 vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
3644 if (!theSpan->sizeclass) {
3645 // If it's an allocated large object span, mark it as in use
3646 if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
3647 allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
3648 } else {
3649 const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
3651 // Mark each allocated small object within the span as in use
3652 const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
3653 for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
3654 if (!m_freeObjectFinder.isFreeObject(object))
3655 allocatedPointers.append((vm_range_t){object, objectSize});
3660 (*m_recorder)(m_task, m_context, m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE), allocatedPointers.data(), allocatedPointers.size());
3662 m_coalescedSpans.clear();
3665 int visit(void* ptr)
3670 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
3671 if (!span || !span->start)
3672 return 1;
3674 if (!m_seenPointers.add(ptr).isNewEntry)
3675 return span->length;
3677 if (!m_coalescedSpans.size()) {
3678 m_coalescedSpans.append(span);
3679 return span->length;
3682 Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
3683 vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
3684 vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
3686 // If the new span is adjacent to the previous span, do nothing for now.
3687 vm_address_t spanStartAddress = span->start << kPageShift;
3688 if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
3689 m_coalescedSpans.append(span);
3690 return span->length;
3693 // New span is not adjacent to previous span, so record the spans coalesced so far.
3694 recordPendingRegions();
3695 m_coalescedSpans.append(span);
3697 return span->length;
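// Accumulates allocator metadata ranges and reports them to the introspection recorder as
// MALLOC_ADMIN_REGION_RANGE_TYPE regions.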
3701 class AdminRegionRecorder {
3704 unsigned m_typeMask;
3705 vm_range_recorder_t* m_recorder;
3707 Vector<vm_range_t, 1024> m_pendingRegions;
3710 AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder)
3711 : m_task(task)
3712 , m_context(context)
3713 , m_typeMask(typeMask)
3714 , m_recorder(recorder)
3717 void recordRegion(vm_address_t ptr, size_t size)
3719 if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
3720 m_pendingRegions.append((vm_range_t){ ptr, size });
3723 void visit(void* ptr, size_t size)
3725 recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
3728 void recordPendingRegions()
3730 if (m_pendingRegions.size()) {
3731 (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
3732 m_pendingRegions.clear();
3736 ~AdminRegionRecorder()
3738 ASSERT(!m_pendingRegions.size());
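// Entry point used by the system's malloc introspection machinery (heap-inspection tools such
// as leaks). It reconstructs the zone's heap structures from the target task, finds all free
// objects, walks the page map twice (free-object pass, then usage-recording pass), and finally
// reports the allocator's administrative regions.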
3742 kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
3744 RemoteMemoryReader memoryReader(task, reader);
3748 FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
3749 TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
3750 TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
3751 TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
3753 TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
3755 FreeObjectFinder finder(memoryReader);
3756 finder.findFreeObjects(threadHeaps);
3757 finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
3759 TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
3760 PageMapFreeObjectFinder pageMapFinder(memoryReader, finder, pageHeap->entropy_);
3761 pageMap->visitValues(pageMapFinder, memoryReader);
3763 PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
3764 pageMap->visitValues(usageRecorder, memoryReader);
3765 usageRecorder.recordPendingRegions();
3767 AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder);
3768 pageMap->visitAllocations(adminRegionRecorder, memoryReader);
3770 PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
3771 PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
3773 spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
3774 pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
3776 adminRegionRecorder.recordPendingRegions();
3778 return 0;
3779 }
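// The zone entry points below exist so the zone can be registered for introspection; regular
// allocation goes through fastMalloc/fastFree rather than through this malloc_zone_t, so these
// callbacks are not expected to perform real allocation work.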
3781 size_t FastMallocZone::size(malloc_zone_t*, const void*)
3786 void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
3791 void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
3796 void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
3798 // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
3799 // is not in this zone. When this happens, the pointer being freed was not allocated by any
3800 // zone so we need to print a useful error for the application developer.
3801 malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
3804 void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
3816 malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
3817 &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
3819 #if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1060
3820 , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
3821 #endif
3822 #if __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070
3823 , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
3824 #endif
3826 };
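// Wires the zone up to the live heap structures and registers it with the system malloc, making
// FastMalloc memory visible to zone enumeration via the introspection table above.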
3829 FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
3830 : m_pageHeap(pageHeap)
3831 , m_threadHeaps(threadHeaps)
3832 , m_centralCaches(centralCaches)
3833 , m_spanAllocator(spanAllocator)
3834 , m_pageHeapAllocator(pageHeapAllocator)
3836 memset(&m_zone, 0, sizeof(m_zone));
3838 m_zone.zone_name = "JavaScriptCore FastMalloc";
3839 m_zone.size = &FastMallocZone::size;
3840 m_zone.malloc = &FastMallocZone::zoneMalloc;
3841 m_zone.calloc = &FastMallocZone::zoneCalloc;
3842 m_zone.realloc = &FastMallocZone::zoneRealloc;
3843 m_zone.free = &FastMallocZone::zoneFree;
3844 m_zone.valloc = &FastMallocZone::zoneValloc;
3845 m_zone.destroy = &FastMallocZone::zoneDestroy;
3846 m_zone.introspect = &jscore_fastmalloc_introspection;
3847 malloc_zone_register(&m_zone);
3848 }
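// Creates the singleton zone bound to the global page heap, thread heaps, central caches, and
// metadata allocators.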
3851 void FastMallocZone::init()
3853 static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
3856 #endif // OS(MACOSX)
3860 #endif // FORCE_SYSTEM_MALLOC