1 /*
2  * Copyright (C) 2013 Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *
8  *     * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *     * Redistributions in binary form must reproduce the above
11  * copyright notice, this list of conditions and the following disclaimer
12  * in the documentation and/or other materials provided with the
13  * distribution.
14  *     * Neither the name of Google Inc. nor the names of its
15  * contributors may be used to endorse or promote products derived from
16  * this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include "config.h"
32 #include "platform/heap/Heap.h"
33
34 #include "platform/ScriptForbiddenScope.h"
35 #include "platform/Task.h"
36 #include "platform/TraceEvent.h"
37 #include "platform/heap/CallbackStack.h"
38 #include "platform/heap/ThreadState.h"
39 #include "public/platform/Platform.h"
40 #include "wtf/AddressSpaceRandomization.h"
41 #include "wtf/Assertions.h"
42 #include "wtf/LeakAnnotations.h"
43 #include "wtf/PassOwnPtr.h"
44 #if ENABLE(GC_PROFILE_MARKING)
45 #include "wtf/HashMap.h"
46 #include "wtf/HashSet.h"
47 #include "wtf/text/StringBuilder.h"
48 #include "wtf/text/StringHash.h"
49 #include <stdio.h>
50 #include <utility>
51 #endif
52 #if ENABLE(GC_PROFILE_HEAP)
53 #include "platform/TracedValue.h"
54 #endif
55
56 #if OS(POSIX)
57 #include <sys/mman.h>
58 #include <unistd.h>
59 #elif OS(WIN)
60 #include <windows.h>
61 #endif
62
63 namespace blink {
64
65 #if ENABLE(GC_PROFILE_MARKING)
66 static String classOf(const void* object)
67 {
68     const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
69     if (gcInfo)
70         return gcInfo->m_className;
71
72     return "unknown";
73 }
74 #endif
75
76 static bool vTableInitialized(void* objectPointer)
77 {
78     return !!(*reinterpret_cast<Address*>(objectPointer));
79 }
80
81 #if OS(WIN)
82 static bool IsPowerOf2(size_t power)
83 {
84     return !((power - 1) & power);
85 }
86 #endif
87
88 static Address roundToBlinkPageBoundary(void* base)
89 {
90     return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
91 }
92
93 static size_t roundToOsPageSize(size_t size)
94 {
95     return (size + osPageSize() - 1) & ~(osPageSize() - 1);
96 }
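// For illustration, assuming a 4 KB OS page (an assumption, the value comes
// from osPageSize() below): roundToOsPageSize(5000) yields 8192, i.e. the size
// rounded up to a multiple of the OS page size. roundToBlinkPageBoundary()
// does the same kind of rounding for an address, up to the next blink page
// boundary.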
97
98 size_t osPageSize()
99 {
100 #if OS(POSIX)
101     static const size_t pageSize = getpagesize();
102 #else
103     static size_t pageSize = 0;
104     if (!pageSize) {
105         SYSTEM_INFO info;
106         GetSystemInfo(&info);
107         pageSize = info.dwPageSize;
108         ASSERT(IsPowerOf2(pageSize));
109     }
110 #endif
111     return pageSize;
112 }
113
114 class MemoryRegion {
115 public:
116     MemoryRegion(Address base, size_t size)
117         : m_base(base)
118         , m_size(size)
119     {
120         ASSERT(size > 0);
121     }
122
123     bool contains(Address addr) const
124     {
125         return m_base <= addr && addr < (m_base + m_size);
126     }
127
128     bool contains(const MemoryRegion& other) const
129     {
130         return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
131     }
132
133     void release()
134     {
135 #if OS(POSIX)
136         int err = munmap(m_base, m_size);
137         RELEASE_ASSERT(!err);
138 #else
139         bool success = VirtualFree(m_base, 0, MEM_RELEASE);
140         RELEASE_ASSERT(success);
141 #endif
142     }
143
144     WARN_UNUSED_RETURN bool commit()
145     {
146 #if OS(POSIX)
147         return !mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
148 #else
149         void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
150         return !!result;
151 #endif
152     }
153
154     void decommit()
155     {
156 #if OS(POSIX)
157         int err = mprotect(m_base, m_size, PROT_NONE);
158         RELEASE_ASSERT(!err);
159         // FIXME: Consider using MADV_FREE on MacOS.
160         madvise(m_base, m_size, MADV_DONTNEED);
161 #else
162         bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
163         RELEASE_ASSERT(success);
164 #endif
165     }
166
167     Address base() const { return m_base; }
168     size_t size() const { return m_size; }
169
170 private:
171     Address m_base;
172     size_t m_size;
173 };
174
175 // A PageMemoryRegion represents a chunk of reserved virtual address
176 // space containing a number of blink heap pages. On Windows, reserved
177 // virtual address space can only be given back to the system as a
178 // whole. The PageMemoryRegion allows us to do that by keeping track
179 // of the number of pages using it in order to be able to release all
180 // of the virtual address space when there are no more pages using it.
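// For illustration: a region created by allocateNormalPages() below starts
// with m_numPages == blinkPagesPerRegion; each pageDeleted() call decrements
// that count, and once it reaches zero the region removes itself from the
// Heap and releases the whole reserved range in its destructor.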
181 class PageMemoryRegion : public MemoryRegion {
182 public:
183     ~PageMemoryRegion()
184     {
185         release();
186     }
187
188     void pageDeleted(Address page)
189     {
190         markPageUnused(page);
191         if (!--m_numPages) {
192             Heap::removePageMemoryRegion(this);
193             delete this;
194         }
195     }
196
197     void markPageUsed(Address page)
198     {
199         ASSERT(!m_inUse[index(page)]);
200         m_inUse[index(page)] = true;
201     }
202
203     void markPageUnused(Address page)
204     {
205         m_inUse[index(page)] = false;
206     }
207
208     static PageMemoryRegion* allocateLargePage(size_t size)
209     {
210         return allocate(size, 1);
211     }
212
213     static PageMemoryRegion* allocateNormalPages()
214     {
215         return allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
216     }
217
218     BaseHeapPage* pageFromAddress(Address address)
219     {
220         ASSERT(contains(address));
221         if (!m_inUse[index(address)])
222             return 0;
223         if (m_isLargePage)
224             return pageHeaderFromObject(base());
225         return pageHeaderFromObject(address);
226     }
227
228 private:
229     PageMemoryRegion(Address base, size_t size, unsigned numPages)
230         : MemoryRegion(base, size)
231         , m_isLargePage(numPages == 1)
232         , m_numPages(numPages)
233     {
234         for (size_t i = 0; i < blinkPagesPerRegion; ++i)
235             m_inUse[i] = false;
236     }
237
238     unsigned index(Address address)
239     {
240         ASSERT(contains(address));
241         if (m_isLargePage)
242             return 0;
243         size_t offset = blinkPageAddress(address) - base();
244         ASSERT(offset % blinkPageSize == 0);
245         return offset / blinkPageSize;
246     }
247
248     static PageMemoryRegion* allocate(size_t size, unsigned numPages)
249     {
250         // Compute a random blink page aligned address for the page memory
251         // region and attempt to get the memory there.
252         Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
253         Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
254
255 #if OS(POSIX)
256         Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
257         RELEASE_ASSERT(base != MAP_FAILED);
258         if (base == roundToBlinkPageBoundary(base))
259             return new PageMemoryRegion(base, size, numPages);
260
261         // We failed to get a blink page aligned chunk of
262         // memory. Unmap the chunk that we got and fall back to
263         // overallocating and selecting an aligned sub-part of what
264         // we allocate.
265         int error = munmap(base, size);
266         RELEASE_ASSERT(!error);
267         size_t allocationSize = size + blinkPageSize;
268         base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
269         RELEASE_ASSERT(base != MAP_FAILED);
270
271         Address end = base + allocationSize;
272         Address alignedBase = roundToBlinkPageBoundary(base);
273         Address regionEnd = alignedBase + size;
274
275         // If the allocated memory was not blink page aligned release
276         // the memory before the aligned address.
277         if (alignedBase != base)
278             MemoryRegion(base, alignedBase - base).release();
279
280         // Free the additional memory at the end of the page if any.
281         if (regionEnd < end)
282             MemoryRegion(regionEnd, end - regionEnd).release();
283
284         return new PageMemoryRegion(alignedBase, size, numPages);
285 #else
286         Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS));
287         if (base) {
288             ASSERT(base == alignedRandomAddress);
289             return new PageMemoryRegion(base, size, numPages);
290         }
291
292         // We failed to get the random aligned address that we asked
293         // for. Fall back to overallocating. On Windows it is
294         // impossible to partially release a region of memory
295         // allocated by VirtualAlloc. To avoid wasting virtual address
296         // space we reserve an oversized region, release it as a whole, and
297         // then try to reserve a blink page aligned region inside the range
298         // that was just released.
299         size_t allocationSize = size + blinkPageSize;
300         for (int attempt = 0; attempt < 3; attempt++) {
301             base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
302             RELEASE_ASSERT(base);
303             VirtualFree(base, 0, MEM_RELEASE);
304
305             Address alignedBase = roundToBlinkPageBoundary(base);
306             base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS));
307             if (base) {
308                 ASSERT(base == alignedBase);
309                 return new PageMemoryRegion(alignedBase, size, numPages);
310             }
311         }
312
313         // We failed to avoid wasting virtual address space after
314         // several attempts.
315         base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
316         RELEASE_ASSERT(base);
317
318         // FIXME: If base is by accident blink page size aligned
319         // here then we can create two pages out of reserved
320         // space. Do this.
321         Address alignedBase = roundToBlinkPageBoundary(base);
322
323         return new PageMemoryRegion(alignedBase, size, numPages);
324 #endif
325     }
326
327     bool m_isLargePage;
328     bool m_inUse[blinkPagesPerRegion];
329     unsigned m_numPages;
330 };
331
332 // Representation of the memory used for a Blink heap page.
333 //
334 // The representation keeps track of two memory regions:
335 //
336 // 1. The virtual memory reserved from the system in order to be able
337 //    to free all the virtual memory reserved. Multiple PageMemory
338 //    instances can share the same reserved memory region and
339 //    therefore notify the reserved memory region on destruction so
340 //    that the system memory can be given back when all PageMemory
341 //    instances for that memory are gone.
342 //
343 // 2. The writable memory (a sub-region of the reserved virtual
344 //    memory region) that is used for the actual heap page payload.
345 //
346 // Guard pages are created before and after the writable memory.
347 class PageMemory {
348 public:
349     ~PageMemory()
350     {
351         __lsan_unregister_root_region(m_writable.base(), m_writable.size());
352         m_reserved->pageDeleted(writableStart());
353     }
354
355     WARN_UNUSED_RETURN bool commit()
356     {
357         m_reserved->markPageUsed(writableStart());
358         return m_writable.commit();
359     }
360
361     void decommit()
362     {
363         m_reserved->markPageUnused(writableStart());
364         m_writable.decommit();
365     }
366
367     void markUnused() { m_reserved->markPageUnused(writableStart()); }
368
369     PageMemoryRegion* region() { return m_reserved; }
370
371     Address writableStart() { return m_writable.base(); }
372
373     static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize)
374     {
375         // Set up the payload one OS page into the page memory. The
376         // first OS page is the guard page.
377         Address payloadAddress = region->base() + pageOffset + osPageSize();
378         return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
379     }
380
381     // Allocate a virtual address space for one blink page with the
382     // following layout:
383     //
384     //    [ guard os page | ... payload ... | guard os page ]
385     //    ^---{ aligned to blink page size }
386     //
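    // For illustration, assuming a 4 KB OS page: a request for a 10000 byte
    // payload is rounded up to 12288 bytes, so 12288 + 2 * 4096 = 20480 bytes
    // of address space are reserved, and the payload starts one OS page into
    // that region (see setupPageMemoryInRegion() above).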
387     static PageMemory* allocate(size_t payloadSize)
388     {
389         ASSERT(payloadSize > 0);
390
391         // Virtual memory allocation routines operate in OS page sizes.
392         // Round up the requested size to the nearest OS page size.
393         payloadSize = roundToOsPageSize(payloadSize);
394
395         // Overallocate by two OS pages to have space for a guard page
396         // at the beginning and end of the blink heap page.
397         size_t allocationSize = payloadSize + 2 * osPageSize();
398         PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(allocationSize);
399         PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
400         RELEASE_ASSERT(storage->commit());
401         return storage;
402     }
403
404 private:
405     PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
406         : m_reserved(reserved)
407         , m_writable(writable)
408     {
409         ASSERT(reserved->contains(writable));
410
411         // Register the writable area of the memory as part of the LSan root set.
412         // Only the writable area is mapped and can contain C++ objects. Those
413         // C++ objects can contain pointers to objects outside of the heap and
414         // should therefore be part of the LSan root set.
415         __lsan_register_root_region(m_writable.base(), m_writable.size());
416     }
417
418
419     PageMemoryRegion* m_reserved;
420     MemoryRegion m_writable;
421 };
422
423 class GCScope {
424 public:
425     explicit GCScope(ThreadState::StackState stackState)
426         : m_state(ThreadState::current())
427         , m_safePointScope(stackState)
428         , m_parkedAllThreads(false)
429     {
430         TRACE_EVENT0("blink_gc", "Heap::GCScope");
431         const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
432         if (m_state->isMainThread())
433             TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting");
434
435         m_state->checkThread();
436
437         // FIXME: in the unlikely coincidence that two threads decide
438         // to collect garbage at the same time, avoid doing two GCs in
439         // a row.
440         RELEASE_ASSERT(!m_state->isInGC());
441         RELEASE_ASSERT(!m_state->isSweepInProgress());
442         if (LIKELY(ThreadState::stopThreads())) {
443             m_parkedAllThreads = true;
444             m_state->enterGC();
445         }
446         if (m_state->isMainThread())
447             TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
448     }
449
450     bool allThreadsParked() { return m_parkedAllThreads; }
451
452     ~GCScope()
453     {
454         // Only clean up if we parked all threads, in which case the GC happened
455         // and we need to resume the other threads.
456         if (LIKELY(m_parkedAllThreads)) {
457             m_state->leaveGC();
458             ASSERT(!m_state->isInGC());
459             ThreadState::resumeThreads();
460         }
461     }
462
463 private:
464     ThreadState* m_state;
465     ThreadState::SafePointScope m_safePointScope;
466     bool m_parkedAllThreads; // False if we fail to park all threads
467 };
468
469 NO_SANITIZE_ADDRESS
470 bool HeapObjectHeader::isMarked() const
471 {
472     checkHeader();
473     unsigned size = asanUnsafeAcquireLoad(&m_size);
474     return size & markBitMask;
475 }
476
477 NO_SANITIZE_ADDRESS
478 void HeapObjectHeader::unmark()
479 {
480     checkHeader();
481     m_size &= ~markBitMask;
482 }
483
484 NO_SANITIZE_ADDRESS
485 bool HeapObjectHeader::hasDeadMark() const
486 {
487     checkHeader();
488     return m_size & deadBitMask;
489 }
490
491 NO_SANITIZE_ADDRESS
492 void HeapObjectHeader::clearDeadMark()
493 {
494     checkHeader();
495     m_size &= ~deadBitMask;
496 }
497
498 NO_SANITIZE_ADDRESS
499 void HeapObjectHeader::setDeadMark()
500 {
501     ASSERT(!isMarked());
502     checkHeader();
503     m_size |= deadBitMask;
504 }
505
506 #if ENABLE(ASSERT)
507 NO_SANITIZE_ADDRESS
508 void HeapObjectHeader::zapMagic()
509 {
510     m_magic = zappedMagic;
511 }
512 #endif
513
514 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
515 {
516     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
517     HeapObjectHeader* header =
518         reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
519     return header;
520 }
521
522 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
523 {
524     ASSERT(gcInfo);
525     if (gcInfo->hasFinalizer()) {
526         gcInfo->m_finalize(object);
527     }
528
529 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
530     // In Debug builds, memory is zapped when it's freed, and the zapped memory is
531     // zeroed out when the memory is reused. Memory is also zapped when using Leak
532     // Sanitizer because the heap is used as a root region for LSan and therefore
533     // pointers in unreachable memory could hide leaks.
534     for (size_t i = 0; i < objectSize; i++)
535         object[i] = finalizedZapValue;
536
537     // Zap the primary vTable entry (secondary vTable entries are not zapped).
538     if (gcInfo->hasVTable()) {
539         *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
540     }
541 #endif
542     // In Release builds, the entire object is zeroed out when it is added to the free list.
543     // This happens right after sweeping the page and before the thread commences execution.
544 }
545
546 NO_SANITIZE_ADDRESS
547 void FinalizedHeapObjectHeader::finalize()
548 {
549     HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
550 }
551
552 template<typename Header>
553 void LargeHeapObject<Header>::unmark()
554 {
555     return heapObjectHeader()->unmark();
556 }
557
558 template<typename Header>
559 bool LargeHeapObject<Header>::isMarked()
560 {
561     return heapObjectHeader()->isMarked();
562 }
563
564 template<typename Header>
565 void LargeHeapObject<Header>::setDeadMark()
566 {
567     heapObjectHeader()->setDeadMark();
568 }
569
570 template<typename Header>
571 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
572 {
573     ASSERT(contains(address));
574     if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
575         return;
576 #if ENABLE(GC_PROFILE_MARKING)
577     visitor->setHostInfo(&address, "stack");
578 #endif
579     mark(visitor);
580 }
581
582 #if ENABLE(ASSERT)
583 static bool isUninitializedMemory(void* objectPointer, size_t objectSize)
584 {
585     // Scan through the object's fields and check that they are all zero.
586     Address* objectFields = reinterpret_cast<Address*>(objectPointer);
587     for (size_t i = 0; i < objectSize / sizeof(Address); ++i) {
588         if (objectFields[i] != 0)
589             return false;
590     }
591     return true;
592 }
593 #endif
594
595 template<>
596 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
597 {
598     if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload())) {
599         FinalizedHeapObjectHeader* header = heapObjectHeader();
600         visitor->markNoTracing(header);
601         ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
602     } else {
603         visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
604     }
605 }
606
607 template<>
608 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
609 {
610     ASSERT(gcInfo());
611     if (gcInfo()->hasVTable() && !vTableInitialized(payload())) {
612         HeapObjectHeader* header = heapObjectHeader();
613         visitor->markNoTracing(header);
614         ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
615     } else {
616         visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
617     }
618 }
619
620 template<>
621 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
622 {
623     heapObjectHeader()->finalize();
624 }
625
626 template<>
627 void LargeHeapObject<HeapObjectHeader>::finalize()
628 {
629     ASSERT(gcInfo());
630     HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
631 }
632
633 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
634 {
635     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
636     FinalizedHeapObjectHeader* header =
637         reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
638     return header;
639 }
640
641 template<typename Header>
642 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
643     : m_currentAllocationPoint(0)
644     , m_remainingAllocationSize(0)
645     , m_lastRemainingAllocationSize(0)
646     , m_firstPage(0)
647     , m_firstLargeHeapObject(0)
648     , m_firstPageAllocatedDuringSweeping(0)
649     , m_lastPageAllocatedDuringSweeping(0)
650     , m_mergePoint(0)
651     , m_biggestFreeListIndex(0)
652     , m_threadState(state)
653     , m_index(index)
654     , m_numberOfNormalPages(0)
655     , m_promptlyFreedCount(0)
656 {
657     clearFreeLists();
658 }
659
660 template<typename Header>
661 ThreadHeap<Header>::~ThreadHeap()
662 {
663     ASSERT(!m_firstPage);
664     ASSERT(!m_firstLargeHeapObject);
665 }
666
667 template<typename Header>
668 void ThreadHeap<Header>::cleanupPages()
669 {
670     clearFreeLists();
671
672     // Add the ThreadHeap's pages to the orphanedPagePool.
673     for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
674         Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
675     m_firstPage = 0;
676
677     for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
678         Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
679     m_firstLargeHeapObject = 0;
680 }
681
682 template<typename Header>
683 void ThreadHeap<Header>::updateRemainingAllocationSize()
684 {
685     if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
686         stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
687         m_lastRemainingAllocationSize = remainingAllocationSize();
688     }
689     ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
690 }
691
692 template<typename Header>
693 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
694 {
695     size_t allocationSize = allocationSizeFromSize(size);
696     ASSERT(allocationSize > remainingAllocationSize());
697     if (allocationSize > blinkPageSize / 2)
698         return allocateLargeObject(allocationSize, gcInfo);
699
700     updateRemainingAllocationSize();
701     if (threadState()->shouldGC()) {
702         if (threadState()->shouldForceConservativeGC())
703             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
704         else
705             threadState()->setGCRequested();
706     }
707     if (remainingAllocationSize() > 0) {
708         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
709         setAllocationPoint(0, 0);
710     }
711     ensureCurrentAllocation(allocationSize, gcInfo);
712     return allocate(size, gcInfo);
713 }
714
715 template<typename Header>
716 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
717 {
718     size_t bucketSize = 1 << m_biggestFreeListIndex;
719     int i = m_biggestFreeListIndex;
720     for (; i > 0; i--, bucketSize >>= 1) {
721         if (bucketSize < minSize)
722             break;
723         FreeListEntry* entry = m_freeLists[i];
724         if (entry) {
725             m_biggestFreeListIndex = i;
726             entry->unlink(&m_freeLists[i]);
727             setAllocationPoint(entry->address(), entry->size());
728             ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
729             return true;
730         }
731     }
732     m_biggestFreeListIndex = i;
733     return false;
734 }
735
736 template<typename Header>
737 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
738 {
739     ASSERT(minSize >= allocationGranularity);
740     if (allocateFromFreeList(minSize))
741         return;
742     if (coalesce(minSize) && allocateFromFreeList(minSize))
743         return;
744     addPageToHeap(gcInfo);
745     bool success = allocateFromFreeList(minSize);
746     RELEASE_ASSERT(success);
747 }
748
749 template<typename Header>
750 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
751 {
752     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
753         if (page->contains(address))
754             return page;
755     }
756     for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
757         if (page->contains(address))
758             return page;
759     }
760     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
761         // Check that large pages are blinkPageSize aligned (modulo the
762         // osPageSize for the guard page).
763         ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
764         if (current->contains(address))
765             return current;
766     }
767     return 0;
768 }
769
770 #if ENABLE(GC_PROFILE_MARKING)
771 template<typename Header>
772 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
773 {
774     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
775         if (current->contains(address))
776             return current->gcInfo();
777     }
778     return 0;
779 }
780 #endif
781
782 #if ENABLE(GC_PROFILE_HEAP)
783 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
784 template<typename Header>
785 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
786 {
787     size_t previousPageCount = info->pageCount;
788
789     json->beginArray("pages");
790     for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
791         // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
792         if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
793             json->beginArray();
794             json->pushInteger(reinterpret_cast<intptr_t>(page));
795             page->snapshot(json, info);
796             json->endArray();
797         } else {
798             page->snapshot(0, info);
799         }
800     }
801     json->endArray();
802
803     json->beginArray("largeObjects");
804     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
805         json->beginDictionary();
806         current->snapshot(json, info);
807         json->endDictionary();
808     }
809     json->endArray();
810
811     json->setInteger("pageCount", info->pageCount - previousPageCount);
812 }
813 #endif
814
815 template<typename Header>
816 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
817 {
818     ASSERT(heapPageFromAddress(address));
819     ASSERT(heapPageFromAddress(address + size - 1));
820     ASSERT(size < blinkPagePayloadSize());
821     // The free list entries are only pointer aligned (but when we allocate
822     // from them we are 8 byte aligned due to the header size).
823     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
824     ASSERT(!(size & allocationMask));
825     ASAN_POISON_MEMORY_REGION(address, size);
826     FreeListEntry* entry;
827     if (size < sizeof(*entry)) {
828         // Create a dummy header with only a size and freelist bit set.
829         ASSERT(size >= sizeof(BasicObjectHeader));
830         // Free-list encode the size to mark the lost memory as freelist memory.
831         new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
832         // This memory gets lost. Sweeping can reclaim it.
833         return;
834     }
835     entry = new (NotNull, address) FreeListEntry(size);
836 #if defined(ADDRESS_SANITIZER)
837     // For ASan we don't add the entry to the free lists until the asanDeferMemoryReuseCount
838     // reaches zero. However we always add entire pages to ensure that adding a new page will
839     // increase the allocation space.
840     if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
841         return;
842 #endif
843     int index = bucketIndexForSize(size);
844     entry->link(&m_freeLists[index]);
845     if (!m_lastFreeListEntries[index])
846         m_lastFreeListEntries[index] = entry;
847     if (index > m_biggestFreeListIndex)
848         m_biggestFreeListIndex = index;
849 }
850
851 template<typename Header>
852 void ThreadHeap<Header>::promptlyFreeObject(Header* header)
853 {
854     ASSERT(!m_threadState->isSweepInProgress());
855     header->checkHeader();
856     Address address = reinterpret_cast<Address>(header);
857     Address payload = header->payload();
858     size_t size = header->size();
859     size_t payloadSize = header->payloadSize();
860     BaseHeapPage* page = pageHeaderFromObject(address);
861     ASSERT(size > 0);
862     ASSERT(page == heapPageFromAddress(address));
863
864     {
865         ThreadState::NoSweepScope scope(m_threadState);
866         HeapObjectHeader::finalize(header->gcInfo(), payload, payloadSize);
867 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
868         memset(payload, 0, payloadSize);
869 #endif
870         header->markPromptlyFreed();
871     }
872
873     page->addToPromptlyFreedSize(size);
874     m_promptlyFreedCount++;
875 }
876
877 template<typename Header>
878 bool ThreadHeap<Header>::coalesce(size_t minSize)
879 {
880     if (m_threadState->isSweepInProgress())
881         return false;
882
883     if (m_promptlyFreedCount < 256)
884         return false;
885
886     // The smallest bucket able to satisfy an allocation request for minSize is
887     // the bucket where all free-list entries are guaranteed to be larger than
888     // minSize. That bucket is one larger than the bucket minSize would go into.
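    // For illustration: with minSize == 80, bucketIndexForSize(80) is 6
    // (64 <= 80 < 128), so neededBucketIndex is 7, every entry in that bucket
    // is at least 128 bytes, and neededPromptlyFreedSize is 3 * 128 = 384.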
889     size_t neededBucketIndex = bucketIndexForSize(minSize) + 1;
890     size_t neededFreeEntrySize = 1 << neededBucketIndex;
891     size_t neededPromptlyFreedSize = neededFreeEntrySize * 3;
892     size_t foundFreeEntrySize = 0;
893
894     // Bail out early on large requests because it is unlikely we will find a free-list entry.
895     if (neededPromptlyFreedSize >= blinkPageSize)
896         return false;
897
898     TRACE_EVENT_BEGIN2("blink_gc", "ThreadHeap::coalesce", "requestedSize", (unsigned)minSize, "neededSize", (unsigned)neededFreeEntrySize);
899
900     // Search for a coalescing candidate.
901     ASSERT(!ownsNonEmptyAllocationArea());
902     size_t pageCount = 0;
903     HeapPage<Header>* page = m_firstPage;
904     while (page) {
905         // Only consider one of the first 'n' pages. A "younger" page is more likely to have freed backings.
906         if (++pageCount > numberOfPagesToConsiderForCoalescing) {
907             page = 0;
908             break;
909         }
910         // Only coalesce pages with "sufficient" promptly freed space.
911         if (page->promptlyFreedSize() >= neededPromptlyFreedSize) {
912             break;
913         }
914         page = page->next();
915     }
916
917     // If we found a likely candidate, fully coalesce all its promptly-freed entries.
918     if (page) {
919         page->clearObjectStartBitMap();
920         page->resetPromptlyFreedSize();
921         size_t freedCount = 0;
922         Address startOfGap = page->payload();
923         for (Address headerAddress = startOfGap; headerAddress < page->end(); ) {
924             BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
925             ASSERT(basicHeader->size() > 0);
926             ASSERT(basicHeader->size() < blinkPagePayloadSize());
927
928             if (basicHeader->isPromptlyFreed()) {
929                 stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
930                 size_t size = basicHeader->size();
931                 ASSERT(size >= sizeof(Header));
932 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
933                 memset(headerAddress, 0, sizeof(Header));
934 #endif
935                 ++freedCount;
936                 headerAddress += size;
937                 continue;
938             }
939
940             if (startOfGap != headerAddress) {
941                 size_t size = headerAddress - startOfGap;
942                 addToFreeList(startOfGap, size);
943                 if (size > foundFreeEntrySize)
944                     foundFreeEntrySize = size;
945             }
946
947             headerAddress += basicHeader->size();
948             startOfGap = headerAddress;
949         }
950
951         if (startOfGap != page->end()) {
952             size_t size = page->end() - startOfGap;
953             addToFreeList(startOfGap, size);
954             if (size > foundFreeEntrySize)
955                 foundFreeEntrySize = size;
956         }
957
958         // Check before subtracting because freedCount might not be balanced with freed entries.
959         if (freedCount < m_promptlyFreedCount)
960             m_promptlyFreedCount -= freedCount;
961         else
962             m_promptlyFreedCount = 0;
963     }
964
965     TRACE_EVENT_END1("blink_gc", "ThreadHeap::coalesce", "foundFreeEntrySize", (unsigned)foundFreeEntrySize);
966
967     if (foundFreeEntrySize < neededFreeEntrySize) {
968         // If coalescing failed, reset the freed count to delay coalescing again.
969         m_promptlyFreedCount = 0;
970         return false;
971     }
972
973     return true;
974 }
975
976 template<typename Header>
977 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
978 {
979     // The caller already added space for the object header and rounded up to allocation alignment.
980     ASSERT(!(size & allocationMask));
981
982     size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
983
984     // Ensure that there is enough space for alignment. If the header
985     // is not a multiple of 8 bytes we will allocate an extra
986     // headerPadding<Header> bytes to ensure it is 8-byte aligned.
987     allocationSize += headerPadding<Header>();
988
989     // If ASan is supported we add allocationGranularity bytes to the allocated space and
990     // poison them to detect overflows.
991 #if defined(ADDRESS_SANITIZER)
992     allocationSize += allocationGranularity;
993 #endif
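    // For illustration, the object ends up laid out in the writable part of
    // its page memory roughly as:
    //   [ LargeHeapObject<Header> | headerPadding | Header | payload (size
    //     bytes) | ASan red zone if enabled ]
    // with an OS guard page on either side of the writable region (see
    // PageMemory::allocate()).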
994
995     updateRemainingAllocationSize();
996     if (m_threadState->shouldGC())
997         m_threadState->setGCRequested();
998     m_threadState->shouldFlushHeapDoesNotContainCache();
999     PageMemory* pageMemory = PageMemory::allocate(allocationSize);
1000     m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
1001     Address largeObjectAddress = pageMemory->writableStart();
1002     Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
1003     memset(headerAddress, 0, size);
1004     Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
1005     Address result = headerAddress + sizeof(*header);
1006     ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1007     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
1008
1009     // Poison the object header and allocationGranularity bytes after the object
1010     ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
1011     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
1012     largeObject->link(&m_firstLargeHeapObject);
1013     stats().increaseAllocatedSpace(largeObject->size());
1014     stats().increaseObjectSpace(largeObject->size());
1015     return result;
1016 }
1017
1018 template<typename Header>
1019 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
1020 {
1021     object->unlink(previousNext);
1022     object->finalize();
1023
1024     // Unpoison the object header and allocationGranularity bytes after the
1025     // object before freeing.
1026     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
1027     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
1028
1029     if (object->terminating()) {
1030         ASSERT(ThreadState::current()->isTerminating());
1031         // The thread is shutting down so this object is being removed as part
1032         // of a thread local GC. In that case the object could be traced in the
1033         // next global GC either due to a dead object being traced via a
1034         // conservative pointer or due to a programming error where an object
1035         // in another thread heap keeps a dangling pointer to this object.
1036         // To guard against this we put the large object memory in the
1037         // orphanedPagePool to ensure it is still reachable. After the next global
1038         // GC it can be released assuming no rogue/dangling pointers refer to
1039         // it.
1040         // NOTE: large objects are not moved to the free page pool as it is
1041         // unlikely they can be reused due to their individual sizes.
1042         Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
1043     } else {
1044         ASSERT(!ThreadState::current()->isTerminating());
1045         PageMemory* memory = object->storage();
1046         object->~LargeHeapObject<Header>();
1047         delete memory;
1048     }
1049 }
1050
1051 template<typename DataType>
1052 PagePool<DataType>::PagePool()
1053 {
1054     for (int i = 0; i < NumberOfHeaps; ++i) {
1055         m_pool[i] = 0;
1056     }
1057 }
1058
1059 FreePagePool::~FreePagePool()
1060 {
1061     for (int index = 0; index < NumberOfHeaps; ++index) {
1062         while (PoolEntry* entry = m_pool[index]) {
1063             m_pool[index] = entry->next;
1064             PageMemory* memory = entry->data;
1065             ASSERT(memory);
1066             delete memory;
1067             delete entry;
1068         }
1069     }
1070 }
1071
1072 void FreePagePool::addFreePage(int index, PageMemory* memory)
1073 {
1074     // When adding a page to the pool we decommit it to ensure it is unused
1075     // while in the pool. This also allows the physical memory backing the
1076     // page to be given back to the OS.
1077     memory->decommit();
1078     MutexLocker locker(m_mutex[index]);
1079     PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
1080     m_pool[index] = entry;
1081 }
1082
1083 PageMemory* FreePagePool::takeFreePage(int index)
1084 {
1085     MutexLocker locker(m_mutex[index]);
1086     while (PoolEntry* entry = m_pool[index]) {
1087         m_pool[index] = entry->next;
1088         PageMemory* memory = entry->data;
1089         ASSERT(memory);
1090         delete entry;
1091         if (memory->commit())
1092             return memory;
1093
1094         // We got some memory but failed to commit it; try again.
1095         delete memory;
1096     }
1097     return 0;
1098 }
1099
1100 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
1101     : m_storage(storage)
1102     , m_gcInfo(gcInfo)
1103     , m_threadState(state)
1104     , m_terminating(false)
1105     , m_tracedAfterOrphaned(false)
1106 {
1107     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1108 }
1109
1110 void BaseHeapPage::markOrphaned()
1111 {
1112     m_threadState = 0;
1113     m_gcInfo = 0;
1114     m_terminating = false;
1115     m_tracedAfterOrphaned = false;
1116     // Since we zap the page payload for orphaned pages, we need to mark the page
1117     // as unused so a conservative pointer won't interpret the object headers.
1118     storage()->markUnused();
1119 }
1120
1121 OrphanedPagePool::~OrphanedPagePool()
1122 {
1123     for (int index = 0; index < NumberOfHeaps; ++index) {
1124         while (PoolEntry* entry = m_pool[index]) {
1125             m_pool[index] = entry->next;
1126             BaseHeapPage* page = entry->data;
1127             delete entry;
1128             PageMemory* memory = page->storage();
1129             ASSERT(memory);
1130             page->~BaseHeapPage();
1131             delete memory;
1132         }
1133     }
1134 }
1135
1136 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
1137 {
1138     page->markOrphaned();
1139     PoolEntry* entry = new PoolEntry(page, m_pool[index]);
1140     m_pool[index] = entry;
1141 }
1142
1143 NO_SANITIZE_ADDRESS
1144 void OrphanedPagePool::decommitOrphanedPages()
1145 {
1146 #if ENABLE(ASSERT)
1147     // No locking needed as all threads are at safepoints at this point in time.
1148     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1149     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1150         ASSERT((*it)->isAtSafePoint());
1151 #endif
1152
1153     for (int index = 0; index < NumberOfHeaps; ++index) {
1154         PoolEntry* entry = m_pool[index];
1155         PoolEntry** prevNext = &m_pool[index];
1156         while (entry) {
1157             BaseHeapPage* page = entry->data;
1158             if (page->tracedAfterOrphaned()) {
1159                 // If the orphaned page was traced in the last GC it is not
1160                 // decommitted. We only decommit a page, i.e. put it in the
1161                 // memory pool, when the page has no objects pointing to it.
1162                 // We remark the page as orphaned to clear the tracedAfterOrphaned
1163                 // flag and any object trace bits that were set during tracing.
1164                 page->markOrphaned();
1165                 prevNext = &entry->next;
1166                 entry = entry->next;
1167                 continue;
1168             }
1169
1170             // Page was not traced. Check if we should reuse the memory or just
1171             // free it. Large object memory is not reused but freed; normal
1172             // blink heap pages are reused.
1173             // NOTE: We call the destructor before freeing or adding to the
1174             // free page pool.
1175             PageMemory* memory = page->storage();
1176             if (page->isLargeObject()) {
1177                 page->~BaseHeapPage();
1178                 delete memory;
1179             } else {
1180                 page->~BaseHeapPage();
1181                 // Clear out the page's memory before adding it to the free page
1182                 // pool to ensure it is zero filled when being reused.
1183                 clearMemory(memory);
1184                 Heap::freePagePool()->addFreePage(index, memory);
1185             }
1186
1187             PoolEntry* deadEntry = entry;
1188             entry = entry->next;
1189             *prevNext = entry;
1190             delete deadEntry;
1191         }
1192     }
1193 }
1194
1195 NO_SANITIZE_ADDRESS
1196 void OrphanedPagePool::clearMemory(PageMemory* memory)
1197 {
1198 #if defined(ADDRESS_SANITIZER)
1199     // Don't use memset when running with ASan since this needs to zap
1200     // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
1201     // only works for code in this method and not for calls to memset.
1202     Address base = memory->writableStart();
1203     for (Address current = base; current < base + blinkPagePayloadSize(); ++current)
1204         *current = 0;
1205 #else
1206     memset(memory->writableStart(), 0, blinkPagePayloadSize());
1207 #endif
1208 }
1209
1210 #if ENABLE(ASSERT)
1211 bool OrphanedPagePool::contains(void* object)
1212 {
1213     for (int index = 0; index < NumberOfHeaps; ++index) {
1214         for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
1215             BaseHeapPage* page = entry->data;
1216             if (page->contains(reinterpret_cast<Address>(object)))
1217                 return true;
1218         }
1219     }
1220     return false;
1221 }
1222 #endif
1223
1224 template<>
1225 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1226 {
1227     // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders, the GCInfo on
1228     // the heap should be unused (i.e. 0).
1229     allocatePage(0);
1230 }
1231
1232 template<>
1233 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
1234 {
1235     // When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the
1236     // heap since it is the same for all objects.
1237     ASSERT(gcInfo);
1238     allocatePage(gcInfo);
1239 }
1240
1241 template <typename Header>
1242 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
1243 {
1244     MutexLocker locker(m_threadState->sweepMutex());
1245     if (page->terminating()) {
1246         // The thread is shutting down so this page is being removed as part
1247         // of a thread local GC. In that case the page could be accessed in the
1248         // next global GC either due to a dead object being traced via a
1249         // conservative pointer or due to a programming error where an object
1250         // in another thread heap keeps a dangling pointer to this object.
1251         // To guard against this we put the page in the orphanedPagePool to
1252         // ensure it is still reachable. After the next global GC it can be
1253         // decommitted and moved to the page pool assuming no rogue/dangling
1254         // pointers refer to it.
1255         Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
1256     } else {
1257         PageMemory* memory = page->storage();
1258         page->~HeapPage<Header>();
1259         Heap::freePagePool()->addFreePage(m_index, memory);
1260     }
1261 }
1262
1263 template<typename Header>
1264 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
1265 {
1266     m_threadState->shouldFlushHeapDoesNotContainCache();
1267     PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
1268     // We continue allocating page memory until we succeed in committing one.
1269     while (!pageMemory) {
1270         // Allocate a memory region for blinkPagesPerRegion pages that
1271         // will each have the following layout.
1272         //
1273         //    [ guard os page | ... payload ... | guard os page ]
1274         //    ^---{ aligned to blink page size }
1275         PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
1276         m_threadState->allocatedRegionsSinceLastGC().append(region);
1277
1278         // Set up the PageMemory object for each of the pages in the
1279         // region.
1280         size_t offset = 0;
1281         for (size_t i = 0; i < blinkPagesPerRegion; i++) {
1282             PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
1283             // Take the first possible page ensuring that this thread actually
1284             // gets a page and add the rest to the page pool.
1285             if (!pageMemory) {
1286                 if (memory->commit())
1287                     pageMemory = memory;
1288                 else
1289                     delete memory;
1290             } else {
1291                 Heap::freePagePool()->addFreePage(m_index, memory);
1292             }
1293             offset += blinkPageSize;
1294         }
1295     }
1296     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
1297     // Use a separate list for pages allocated during sweeping to make
1298     // sure that we do not accidentally sweep objects that have been
1299     // allocated during sweeping.
1300     if (m_threadState->isSweepInProgress()) {
1301         if (!m_lastPageAllocatedDuringSweeping)
1302             m_lastPageAllocatedDuringSweeping = page;
1303         page->link(&m_firstPageAllocatedDuringSweeping);
1304     } else {
1305         page->link(&m_firstPage);
1306     }
1307     ++m_numberOfNormalPages;
1308     addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
1309 }
1310
1311 #if ENABLE(ASSERT)
1312 template<typename Header>
1313 bool ThreadHeap<Header>::pagesToBeSweptContains(Address address)
1314 {
1315     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
1316         if (page->contains(address))
1317             return true;
1318     }
1319     return false;
1320 }
1321
1322 template<typename Header>
1323 bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
1324 {
1325     for (HeapPage<Header>* page = m_firstPageAllocatedDuringSweeping; page; page = page->next()) {
1326         if (page->contains(address))
1327             return true;
1328     }
1329     return false;
1330 }
1331 #endif
1332
1333 template<typename Header>
1334 void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats)
1335 {
1336     ASSERT(!m_firstPageAllocatedDuringSweeping);
1337     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1338         page->getStatsForTesting(stats);
1339     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
1340         current->getStatsForTesting(stats);
1341 }
1342
1343 template<typename Header>
1344 void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
1345 {
1346     TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
1347     HeapPage<Header>* page = m_firstPage;
1348     HeapPage<Header>** previousNext = &m_firstPage;
1349     HeapPage<Header>* previous = 0;
1350     while (page) {
1351         page->resetPromptlyFreedSize();
1352         if (page->isEmpty()) {
1353             HeapPage<Header>* unused = page;
1354             if (unused == m_mergePoint)
1355                 m_mergePoint = previous;
1356             page = page->next();
1357             HeapPage<Header>::unlink(this, unused, previousNext);
1358             --m_numberOfNormalPages;
1359         } else {
1360             page->sweep(stats, this);
1361             previousNext = &page->m_next;
1362             previous = page;
1363             page = page->next();
1364         }
1365     }
1366 }
1367
1368 template<typename Header>
1369 void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
1370 {
1371     TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
1372     LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
1373     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
1374         if (current->isMarked()) {
1375             stats->increaseAllocatedSpace(current->size());
1376             stats->increaseObjectSpace(current->size());
1377             current->unmark();
1378             previousNext = &current->m_next;
1379             current = current->next();
1380         } else {
1381             LargeHeapObject<Header>* next = current->next();
1382             freeLargeObject(current, previousNext);
1383             current = next;
1384         }
1385     }
1386 }
1387
1388
1389 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
1390 // sweeping to catch cases where dead objects touch each other. This is not
1391 // turned on by default because it also triggers for cases that are safe.
1392 // Examples of such safe cases are context life cycle observers and timers
1393 // embedded in garbage collected objects.
1394 #define STRICT_ASAN_FINALIZATION_CHECKING 0
1395
1396 template<typename Header>
1397 void ThreadHeap<Header>::sweep(HeapStats* stats)
1398 {
1399     ASSERT(isConsistentForSweeping());
1400 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
1401     // When using ASan do a pre-sweep where all unmarked objects are
1402     // poisoned before calling their finalizer methods. This can catch
1403     // the case where the finalizer of an object tries to modify
1404     // another object as part of finalization.
1405     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1406         page->poisonUnmarkedObjects();
1407 #endif
1408     sweepNormalPages(stats);
1409     sweepLargePages(stats);
1410 }
1411
1412 template<typename Header>
1413 void ThreadHeap<Header>::postSweepProcessing()
1414 {
1415     // If pages have been allocated during sweeping, link them into
1416     // the list of pages.
1417     if (m_firstPageAllocatedDuringSweeping) {
1418         m_lastPageAllocatedDuringSweeping->m_next = m_firstPage;
1419         m_firstPage = m_firstPageAllocatedDuringSweeping;
1420         m_lastPageAllocatedDuringSweeping = 0;
1421         m_firstPageAllocatedDuringSweeping = 0;
1422     }
1423 }
1424
1425 #if ENABLE(ASSERT)
1426 template<typename Header>
1427 bool ThreadHeap<Header>::isConsistentForSweeping()
1428 {
1429     // A thread heap is consistent for sweeping if none of the pages to
1430     // be swept contain a freelist block or the current allocation
1431     // point.
1432     for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1433         for (FreeListEntry* freeListEntry = m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
1434             if (pagesToBeSweptContains(freeListEntry->address())) {
1435                 return false;
1436             }
1437             ASSERT(pagesAllocatedDuringSweepingContains(freeListEntry->address()));
1438         }
1439     }
1440     if (ownsNonEmptyAllocationArea()) {
1441         ASSERT(pagesToBeSweptContains(currentAllocationPoint())
1442             || pagesAllocatedDuringSweepingContains(currentAllocationPoint()));
1443         return !pagesToBeSweptContains(currentAllocationPoint());
1444     }
1445     return true;
1446 }
1447 #endif
1448
1449 template<typename Header>
1450 void ThreadHeap<Header>::makeConsistentForSweeping()
1451 {
1452     if (ownsNonEmptyAllocationArea())
1453         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
1454     setAllocationPoint(0, 0);
1455     clearFreeLists();
1456 }
1457
1458 template<typename Header>
1459 void ThreadHeap<Header>::clearLiveAndMarkDead()
1460 {
1461     ASSERT(isConsistentForSweeping());
1462     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
1463         page->clearLiveAndMarkDead();
1464     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
1465         if (current->isMarked())
1466             current->unmark();
1467         else
1468             current->setDeadMark();
1469     }
1470 }
1471
1472 template<typename Header>
1473 void ThreadHeap<Header>::clearFreeLists()
1474 {
1475     m_promptlyFreedCount = 0;
1476     for (size_t i = 0; i < blinkPageSizeLog2; i++) {
1477         m_freeLists[i] = 0;
1478         m_lastFreeListEntries[i] = 0;
1479     }
1480 }
1481
1482 int BaseHeap::bucketIndexForSize(size_t size)
1483 {
1484     ASSERT(size > 0);
1485     int index = -1;
1486     while (size) {
1487         size >>= 1;
1488         index++;
1489     }
1490     return index;
1491 }
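// bucketIndexForSize(size) computes floor(log2(size)), i.e. the position of
// the highest set bit. A few worked examples derived from the loop above:
//   bucketIndexForSize(1)  == 0
//   bucketIndexForSize(32) == 5
//   bucketIndexForSize(48) == 5   (only the highest bit matters)
//   bucketIndexForSize(64) == 6
// The per-size free lists (m_freeLists) are indexed with this value, so every
// entry in bucket i has a size whose most significant bit is bit i.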
1492
1493 template<typename Header>
1494 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
1495     : BaseHeapPage(storage, gcInfo, heap->threadState())
1496     , m_next(0)
1497 {
1498     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
1499     m_objectStartBitMapComputed = false;
1500     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1501     heap->stats().increaseAllocatedSpace(blinkPageSize);
1502 }
1503
1504 template<typename Header>
1505 void HeapPage<Header>::link(HeapPage** prevNext)
1506 {
1507     m_next = *prevNext;
1508     *prevNext = this;
1509 }
1510
1511 template<typename Header>
1512 void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPage** prevNext)
1513 {
1514     *prevNext = unused->m_next;
1515     heap->removePageFromHeap(unused);
1516 }
1517
1518 template<typename Header>
1519 void HeapPage<Header>::getStatsForTesting(HeapStats& stats)
1520 {
1521     stats.increaseAllocatedSpace(blinkPageSize);
1522     Address headerAddress = payload();
1523     ASSERT(headerAddress != end());
1524     do {
1525         Header* header = reinterpret_cast<Header*>(headerAddress);
1526         if (!header->isFree()) {
1527             stats.increaseObjectSpace(header->payloadSize());
1528         }
1529         ASSERT(header->size() < blinkPagePayloadSize());
1530         headerAddress += header->size();
1531         ASSERT(headerAddress <= end());
1532     } while (headerAddress < end());
1533 }
1534
1535 template<typename Header>
1536 bool HeapPage<Header>::isEmpty()
1537 {
1538     BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
1539     return header->isFree() && (header->size() == payloadSize());
1540 }
1541
1542 template<typename Header>
1543 void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
1544 {
1545     clearObjectStartBitMap();
1546     stats->increaseAllocatedSpace(blinkPageSize);
1547     Address startOfGap = payload();
1548     for (Address headerAddress = startOfGap; headerAddress < end(); ) {
1549         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
1550         ASSERT(basicHeader->size() > 0);
1551         ASSERT(basicHeader->size() < blinkPagePayloadSize());
1552
1553         if (basicHeader->isFree()) {
1554             size_t size = basicHeader->size();
1555 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1556             // Zero the memory in the free list header to maintain the
1557             // invariant that memory on the free list is zero filled.
1558             // The rest of the memory is already on the free list and is
1559             // therefore already zero filled.
1560             if (size < sizeof(FreeListEntry))
1561                 memset(headerAddress, 0, size);
1562             else
1563                 memset(headerAddress, 0, sizeof(FreeListEntry));
1564 #endif
1565             headerAddress += size;
1566             continue;
1567         }
1568         // At this point we know this is a valid object of type Header
1569         Header* header = static_cast<Header*>(basicHeader);
1570
1571         if (!header->isMarked()) {
1572             // For ASan we unpoison the specific object when calling the finalizer and
1573             // poison it again when done to allow the object's own finalizer to operate
1574             // on the object, but not have other finalizers be allowed to access it.
1575             ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
1576             finalize(header);
1577             size_t size = header->size();
1578 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1579             // This memory will be added to the freelist. Maintain the invariant
1580             // that memory on the freelist is zero filled.
1581             memset(headerAddress, 0, size);
1582 #endif
1583             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1584             headerAddress += size;
1585             continue;
1586         }
1587
1588         if (startOfGap != headerAddress)
1589             heap->addToFreeList(startOfGap, headerAddress - startOfGap);
1590         header->unmark();
1591         headerAddress += header->size();
1592         stats->increaseObjectSpace(header->size());
1593         startOfGap = headerAddress;
1594     }
1595     if (startOfGap != end())
1596         heap->addToFreeList(startOfGap, end() - startOfGap);
1597 }
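// Illustrative sketch of one sweep pass over a page (hypothetical layout,
// not taken from a real heap dump):
//
//   before: | live A | dead B | free C | dead D | live E |  unused tail  |
//   after:  | live A |  one coalesced free entry  | live E |  free tail  |
//
// Marked headers are unmarked and counted as object space; everything
// between two live objects (dead objects whose finalizers have just run,
// plus pre-existing free entries) is coalesced into a single free list
// entry, as is any trailing gap at the end of the page.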
1598
1599 template<typename Header>
1600 void HeapPage<Header>::clearLiveAndMarkDead()
1601 {
1602     for (Address headerAddress = payload(); headerAddress < end();) {
1603         Header* header = reinterpret_cast<Header*>(headerAddress);
1604         ASSERT(header->size() < blinkPagePayloadSize());
1605         // Check if this is a free list entry first, since we cannot call
1606         // isMarked on a free list entry.
1607         if (header->isFree()) {
1608             headerAddress += header->size();
1609             continue;
1610         }
1611         if (header->isMarked())
1612             header->unmark();
1613         else
1614             header->setDeadMark();
1615         headerAddress += header->size();
1616     }
1617 }
1618
1619 template<typename Header>
1620 void HeapPage<Header>::populateObjectStartBitMap()
1621 {
1622     memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1623     Address start = payload();
1624     for (Address headerAddress = start; headerAddress < end();) {
1625         Header* header = reinterpret_cast<Header*>(headerAddress);
1626         size_t objectOffset = headerAddress - start;
1627         ASSERT(!(objectOffset & allocationMask));
1628         size_t objectStartNumber = objectOffset / allocationGranularity;
1629         size_t mapIndex = objectStartNumber / 8;
1630         ASSERT(mapIndex < objectStartBitMapSize);
1631         m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1632         headerAddress += header->size();
1633         ASSERT(headerAddress <= end());
1634     }
1635     m_objectStartBitMapComputed = true;
1636 }
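// The object start bitmap holds one bit per allocationGranularity-sized slot
// of the payload: bit (offset / allocationGranularity) is set iff an object
// header starts at that payload offset. Worked example, assuming an
// allocationGranularity of 8 bytes (illustrative value only): an object
// starting at payload offset 200 has objectStartNumber 200 / 8 = 25, so
// mapIndex is 25 / 8 = 3 and bit (25 & 7) = 1 of byte 3 is set.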
1637
1638 template<typename Header>
1639 void HeapPage<Header>::clearObjectStartBitMap()
1640 {
1641     m_objectStartBitMapComputed = false;
1642 }
1643
1644 static int numberOfLeadingZeroes(uint8_t byte)
1645 {
1646     if (!byte)
1647         return 8;
1648     int result = 0;
1649     if (byte <= 0x0F) {
1650         result += 4;
1651         byte = byte << 4;
1652     }
1653     if (byte <= 0x3F) {
1654         result += 2;
1655         byte = byte << 2;
1656     }
1657     if (byte <= 0x7F)
1658         result++;
1659     return result;
1660 }
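// Branchy leading-zero count for a single byte, used by
// findHeaderFromAddress below to locate the most significant set bit.
// Examples derived from the code above:
//   numberOfLeadingZeroes(0x00) == 8
//   numberOfLeadingZeroes(0x01) == 7
//   numberOfLeadingZeroes(0x10) == 3
//   numberOfLeadingZeroes(0x80) == 0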
1661
1662 template<typename Header>
1663 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1664 {
1665     if (address < payload())
1666         return 0;
1667     if (!isObjectStartBitMapComputed())
1668         populateObjectStartBitMap();
1669     size_t objectOffset = address - payload();
1670     size_t objectStartNumber = objectOffset / allocationGranularity;
1671     size_t mapIndex = objectStartNumber / 8;
1672     ASSERT(mapIndex < objectStartBitMapSize);
1673     size_t bit = objectStartNumber & 7;
1674     uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1675     while (!byte) {
1676         ASSERT(mapIndex > 0);
1677         byte = m_objectStartBitMap[--mapIndex];
1678     }
1679     int leadingZeroes = numberOfLeadingZeroes(byte);
1680     objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1681     objectOffset = objectStartNumber * allocationGranularity;
1682     Address objectAddress = objectOffset + payload();
1683     Header* header = reinterpret_cast<Header*>(objectAddress);
1684     if (header->isFree())
1685         return 0;
1686     return header;
1687 }
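// findHeaderFromAddress resolves an interior pointer to the header of the
// object containing it: the bitmap byte is masked so that only start bits at
// or below the queried slot remain, the scan then walks backwards to the
// first non-zero byte, and the most significant remaining bit gives the
// object start. Illustrative example (hypothetical slot numbers): with
// objects starting at slots 20 and 26, a query falling in slot 24 masks away
// start bits above slot 24, finds the highest remaining start bit at slot 20,
// and returns the header at slot 20 (or 0 if that slot holds a free entry).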
1688
1689 template<typename Header>
1690 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1691 {
1692     ASSERT(contains(address));
1693     Header* header = findHeaderFromAddress(address);
1694     if (!header || header->hasDeadMark())
1695         return;
1696
1697 #if ENABLE(GC_PROFILE_MARKING)
1698     visitor->setHostInfo(&address, "stack");
1699 #endif
1700     if (hasVTable(header) && !vTableInitialized(header->payload())) {
1701         visitor->markNoTracing(header);
1702         ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
1703     } else {
1704         visitor->mark(header, traceCallback(header));
1705     }
1706 }
1707
1708 #if ENABLE(GC_PROFILE_MARKING)
1709 template<typename Header>
1710 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1711 {
1712     if (address < payload())
1713         return 0;
1714
1715     if (gcInfo()) // for pages not using FinalizedHeapObjectHeader
1716         return gcInfo();
1717
1718     Header* header = findHeaderFromAddress(address);
1719     if (!header)
1720         return 0;
1721
1722     return header->gcInfo();
1723 }
1724 #endif
1725
1726 #if ENABLE(GC_PROFILE_HEAP)
1727 template<typename Header>
1728 void HeapPage<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1729 {
1730     Header* header = 0;
1731     for (Address addr = payload(); addr < end(); addr += header->size()) {
1732         header = reinterpret_cast<Header*>(addr);
1733         if (json)
1734             json->pushInteger(header->encodedSize());
1735         if (header->isFree()) {
1736             info->freeSize += header->size();
1737             continue;
1738         }
1739
1740         const GCInfo* gcinfo = header->gcInfo() ? header->gcInfo() : gcInfo();
1741         size_t tag = info->getClassTag(gcinfo);
1742         size_t age = header->age();
1743         if (json)
1744             json->pushInteger(tag);
1745         if (header->isMarked()) {
1746             info->liveCount[tag] += 1;
1747             info->liveSize[tag] += header->size();
1748             // Count objects that are live when promoted to the final generation.
1749             if (age == maxHeapObjectAge - 1)
1750                 info->generations[tag][maxHeapObjectAge] += 1;
1751             header->incAge();
1752         } else {
1753             info->deadCount[tag] += 1;
1754             info->deadSize[tag] += header->size();
1755             // Count objects that are dead before the final generation.
1756             if (age < maxHeapObjectAge)
1757                 info->generations[tag][age] += 1;
1758         }
1759     }
1760 }
1761 #endif
1762
1763 #if defined(ADDRESS_SANITIZER)
1764 template<typename Header>
1765 void HeapPage<Header>::poisonUnmarkedObjects()
1766 {
1767     for (Address headerAddress = payload(); headerAddress < end(); ) {
1768         Header* header = reinterpret_cast<Header*>(headerAddress);
1769         ASSERT(header->size() < blinkPagePayloadSize());
1770
1771         if (!header->isFree() && !header->isMarked())
1772             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1773         headerAddress += header->size();
1774     }
1775 }
1776 #endif
1777
1778 template<>
1779 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1780 {
1781     header->finalize();
1782 }
1783
1784 template<>
1785 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1786 {
1787     ASSERT(gcInfo());
1788     HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1789 }
1790
1791 template<>
1792 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1793 {
1794     ASSERT(gcInfo());
1795     return gcInfo()->m_trace;
1796 }
1797
1798 template<>
1799 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1800 {
1801     return header->traceCallback();
1802 }
1803
1804 template<>
1805 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1806 {
1807     ASSERT(gcInfo());
1808     return gcInfo()->hasVTable();
1809 }
1810
1811 template<>
1812 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1813 {
1814     return header->hasVTable();
1815 }
1816
1817 template<typename Header>
1818 void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats)
1819 {
1820     stats.increaseAllocatedSpace(size());
1821     stats.increaseObjectSpace(payloadSize());
1822 }
1823
1824 #if ENABLE(GC_PROFILE_HEAP)
1825 template<typename Header>
1826 void LargeHeapObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
1827 {
1828     Header* header = heapObjectHeader();
1829     size_t tag = info->getClassTag(header->gcInfo());
1830     size_t age = header->age();
1831     if (isMarked()) {
1832         info->liveCount[tag] += 1;
1833         info->liveSize[tag] += header->size();
1834         // Count objects that are live when promoted to the final generation.
1835         if (age == maxHeapObjectAge - 1)
1836             info->generations[tag][maxHeapObjectAge] += 1;
1837         header->incAge();
1838     } else {
1839         info->deadCount[tag] += 1;
1840         info->deadSize[tag] += header->size();
1841         // Count objects that are dead before the final generation.
1842         if (age < maxHeapObjectAge)
1843             info->generations[tag][age] += 1;
1844     }
1845
1846     if (json) {
1847         json->setInteger("class", tag);
1848         json->setInteger("size", header->size());
1849         json->setInteger("isMarked", isMarked());
1850     }
1851 }
1852 #endif
1853
1854 void HeapDoesNotContainCache::flush()
1855 {
1856     ASSERT(ThreadState::isAnyThreadInGC());
1857
1858     if (m_hasEntries) {
1859         for (int i = 0; i < numberOfEntries; i++)
1860             m_entries[i] = 0;
1861         m_hasEntries = false;
1862     }
1863 }
1864
1865 size_t HeapDoesNotContainCache::hash(Address address)
1866 {
1867     size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1868     value ^= value >> numberOfEntriesLog2;
1869     value ^= value >> (numberOfEntriesLog2 * 2);
1870     value &= numberOfEntries - 1;
1871     return value & ~1; // Returns only even number.
1872 }
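// hash() keys the cache on the Blink page number (address >> blinkPageSizeLog2),
// folds in higher bits with two xor-shifts, masks to the table size and then
// clears the low bit, so every address maps to an even index, i.e. to a
// two-slot set {index, index + 1}. All addresses on the same Blink page hash
// to the same set, which is what lookup() and addEntry() below rely on.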
1873
1874 bool HeapDoesNotContainCache::lookup(Address address)
1875 {
1876     ASSERT(ThreadState::isAnyThreadInGC());
1877
1878     size_t index = hash(address);
1879     ASSERT(!(index & 1));
1880     Address cachePage = roundToBlinkPageStart(address);
1881     if (m_entries[index] == cachePage)
1882         return m_entries[index];
1883     if (m_entries[index + 1] == cachePage)
1884         return m_entries[index + 1];
1885     return 0;
1886 }
1887
1888 void HeapDoesNotContainCache::addEntry(Address address)
1889 {
1890     ASSERT(ThreadState::isAnyThreadInGC());
1891
1892     m_hasEntries = true;
1893     size_t index = hash(address);
1894     ASSERT(!(index & 1));
1895     Address cachePage = roundToBlinkPageStart(address);
1896     m_entries[index + 1] = m_entries[index];
1897     m_entries[index] = cachePage;
1898 }
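// Each two-slot set behaves like a tiny FIFO: the previous occupant of the
// even slot is shifted to the odd slot and the new page takes the even slot,
// so the two most recently recorded "not in the heap" pages per set survive.
// Illustrative sequence for one set, with pages A, B, C added in that order:
//   add A -> {A, -};  add B -> {B, A};  add C -> {C, B}   (A is evicted)
// Typical use mirrors Heap::checkAndMarkPointer below: a lookup() hit lets
// the conservative scanner skip the full heap lookup, and an address that
// turns out not to be in the heap is recorded with addEntry().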
1899
1900 void Heap::flushHeapDoesNotContainCache()
1901 {
1902     s_heapDoesNotContainCache->flush();
1903 }
1904
1905 // The marking mutex is used to ensure sequential access to data
1906 // structures during marking. The marking mutex needs to be acquired
1907 // during marking when elements are taken from the global marking
1908 // stack or when elements are added to the global ephemeron,
1909 // post-marking, and weak processing stacks. In debug mode the mutex
1910 // also needs to be acquired when asserts use the heap contains
1911 // caches.
1912 static Mutex& markingMutex()
1913 {
1914     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
1915     return mutex;
1916 }
1917
1918 static ThreadCondition& markingCondition()
1919 {
1920     AtomicallyInitializedStatic(ThreadCondition&, condition = *new ThreadCondition);
1921     return condition;
1922 }
1923
1924 static void markNoTracingCallback(Visitor* visitor, void* object)
1925 {
1926     visitor->markNoTracing(object);
1927 }
1928
1929 class MarkingVisitor : public Visitor {
1930 public:
1931 #if ENABLE(GC_PROFILE_MARKING)
1932     typedef HashSet<uintptr_t> LiveObjectSet;
1933     typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1934     typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1935 #endif
1936
1937     MarkingVisitor(CallbackStack* markingStack) : m_markingStack(markingStack)
1938     {
1939     }
1940
1941     inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1942     {
1943         ASSERT(header);
1944 #if ENABLE(ASSERT)
1945         {
1946             // Check that we are not marking objects that are outside
1947             // the heap by calling Heap::contains. However, we cannot
1948             // call Heap::contains when outside a GC, and mark is also
1949             // called when doing weakness processing for ephemerons.
1950             // Hence we only check when called within a GC.
1951 #if ENABLE_PARALLEL_MARKING
1952             MutexLocker locker(markingMutex());
1953 #endif
1954             ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header));
1955         }
1956 #endif
1957         ASSERT(objectPointer);
1958         if (header->isMarked())
1959             return;
1960         header->mark();
1961 #if ENABLE(GC_PROFILE_MARKING)
1962         MutexLocker locker(objectGraphMutex());
1963         String className(classOf(objectPointer));
1964         {
1965             LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1966             result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1967         }
1968         ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1969         ASSERT(result.isNewEntry);
1970         // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1971 #endif
1972         if (callback)
1973             Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback);
1974     }
1975
1976     virtual void mark(HeapObjectHeader* header, TraceCallback callback) override
1977     {
1978         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1979         // versions of mark to correctly find the payload.
1980         visitHeader(header, header->payload(), callback);
1981     }
1982
1983     virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) override
1984     {
1985         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1986         // versions of mark to correctly find the payload.
1987         visitHeader(header, header->payload(), callback);
1988     }
1989
1990     virtual void mark(const void* objectPointer, TraceCallback callback) override
1991     {
1992         if (!objectPointer)
1993             return;
1994         FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1995         visitHeader(header, header->payload(), callback);
1996     }
1997
1998     virtual void registerDelayedMarkNoTracing(const void* object) override
1999     {
2000         Heap::pushPostMarkingCallback(const_cast<void*>(object), markNoTracingCallback);
2001     }
2002
2003     virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) override
2004     {
2005         Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
2006     }
2007
2008     virtual void registerWeakTable(const void* closure, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2009     {
2010         Heap::registerWeakTable(const_cast<void*>(closure), iterationCallback, iterationDoneCallback);
2011     }
2012
2013 #if ENABLE(ASSERT)
2014     virtual bool weakTableRegistered(const void* closure)
2015     {
2016         return Heap::weakTableRegistered(closure);
2017     }
2018 #endif
2019
2020     virtual bool isMarked(const void* objectPointer) override
2021     {
2022         return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
2023     }
2024
2025     // This macro defines the necessary visitor methods for typed heaps
2026 #define DEFINE_VISITOR_METHODS(Type)                                              \
2027     virtual void mark(const Type* objectPointer, TraceCallback callback) override \
2028     {                                                                             \
2029         if (!objectPointer)                                                       \
2030             return;                                                               \
2031         HeapObjectHeader* header =                                                \
2032             HeapObjectHeader::fromPayload(objectPointer);                         \
2033         visitHeader(header, header->payload(), callback);                         \
2034     }                                                                             \
2035     virtual bool isMarked(const Type* objectPointer) override                     \
2036     {                                                                             \
2037         return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
2038     }
2039
2040     FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
2041 #undef DEFINE_VISITOR_METHODS
2042
2043 #if ENABLE(GC_PROFILE_MARKING)
2044     void reportStats()
2045     {
2046         fprintf(stderr, "\n---------- AFTER MARKING -------------------\n");
2047         for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
2048             fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size());
2049
2050             if (it->key == "blink::Document")
2051                 reportStillAlive(it->value, previouslyLive().get(it->key));
2052
2053             fprintf(stderr, "\n");
2054         }
2055
2056         previouslyLive().swap(currentlyLive());
2057         currentlyLive().clear();
2058
2059         for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
2060             dumpPathToObjectFromObjectGraph(objectGraph(), *it);
2061         }
2062     }
2063
2064     static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
2065     {
2066         int count = 0;
2067
2068         fprintf(stderr, " [previously %u]", previous.size());
2069         for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
2070             if (previous.find(*it) == previous.end())
2071                 continue;
2072             count++;
2073         }
2074
2075         if (!count)
2076             return;
2077
2078         fprintf(stderr, " {survived 2GCs %d: ", count);
2079         for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
2080             if (previous.find(*it) == previous.end())
2081                 continue;
2082             fprintf(stderr, "%ld", *it);
2083             if (--count)
2084                 fprintf(stderr, ", ");
2085         }
2086         ASSERT(!count);
2087         fprintf(stderr, "}");
2088     }
2089
2090     static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
2091     {
2092         ObjectGraph::const_iterator it = graph.find(target);
2093         if (it == graph.end())
2094             return;
2095         fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
2096         while (it != graph.end()) {
2097             fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data());
2098             it = graph.find(it->value.first);
2099         }
2100         fprintf(stderr, "\n");
2101     }
2102
2103     static void dumpPathToObjectOnNextGC(void* p)
2104     {
2105         objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
2106     }
2107
2108     static Mutex& objectGraphMutex()
2109     {
2110         AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2111         return mutex;
2112     }
2113
2114     static LiveObjectMap& previouslyLive()
2115     {
2116         DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2117         return map;
2118     }
2119
2120     static LiveObjectMap& currentlyLive()
2121     {
2122         DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
2123         return map;
2124     }
2125
2126     static ObjectGraph& objectGraph()
2127     {
2128         DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
2129         return graph;
2130     }
2131
2132     static HashSet<uintptr_t>& objectsToFindPath()
2133     {
2134         DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
2135         return set;
2136     }
2137 #endif
2138
2139 protected:
2140     virtual void registerWeakCell(void** cell, WeakPointerCallback callback) override
2141     {
2142         Heap::pushWeakCellPointerCallback(cell, callback);
2143     }
2144
2145 private:
2146     CallbackStack* m_markingStack;
2147 };
2148
2149 void Heap::init()
2150 {
2151     ThreadState::init();
2152     s_markingStack = new CallbackStack();
2153     s_postMarkingCallbackStack = new CallbackStack();
2154     s_weakCallbackStack = new CallbackStack();
2155     s_ephemeronStack = new CallbackStack();
2156     s_heapDoesNotContainCache = new HeapDoesNotContainCache();
2157     s_markingVisitor = new MarkingVisitor(s_markingStack);
2158     s_freePagePool = new FreePagePool();
2159     s_orphanedPagePool = new OrphanedPagePool();
2160     s_markingThreads = new Vector<OwnPtr<WebThread>>();
2161     if (Platform::current()) {
2162         int processors = Platform::current()->numberOfProcessors();
2163         int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads);
2164         for (int i = 0; i < numberOfMarkingThreads; i++)
2165             s_markingThreads->append(adoptPtr(Platform::current()->createThread("Blink GC Marking Thread")));
2166     }
2167 }
2168
2169 void Heap::shutdown()
2170 {
2171     s_shutdownCalled = true;
2172     ThreadState::shutdownHeapIfNecessary();
2173 }
2174
2175 void Heap::doShutdown()
2176 {
2177     // We don't want to call doShutdown() twice.
2178     if (!s_markingVisitor)
2179         return;
2180
2181     ASSERT(!ThreadState::isAnyThreadInGC());
2182     ASSERT(!ThreadState::attachedThreads().size());
2183     delete s_markingThreads;
2184     s_markingThreads = 0;
2185     delete s_markingVisitor;
2186     s_markingVisitor = 0;
2187     delete s_heapDoesNotContainCache;
2188     s_heapDoesNotContainCache = 0;
2189     delete s_freePagePool;
2190     s_freePagePool = 0;
2191     delete s_orphanedPagePool;
2192     s_orphanedPagePool = 0;
2193     delete s_weakCallbackStack;
2194     s_weakCallbackStack = 0;
2195     delete s_postMarkingCallbackStack;
2196     s_postMarkingCallbackStack = 0;
2197     delete s_markingStack;
2198     s_markingStack = 0;
2199     delete s_ephemeronStack;
2200     s_ephemeronStack = 0;
2201     delete s_regionTree;
2202     s_regionTree = 0;
2203     ThreadState::shutdown();
2204 }
2205
2206 BaseHeapPage* Heap::contains(Address address)
2207 {
2208     ASSERT(ThreadState::isAnyThreadInGC());
2209     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2210     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2211         BaseHeapPage* page = (*it)->contains(address);
2212         if (page)
2213             return page;
2214     }
2215     return 0;
2216 }
2217
2218 #if ENABLE(ASSERT)
2219 bool Heap::containedInHeapOrOrphanedPage(void* object)
2220 {
2221     return contains(object) || orphanedPagePool()->contains(object);
2222 }
2223 #endif
2224
2225 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
2226 {
2227     ASSERT(ThreadState::isAnyThreadInGC());
2228
2229 #if !ENABLE(ASSERT)
2230     if (s_heapDoesNotContainCache->lookup(address))
2231         return 0;
2232 #endif
2233
2234     if (BaseHeapPage* page = lookup(address)) {
2235         ASSERT(page->contains(address));
2236         ASSERT(!s_heapDoesNotContainCache->lookup(address));
2237         page->checkAndMarkPointer(visitor, address);
2238         // FIXME: We only need to set the conservative flag if checkAndMarkPointer actually marked the pointer.
2239         s_lastGCWasConservative = true;
2240         return address;
2241     }
2242
2243 #if !ENABLE(ASSERT)
2244     s_heapDoesNotContainCache->addEntry(address);
2245 #else
2246     if (!s_heapDoesNotContainCache->lookup(address))
2247         s_heapDoesNotContainCache->addEntry(address);
2248 #endif
2249     return 0;
2250 }
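// Note on the fast path above: most values found on a thread stack are not
// heap pointers at all, so a hit in the "heap does not contain" cache avoids
// the region tree lookup entirely. In ASSERT builds the cache is not used to
// short-circuit the lookup; it is only consulted to check that it never
// contradicts the real lookup, and new entries are still recorded when the
// lookup fails.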
2251
2252 #if ENABLE(GC_PROFILE_MARKING)
2253 const GCInfo* Heap::findGCInfo(Address address)
2254 {
2255     return ThreadState::findGCInfoFromAllThreads(address);
2256 }
2257 #endif
2258
2259 #if ENABLE(GC_PROFILE_MARKING)
2260 void Heap::dumpPathToObjectOnNextGC(void* p)
2261 {
2262     static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
2263 }
2264
2265 String Heap::createBacktraceString()
2266 {
2267     int framesToShow = 3;
2268     int stackFrameSize = 16;
2269     ASSERT(stackFrameSize >= framesToShow);
2270     typedef void* FramePointer;
2271     FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize));
2272     WTFGetBacktrace(stackFrame, &stackFrameSize);
2273
2274     StringBuilder builder;
2275     builder.append("Persistent");
2276     bool didAppendFirstName = false;
2277     // Skip frames before/including "blink::Persistent".
2278     bool didSeePersistent = false;
2279     for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) {
2280         FrameToNameScope frameToName(stackFrame[i]);
2281         if (!frameToName.nullableName())
2282             continue;
2283         if (strstr(frameToName.nullableName(), "blink::Persistent")) {
2284             didSeePersistent = true;
2285             continue;
2286         }
2287         if (!didSeePersistent)
2288             continue;
2289         if (!didAppendFirstName) {
2290             didAppendFirstName = true;
2291             builder.append(" ... Backtrace:");
2292         }
2293         builder.append("\n\t");
2294         builder.append(frameToName.nullableName());
2295         --framesToShow;
2296     }
2297     return builder.toString().replace("blink::", "");
2298 }
2299 #endif
2300
2301 void Heap::pushTraceCallback(CallbackStack* stack, void* object, TraceCallback callback)
2302 {
2303 #if ENABLE(ASSERT)
2304     {
2305 #if ENABLE_PARALLEL_MARKING
2306         MutexLocker locker(markingMutex());
2307 #endif
2308         ASSERT(Heap::containedInHeapOrOrphanedPage(object));
2309     }
2310 #endif
2311     CallbackStack::Item* slot = stack->allocateEntry();
2312     *slot = CallbackStack::Item(object, callback);
2313 }
2314
2315 template<CallbackInvocationMode Mode>
2316 bool Heap::popAndInvokeTraceCallback(CallbackStack* stack, Visitor* visitor)
2317 {
2318     CallbackStack::Item* item = stack->pop();
2319     if (!item)
2320         return false;
2321     // If the object being traced is located on a page which is dead, don't
2322     // trace it. This can happen when a conservative GC kept a dead object
2323     // alive which pointed to a (now gone) object on the cleaned-up page.
2324     // Also, if doing a thread local GC, don't trace objects that are located
2325     // on other threads' heaps, i.e. pages where the terminating flag is not set.
2326     BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
2327     if (Mode == GlobalMarking && heapPage->orphaned()) {
2328         // When doing a global GC we should only get a trace callback to an orphaned
2329         // page if the GC is conservative. If it is not conservative there is
2330         // a bug in the code where we have a dangling pointer to a page
2331         // on the dead thread.
2332         RELEASE_ASSERT(Heap::lastGCWasConservative());
2333         heapPage->setTracedAfterOrphaned();
2334         return true;
2335     }
2336     if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating()))
2337         return true;
2338
2339 #if ENABLE(GC_PROFILE_MARKING)
2340     visitor->setHostInfo(item->object(), classOf(item->object()));
2341 #endif
2342     item->call(visitor);
2343     return true;
2344 }
2345
2346 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
2347 {
2348 #if ENABLE_PARALLEL_MARKING
2349     MutexLocker locker(markingMutex());
2350 #endif
2351     ASSERT(!Heap::orphanedPagePool()->contains(object));
2352     CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
2353     *slot = CallbackStack::Item(object, callback);
2354 }
2355
2356 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
2357 {
2358     if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
2359         item->call(visitor);
2360         return true;
2361     }
2362     return false;
2363 }
2364
2365 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
2366 {
2367 #if ENABLE_PARALLEL_MARKING
2368     MutexLocker locker(markingMutex());
2369 #endif
2370     ASSERT(!Heap::orphanedPagePool()->contains(cell));
2371     CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry();
2372     *slot = CallbackStack::Item(cell, callback);
2373 }
2374
2375 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
2376 {
2377 #if ENABLE_PARALLEL_MARKING
2378     MutexLocker locker(markingMutex());
2379 #endif
2380     ASSERT(Heap::contains(object));
2381     BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
2382     ASSERT(!heapPageForObject->orphaned());
2383     ASSERT(Heap::contains(object) == heapPageForObject);
2384     ThreadState* state = heapPageForObject->threadState();
2385     state->pushWeakObjectPointerCallback(closure, callback);
2386 }
2387
2388 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
2389 {
2390     // For weak processing we should never reach orphaned pages since orphaned
2391     // pages are not traced, and thus objects on those pages never get
2392     // registered for weak processing. We cannot assert this here since
2393     // we might have an off-heap collection. We assert it in
2394     // Heap::pushWeakObjectPointerCallback.
2395     if (CallbackStack::Item* item = s_weakCallbackStack->pop()) {
2396         item->call(visitor);
2397         return true;
2398     }
2399     return false;
2400 }
2401
2402 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
2403 {
2404     {
2405 #if ENABLE_PARALLEL_MARKING
2406         MutexLocker locker(markingMutex());
2407 #endif
2408         // Check that the ephemeron table being pushed onto the stack is not on an
2409         // orphaned page.
2410         ASSERT(!Heap::orphanedPagePool()->contains(table));
2411         CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
2412         *slot = CallbackStack::Item(table, iterationCallback);
2413     }
2414
2415     // Register a post-marking callback to tell the tables that
2416     // ephemeron iteration is complete.
2417     pushPostMarkingCallback(table, iterationDoneCallback);
2418 }
2419
2420 #if ENABLE(ASSERT)
2421 bool Heap::weakTableRegistered(const void* table)
2422 {
2423 #if ENABLE_PARALLEL_MARKING
2424     MutexLocker locker(markingMutex());
2425 #endif
2426     ASSERT(s_ephemeronStack);
2427     return s_ephemeronStack->hasCallbackForObject(table);
2428 }
2429 #endif
2430
2431 void Heap::prepareForGC()
2432 {
2433     ASSERT(ThreadState::isAnyThreadInGC());
2434     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2435     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2436         (*it)->prepareForGC();
2437 }
2438
2439 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
2440 {
2441     ThreadState* state = ThreadState::current();
2442     state->clearGCRequested();
2443
2444     GCScope gcScope(stackState);
2445     // Check if we successfully parked the other threads. If not we bail out of the GC.
2446     if (!gcScope.allThreadsParked()) {
2447         ThreadState::current()->setGCRequested();
2448         return;
2449     }
2450
2451     if (state->isMainThread())
2452         ScriptForbiddenScope::enter();
2453
2454     s_lastGCWasConservative = false;
2455
2456     TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
2457         "precise", stackState == ThreadState::NoHeapPointersOnStack,
2458         "forced", cause == ThreadState::ForcedGC);
2459     TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC");
2460     double timeStamp = WTF::currentTimeMS();
2461 #if ENABLE(GC_PROFILE_MARKING)
2462     static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
2463 #endif
2464
2465     // Disallow allocation during garbage collection (but not
2466     // during the finalization that happens when the gcScope is
2467     // torn down).
2468     NoAllocationScope<AnyThread> noAllocationScope;
2469
2470     prepareForGC();
2471
2472     // 1. trace persistent roots.
2473     ThreadState::visitPersistentRoots(s_markingVisitor);
2474
2475     // 2. trace objects reachable from the persistent roots including ephemerons.
2476 #if ENABLE_PARALLEL_MARKING
2477     processMarkingStackInParallel();
2478 #else
2479     processMarkingStack<GlobalMarking>();
2480 #endif
2481
2482     // 3. trace objects reachable from the stack. We do this independent of the
2483     // given stackState since other threads might have a different stack state.
2484     ThreadState::visitStackRoots(s_markingVisitor);
2485
2486     // 4. trace objects reachable from the stack "roots" including ephemerons.
2487     // Only do the processing if we found a pointer to an object on one of the
2488     // thread stacks.
2489     if (lastGCWasConservative()) {
2490 #if ENABLE_PARALLEL_MARKING
2491         processMarkingStackInParallel();
2492 #else
2493         processMarkingStack<GlobalMarking>();
2494 #endif
2495     }
2496
2497     postMarkingProcessing();
2498     globalWeakProcessing();
2499
2500     // After a global marking we know that any orphaned page that was not reached
2501     // cannot be reached in a subsequent GC. This is because a thread has either
2502     // swept its heap or done a "poor man's sweep" in prepareForGC, which marks as
2503     // dead any objects that were dead, but not yet swept, after the previous GC.
2504     // In this GC's marking we check that no object marked as dead is traced, e.g.
2505     // via a conservatively found pointer or a programming error with an object
2506     // containing a dangling pointer.
2507     orphanedPagePool()->decommitOrphanedPages();
2508
2509 #if ENABLE(GC_PROFILE_MARKING)
2510     static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2511 #endif
2512
2513     if (Platform::current()) {
2514         uint64_t objectSpaceSize;
2515         uint64_t allocatedSpaceSize;
2516         getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2517         Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2518         Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2519         Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2520     }
2521
2522     if (state->isMainThread())
2523         ScriptForbiddenScope::exit();
2524 }
2525
2526 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2527 {
2528     // We explicitly do not enter a safepoint while doing thread specific
2529     // garbage collection since we don't want to allow a global GC at the
2530     // same time as a thread local GC.
2531
2532     {
2533         NoAllocationScope<AnyThread> noAllocationScope;
2534
2535         state->enterGC();
2536         state->prepareForGC();
2537
2538         // 1. trace the thread local persistent roots. For thread local GCs we
2539         // don't trace the stack (i.e. no conservative scanning) since this is
2540         // only called during thread shutdown where there should be no objects
2541         // on the stack.
2542         // We also assume that orphaned pages have no objects reachable from
2543         // persistent handles on other threads or CrossThreadPersistents. The
2544         // only cases where this could happen are if a subsequent conservative
2545         // global GC finds a "pointer" on the stack, or a programming error
2546         // where an object has a dangling cross-thread pointer to an object
2547         // on this heap.
2548         state->visitPersistents(s_markingVisitor);
2549
2550         // 2. trace objects reachable from the thread's persistent roots
2551         // including ephemerons.
2552         processMarkingStack<ThreadLocalMarking>();
2553
2554         postMarkingProcessing();
2555         globalWeakProcessing();
2556
2557         state->leaveGC();
2558     }
2559     state->performPendingSweep();
2560 }
2561
2562 void Heap::processMarkingStackEntries(int* runningMarkingThreads)
2563 {
2564     TRACE_EVENT0("blink_gc", "Heap::processMarkingStackEntries");
2565     CallbackStack stack;
2566     MarkingVisitor visitor(&stack);
2567     {
2568         MutexLocker locker(markingMutex());
2569         stack.takeBlockFrom(s_markingStack);
2570     }
2571     while (!stack.isEmpty()) {
2572         while (popAndInvokeTraceCallback<GlobalMarking>(&stack, &visitor)) { }
2573         {
2574             MutexLocker locker(markingMutex());
2575             stack.takeBlockFrom(s_markingStack);
2576         }
2577     }
2578     {
2579         MutexLocker locker(markingMutex());
2580         if (!--(*runningMarkingThreads))
2581             markingCondition().signal();
2582     }
2583 }
2584
2585 void Heap::processMarkingStackOnMultipleThreads()
2586 {
2587     int runningMarkingThreads = s_markingThreads->size() + 1;
2588
2589     for (size_t i = 0; i < s_markingThreads->size(); ++i)
2590         s_markingThreads->at(i)->postTask(new Task(WTF::bind(Heap::processMarkingStackEntries, &runningMarkingThreads)));
2591
2592     processMarkingStackEntries(&runningMarkingThreads);
2593
2594     // Wait for the other threads to finish their part of marking.
2595     MutexLocker locker(markingMutex());
2596     while (runningMarkingThreads)
2597         markingCondition().wait(markingMutex());
2598 }
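// Termination protocol for the parallel phase above: runningMarkingThreads
// starts at the number of helper threads plus one for the calling thread.
// Each participant drains blocks taken from the global marking stack and,
// once no more blocks are available, decrements the counter under the
// marking mutex; whoever brings the counter to zero signals the condition
// the calling thread is waiting on.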
2599
2600 void Heap::processMarkingStackInParallel()
2601 {
2602     static const size_t sizeOfStackForParallelMarking = 2 * CallbackStack::blockSize;
2603     // Ephemeron fixed point loop run on the garbage collecting thread.
2604     do {
2605         // Iteratively mark all objects that are reachable from the objects
2606         // currently pushed onto the marking stack. Do so in parallel if there
2607         // are multiple blocks on the global marking stack.
2608         if (s_markingStack->sizeExceeds(sizeOfStackForParallelMarking)) {
2609             processMarkingStackOnMultipleThreads();
2610         } else {
2611             TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2612             while (popAndInvokeTraceCallback<GlobalMarking>(s_markingStack, s_markingVisitor)) { }
2613         }
2614
2615         {
2616             // Mark any strong pointers that have now become reachable in ephemeron
2617             // maps.
2618             TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2619             s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2620         }
2621
2622         // Rerun loop if ephemeron processing queued more objects for tracing.
2623     } while (!s_markingStack->isEmpty());
2624 }
2625
2626 template<CallbackInvocationMode Mode>
2627 void Heap::processMarkingStack()
2628 {
2629     // Ephemeron fixed point loop.
2630     do {
2631         {
2632             // Iteratively mark all objects that are reachable from the objects
2633             // currently pushed onto the marking stack. If Mode is ThreadLocalMarking
2634             // don't continue tracing if the trace hits an object on another thread's
2635             // heap.
2636             TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded");
2637             while (popAndInvokeTraceCallback<Mode>(s_markingStack, s_markingVisitor)) { }
2638         }
2639
2640         {
2641             // Mark any strong pointers that have now become reachable in ephemeron
2642             // maps.
2643             TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
2644             s_ephemeronStack->invokeEphemeronCallbacks(s_markingVisitor);
2645         }
2646
2647         // Rerun loop if ephemeron processing queued more objects for tracing.
2648     } while (!s_markingStack->isEmpty());
2649 }
2650
2651 void Heap::postMarkingProcessing()
2652 {
2653     TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing");
2654     // Call post-marking callbacks including:
2655     // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup
2656     //    (specifically to clear the queued bits for weak hash tables), and
2657     // 2. the markNoTracing callbacks on collection backings to mark them
2658     //    if they are only reachable from their front objects.
2659     while (popAndInvokePostMarkingCallback(s_markingVisitor)) { }
2660
2661     s_ephemeronStack->clear();
2662
2663     // Post-marking callbacks should not trace any objects and
2664     // therefore the marking stack should be empty after the
2665     // post-marking callbacks.
2666     ASSERT(s_markingStack->isEmpty());
2667 }
2668
2669 void Heap::globalWeakProcessing()
2670 {
2671     TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing");
2672     // Call weak callbacks on objects that may now be pointing to dead
2673     // objects.
2674     while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
2675
2676     // It is not permitted to trace pointers of live objects in the weak
2677     // callback phase, so the marking stack should still be empty here.
2678     ASSERT(s_markingStack->isEmpty());
2679 }
2680
2681 void Heap::collectAllGarbage()
2682 {
2683     // FIXME: oilpan: we should perform a single GC and everything
2684     // should die. Unfortunately it is not the case for all objects
2685     // because the hierarchy was not completely moved to the heap and
2686     // some heap allocated objects own objects that contain persistents
2687     // pointing to other heap allocated objects.
2688     for (int i = 0; i < 5; i++)
2689         collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::ForcedGC);
2690 }
2691
2692 void Heap::setForcePreciseGCForTesting()
2693 {
2694     ThreadState::current()->setForcePreciseGCForTesting(true);
2695 }
2696
2697 template<typename Header>
2698 void ThreadHeap<Header>::prepareHeapForTermination()
2699 {
2700     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2701         page->setTerminating();
2702     }
2703     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
2704         current->setTerminating();
2705     }
2706 }
2707
2708 template<typename Header>
2709 PassOwnPtr<BaseHeap> ThreadHeap<Header>::split(int numberOfNormalPages)
2710 {
2711     // Create a new split off thread heap containing
2712     // |numberOfNormalPages| of the pages of this ThreadHeap for
2713     // parallel sweeping. The split off thread heap will be merged
2714     // with this heap at the end of sweeping and the temporary
2715     // ThreadHeap object will be deallocated after the merge.
2716     ASSERT(numberOfNormalPages > 0);
2717     OwnPtr<ThreadHeap<Header>> splitOff = adoptPtr(new ThreadHeap(m_threadState, m_index));
2718     HeapPage<Header>* splitPoint = m_firstPage;
2719     for (int i = 1; i < numberOfNormalPages; i++)
2720         splitPoint = splitPoint->next();
2721     splitOff->m_firstPage = m_firstPage;
2722     m_firstPage = splitPoint->m_next;
2723     splitOff->m_mergePoint = splitPoint;
2724     splitOff->m_numberOfNormalPages = numberOfNormalPages;
2725     m_numberOfNormalPages -= numberOfNormalPages;
2726     splitPoint->m_next = 0;
2727     return splitOff.release();
2728 }
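// Illustrative example of split(2) on a heap whose page list is
// P1 -> P2 -> P3 -> P4 (hypothetical pages):
//   split-off heap: m_firstPage = P1 -> P2, m_mergePoint = P2
//   this heap:      m_firstPage = P3 -> P4
// merge() below undoes the split by linking P1 -> P2 back in front of
// whatever m_firstPage is at merge time and appending the split-off
// per-bucket free lists to this heap's free lists.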
2729
2730 template<typename Header>
2731 void ThreadHeap<Header>::merge(PassOwnPtr<BaseHeap> splitOffBase)
2732 {
2733     ThreadHeap<Header>* splitOff = static_cast<ThreadHeap<Header>*>(splitOffBase.get());
2734     // If the mergePoint is zero all split off pages became empty in
2735     // this round and we don't have to merge. There are no pages and
2736     // nothing on the freelists.
2737     ASSERT(splitOff->m_mergePoint || splitOff->m_numberOfNormalPages == 0);
2738     if (splitOff->m_mergePoint) {
2739         // Link the split off pages into the beginning of the list again.
2740         splitOff->m_mergePoint->m_next = m_firstPage;
2741         m_firstPage = splitOff->m_firstPage;
2742         m_numberOfNormalPages += splitOff->m_numberOfNormalPages;
2743         splitOff->m_firstPage = 0;
2744         // Merge free lists.
2745         for (size_t i = 0; i < blinkPageSizeLog2; i++) {
2746             if (!m_freeLists[i]) {
2747                 m_freeLists[i] = splitOff->m_freeLists[i];
2748             } else if (splitOff->m_freeLists[i]) {
2749                 m_lastFreeListEntries[i]->append(splitOff->m_freeLists[i]);
2750                 m_lastFreeListEntries[i] = splitOff->m_lastFreeListEntries[i];
2751             }
2752         }
2753     }
2754 }
2755
2756 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
2757 {
2758     *objectSpaceSize = 0;
2759     *allocatedSpaceSize = 0;
2760     ASSERT(ThreadState::isAnyThreadInGC());
2761     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2762     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2763     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2764         *objectSpaceSize += (*it)->stats().totalObjectSpace();
2765         *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
2766     }
2767 }
2768
2769 void Heap::getStats(HeapStats* stats)
2770 {
2771     stats->clear();
2772     ASSERT(ThreadState::isAnyThreadInGC());
2773     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2774     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2775     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2776         HeapStats temp;
2777         (*it)->getStats(temp);
2778         stats->add(&temp);
2779     }
2780 }
2781
2782 void Heap::getStatsForTesting(HeapStats* stats)
2783 {
2784     stats->clear();
2785     ASSERT(ThreadState::isAnyThreadInGC());
2786     makeConsistentForSweeping();
2787     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2788     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
2789     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2790         HeapStats temp;
2791         (*it)->getStatsForTesting(temp);
2792         stats->add(&temp);
2793     }
2794 }
2795
2796 #if ENABLE(ASSERT)
2797 bool Heap::isConsistentForSweeping()
2798 {
2799     ASSERT(ThreadState::isAnyThreadInGC());
2800     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2801     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
2802         if (!(*it)->isConsistentForSweeping())
2803             return false;
2804     }
2805     return true;
2806 }
2807 #endif
2808
2809 void Heap::makeConsistentForSweeping()
2810 {
2811     ASSERT(ThreadState::isAnyThreadInGC());
2812     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
2813     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
2814         (*it)->makeConsistentForSweeping();
2815 }
2816
2817 void HeapAllocator::backingFree(void* address)
2818 {
2819     if (!address || ThreadState::isAnyThreadInGC())
2820         return;
2821
2822     ThreadState* state = ThreadState::current();
2823     if (state->isSweepInProgress())
2824         return;
2825
2826     // Don't promptly free large objects because their page is never reused
2827     // and don't free backings allocated on other threads.
2828     BaseHeapPage* page = pageHeaderFromObject(address);
2829     if (page->isLargeObject() || page->threadState() != state)
2830         return;
2831
2832     typedef HeapIndexTrait<CollectionBackingHeap> HeapTraits;
2833     typedef HeapTraits::HeapType HeapType;
2834     typedef HeapTraits::HeaderType HeaderType;
2835
2836     HeaderType* header = HeaderType::fromPayload(address);
2837     header->checkHeader();
2838
2839     const GCInfo* gcInfo = header->gcInfo();
2840     int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), header->payloadSize());
2841     HeapType* heap = static_cast<HeapType*>(state->heap(heapIndex));
2842     heap->promptlyFreeObject(header);
2843 }
2844
2845 BaseHeapPage* Heap::lookup(Address address)
2846 {
2847     ASSERT(ThreadState::isAnyThreadInGC());
2848     if (!s_regionTree)
2849         return 0;
2850     if (PageMemoryRegion* region = s_regionTree->lookup(address))
2851         return region->pageFromAddress(address);
2852     return 0;
2853 }
2854
2855 static Mutex& regionTreeMutex()
2856 {
2857     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
2858     return mutex;
2859 }
2860
2861 void Heap::removePageMemoryRegion(PageMemoryRegion* region)
2862 {
2863     // Deletion of large objects (and thus their regions) can happen concurrently
2864     // on sweeper threads. Removal can also happen during thread shutdown, but
2865     // that case is safe. Regardless, we make all removals mutually exclusive.
2866     MutexLocker locker(regionTreeMutex());
2867     RegionTree::remove(region, &s_regionTree);
2868 }
2869
2870 void Heap::addPageMemoryRegion(PageMemoryRegion* region)
2871 {
2872     ASSERT(ThreadState::isAnyThreadInGC());
2873     RegionTree::add(new RegionTree(region), &s_regionTree);
2874 }
2875
2876 PageMemoryRegion* Heap::RegionTree::lookup(Address address)
2877 {
2878     RegionTree* current = s_regionTree;
2879     while (current) {
2880         Address base = current->m_region->base();
2881         if (address < base) {
2882             current = current->m_left;
2883             continue;
2884         }
2885         if (address >= base + current->m_region->size()) {
2886             current = current->m_right;
2887             continue;
2888         }
2889         ASSERT(current->m_region->contains(address));
2890         return current->m_region;
2891     }
2892     return 0;
2893 }
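// The region tree is a plain, unbalanced binary search tree keyed on each
// PageMemoryRegion's base address. Regions do not overlap, so an address is
// either inside the current node's [base, base + size) range or strictly on
// one side of it, which makes the left/right walk above unambiguous.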
2894
2895 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
2896 {
2897     ASSERT(newTree);
2898     Address base = newTree->m_region->base();
2899     for (RegionTree* current = *context; current; current = *context) {
2900         ASSERT(!current->m_region->contains(base));
2901         context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2902     }
2903     *context = newTree;
2904 }
2905
2906 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
2907 {
2908     ASSERT(region);
2909     ASSERT(context);
2910     Address base = region->base();
2911     RegionTree* current = *context;
2912     for ( ; current; current = *context) {
2913         if (region == current->m_region)
2914             break;
2915         context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
2916     }
2917
2918     // Shutdown via detachMainThread might not have populated the region tree.
2919     if (!current)
2920         return;
2921
2922     *context = 0;
2923     if (current->m_left) {
2924         add(current->m_left, context);
2925         current->m_left = 0;
2926     }
2927     if (current->m_right) {
2928         add(current->m_right, context);
2929         current->m_right = 0;
2930     }
2931     delete current;
2932 }
2933
2934 // Force template instantiations for the types that we need.
2935 template class HeapPage<FinalizedHeapObjectHeader>;
2936 template class HeapPage<HeapObjectHeader>;
2937 template class ThreadHeap<FinalizedHeapObjectHeader>;
2938 template class ThreadHeap<HeapObjectHeader>;
2939
2940 Visitor* Heap::s_markingVisitor;
2941 Vector<OwnPtr<WebThread>>* Heap::s_markingThreads;
2942 CallbackStack* Heap::s_markingStack;
2943 CallbackStack* Heap::s_postMarkingCallbackStack;
2944 CallbackStack* Heap::s_weakCallbackStack;
2945 CallbackStack* Heap::s_ephemeronStack;
2946 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
2947 bool Heap::s_shutdownCalled = false;
2948 bool Heap::s_lastGCWasConservative = false;
2949 FreePagePool* Heap::s_freePagePool;
2950 OrphanedPagePool* Heap::s_orphanedPagePool;
2951 Heap::RegionTree* Heap::s_regionTree = 0;
2952
2953 } // namespace blink