Upstream version 7.36.149.0
[platform/framework/web/crosswalk.git] / src/third_party/WebKit/Source/platform/heap/Heap.cpp
1 /*
2  * Copyright (C) 2013 Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *
8  *     * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *     * Redistributions in binary form must reproduce the above
11  * copyright notice, this list of conditions and the following disclaimer
12  * in the documentation and/or other materials provided with the
13  * distribution.
14  *     * Neither the name of Google Inc. nor the names of its
15  * contributors may be used to endorse or promote products derived from
16  * this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include "config.h"
32 #include "platform/heap/Heap.h"
33
34 #include "platform/TraceEvent.h"
35 #include "platform/heap/ThreadState.h"
36 #include "wtf/Assertions.h"
37 #include "wtf/LeakAnnotations.h"
38 #include "wtf/PassOwnPtr.h"
39 #if ENABLE(GC_TRACING)
40 #include "wtf/HashMap.h"
41 #include "wtf/HashSet.h"
42 #include "wtf/text/StringBuilder.h"
43 #include "wtf/text/StringHash.h"
44 #include <stdio.h>
45 #include <utility>
46 #endif
47
48 #if OS(POSIX)
49 #include <sys/mman.h>
50 #include <unistd.h>
51 #elif OS(WIN)
52 #include <windows.h>
53 #endif
54
55 namespace WebCore {
56
57 #if ENABLE(GC_TRACING)
58 static String classOf(const void* object)
59 {
60     const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)));
61     if (gcInfo)
62         return gcInfo->m_className;
63
64     return "unknown";
65 }
66 #endif
67
68 static bool vTableInitialized(void* objectPointer)
69 {
70     return !!(*reinterpret_cast<Address*>(objectPointer));
71 }
72
73 #if OS(WIN)
74 static bool IsPowerOf2(size_t power)
75 {
76     return !((power - 1) & power);
77 }
78 #endif
79
80 static Address roundToBlinkPageBoundary(void* base)
81 {
82     return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
83 }
84
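// Round size up to the next multiple of the OS page size. This relies on
// osPageSize() being a power of two.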
85 static size_t roundToOsPageSize(size_t size)
86 {
87     return (size + osPageSize() - 1) & ~(osPageSize() - 1);
88 }
89
90 size_t osPageSize()
91 {
92 #if OS(POSIX)
93     static const size_t pageSize = getpagesize();
94 #else
95     static size_t pageSize = 0;
96     if (!pageSize) {
97         SYSTEM_INFO info;
98         GetSystemInfo(&info);
99         pageSize = info.dwPageSize;
100         ASSERT(IsPowerOf2(pageSize));
101     }
102 #endif
103     return pageSize;
104 }
105
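// A contiguous range of virtual memory [base, base + size) that can be
// committed, decommitted, or released back to the operating system
// (mmap/mprotect on POSIX, VirtualAlloc/VirtualFree on Windows).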
106 class MemoryRegion {
107 public:
108     MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }
109
110     bool contains(Address addr) const
111     {
112         return m_base <= addr && addr < (m_base + m_size);
113     }
114
115
116     bool contains(const MemoryRegion& other) const
117     {
118         return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
119     }
120
121     void release()
122     {
123 #if OS(POSIX)
124         int err = munmap(m_base, m_size);
125         RELEASE_ASSERT(!err);
126 #else
127         bool success = VirtualFree(m_base, 0, MEM_RELEASE);
128         RELEASE_ASSERT(success);
129 #endif
130     }
131
132     WARN_UNUSED_RETURN bool commit()
133     {
134         ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
135 #if OS(POSIX)
136         int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
137         if (!err) {
138             madvise(m_base, m_size, MADV_NORMAL);
139             return true;
140         }
141         return false;
142 #else
143         void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
144         return !!result;
145 #endif
146     }
147
148     void decommit()
149     {
150 #if OS(POSIX)
151         int err = mprotect(m_base, m_size, PROT_NONE);
152         RELEASE_ASSERT(!err);
153         // FIXME: Consider using MADV_FREE on MacOS.
154         madvise(m_base, m_size, MADV_DONTNEED);
155 #else
156         bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
157         RELEASE_ASSERT(success);
158 #endif
159     }
160
161     Address base() const { return m_base; }
162
163 private:
164     Address m_base;
165     size_t m_size;
166 };
167
168 // Representation of the memory used for a Blink heap page.
169 //
170 // The representation keeps track of two memory regions:
171 //
172 // 1. The virtual memory reserved from the system in order to be able
173 //    to free all the virtual memory reserved on destruction.
174 //
175 // 2. The writable memory (a sub-region of the reserved virtual
176 //    memory region) that is used for the actual heap page payload.
177 //
178 // Guard pages are created before and after the writable memory.
179 class PageMemory {
180 public:
181     ~PageMemory() { m_reserved.release(); }
182
183     bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
184     void decommit() { m_writable.decommit(); }
185
186     Address writableStart() { return m_writable.base(); }
187
188     // Allocate a virtual address space for the blink page with the
189     // following layout:
190     //
191     //    [ guard os page | ... payload ... | guard os page ]
192     //    ^---{ aligned to blink page size }
193     //
194     static PageMemory* allocate(size_t payloadSize)
195     {
196         ASSERT(payloadSize > 0);
197
198         // Virtual memory allocation routines operate in OS page sizes.
199         // Round up the requested size to the nearest OS page size.
200         payloadSize = roundToOsPageSize(payloadSize);
201
202         // Overallocate by blinkPageSize and 2 times OS page size to
203         // ensure a chunk of memory which is blinkPageSize aligned and
204         // has a system page before and after to use for guarding. We
205         // unmap the excess memory before returning.
206         size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
207
208         ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
209 #if OS(POSIX)
210         Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
211         RELEASE_ASSERT(base != MAP_FAILED);
212
213         Address end = base + allocationSize;
214         Address alignedBase = roundToBlinkPageBoundary(base);
215         Address payloadBase = alignedBase + osPageSize();
216         Address payloadEnd = payloadBase + payloadSize;
217         Address blinkPageEnd = payloadEnd + osPageSize();
218
219         // If the allocated memory was not blink page aligned, release
220         // the memory before the aligned address.
221         if (alignedBase != base)
222             MemoryRegion(base, alignedBase - base).release();
223
224         // Create guard pages by decommitting an OS page before and
225         // after the payload.
226         MemoryRegion(alignedBase, osPageSize()).decommit();
227         MemoryRegion(payloadEnd, osPageSize()).decommit();
228
229         // Free the additional memory at the end of the page if any.
230         if (blinkPageEnd < end)
231             MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
232
233         return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
234 #else
235         Address base = 0;
236         Address alignedBase = 0;
237
238         // On Windows it is impossible to partially release a region
239         // of memory allocated by VirtualAlloc. To avoid wasting
240         // virtual address space we reserve a large region, release it
241         // as a whole, and then attempt to reserve an aligned region
242         // inside the range that was just released.
243         for (int attempt = 0; attempt < 3; attempt++) {
244             base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
245             RELEASE_ASSERT(base);
246             VirtualFree(base, 0, MEM_RELEASE);
247
248             alignedBase = roundToBlinkPageBoundary(base);
249             base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
250             if (base) {
251                 RELEASE_ASSERT(base == alignedBase);
252                 allocationSize = payloadSize + 2 * osPageSize();
253                 break;
254             }
255         }
256
257         if (!base) {
258             // We failed to avoid wasting virtual address space after
259             // several attempts.
260             base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
261             RELEASE_ASSERT(base);
262
263             // FIXME: If base is by accident blink page size aligned
264             // here then we can create two pages out of reserved
265             // space. Do this.
266             alignedBase = roundToBlinkPageBoundary(base);
267         }
268
269         Address payloadBase = alignedBase + osPageSize();
270         PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
271         bool res = storage->commit();
272         RELEASE_ASSERT(res);
273         return storage;
274 #endif
275     }
276
277 private:
278     PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
279         : m_reserved(reserved)
280         , m_writable(writable)
281     {
282         // This annotation is for letting the LeakSanitizer ignore PageMemory objects.
283         //
284         // - The LeakSanitizer runs before the shutdown sequence and reports unreachable memory blocks.
285         // - The LeakSanitizer only recognizes memory blocks allocated through malloc/new,
286         //   and we need special handling for mapped regions.
287         // - The PageMemory object is only referenced by a HeapPage<Header> object, which is
288         //   located inside the mapped region, which is not released until the shutdown sequence.
289         //
290         // Given the above, we need to explicitly annotate that the LeakSanitizer should ignore
291         // PageMemory objects.
292         WTF_ANNOTATE_LEAKING_OBJECT_PTR(this);
293
294         ASSERT(reserved.contains(writable));
295     }
296
297     MemoryRegion m_reserved;
298     MemoryRegion m_writable;
299 };
300
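// Enters a safe point for the current thread and attempts to park all other
// attached threads for the duration of a garbage collection. Parked threads
// are resumed when the scope is destroyed.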
301 class GCScope {
302 public:
303     explicit GCScope(ThreadState::StackState stackState)
304         : m_state(ThreadState::current())
305         , m_safePointScope(stackState)
306         , m_parkedAllThreads(false)
307     {
308         TRACE_EVENT0("Blink", "Heap::GCScope");
309         const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
310         if (m_state->isMainThread())
311             TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCWaiting");
312
313         m_state->checkThread();
314
315         // FIXME: in the unlikely event that two threads decide to
316         // collect garbage at the same time, avoid doing two GCs in
317         // a row.
318         RELEASE_ASSERT(!m_state->isInGC());
319         RELEASE_ASSERT(!m_state->isSweepInProgress());
320         if (LIKELY(ThreadState::stopThreads())) {
321             m_parkedAllThreads = true;
322             m_state->enterGC();
323         }
324         if (m_state->isMainThread())
325             TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
326     }
327
328     bool allThreadsParked() { return m_parkedAllThreads; }
329
330     ~GCScope()
331     {
332         // Only clean up if we parked all threads, in which case the GC happened
333         // and we need to resume the other threads.
334         if (LIKELY(m_parkedAllThreads)) {
335             m_state->leaveGC();
336             ASSERT(!m_state->isInGC());
337             ThreadState::resumeThreads();
338         }
339     }
340
341 private:
342     ThreadState* m_state;
343     ThreadState::SafePointScope m_safePointScope;
344     bool m_parkedAllThreads; // False if we fail to park all threads
345 };
346
347 NO_SANITIZE_ADDRESS
348 bool HeapObjectHeader::isMarked() const
349 {
350     checkHeader();
351     return m_size & markBitMask;
352 }
353
354 NO_SANITIZE_ADDRESS
355 void HeapObjectHeader::unmark()
356 {
357     checkHeader();
358     m_size &= ~markBitMask;
359 }
360
361 NO_SANITIZE_ADDRESS
362 bool HeapObjectHeader::hasDebugMark() const
363 {
364     checkHeader();
365     return m_size & debugBitMask;
366 }
367
368 NO_SANITIZE_ADDRESS
369 void HeapObjectHeader::clearDebugMark()
370 {
371     checkHeader();
372     m_size &= ~debugBitMask;
373 }
374
375 NO_SANITIZE_ADDRESS
376 void HeapObjectHeader::setDebugMark()
377 {
378     checkHeader();
379     m_size |= debugBitMask;
380 }
381
382 #ifndef NDEBUG
383 NO_SANITIZE_ADDRESS
384 void HeapObjectHeader::zapMagic()
385 {
386     m_magic = zappedMagic;
387 }
388 #endif
389
390 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
391 {
392     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
393     HeapObjectHeader* header =
394         reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
395     return header;
396 }
397
398 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
399 {
400     ASSERT(gcInfo);
401     if (gcInfo->hasFinalizer()) {
402         gcInfo->m_finalize(object);
403     }
404 #ifndef NDEBUG
405     for (size_t i = 0; i < objectSize; i++)
406         object[i] = finalizedZapValue;
407 #endif
408     // Zap the primary vTable entry (secondary vTable entries are not zapped)
409     *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
410 }
411
412 NO_SANITIZE_ADDRESS
413 void FinalizedHeapObjectHeader::finalize()
414 {
415     HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
416 }
417
418 template<typename Header>
419 void LargeHeapObject<Header>::unmark()
420 {
421     return heapObjectHeader()->unmark();
422 }
423
424 template<typename Header>
425 bool LargeHeapObject<Header>::isMarked()
426 {
427     return heapObjectHeader()->isMarked();
428 }
429
430 template<typename Header>
431 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
432 {
433     ASSERT(contains(address));
434     if (!objectContains(address))
435         return;
436 #if ENABLE(GC_TRACING)
437     visitor->setHostInfo(&address, "stack");
438 #endif
439     mark(visitor);
440 }
441
442 template<>
443 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
444 {
445     if (heapObjectHeader()->hasVTable() && !vTableInitialized(payload()))
446         visitor->markConservatively(heapObjectHeader());
447     else
448         visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
449 }
450
451 template<>
452 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
453 {
454     ASSERT(gcInfo());
455     if (gcInfo()->hasVTable() && !vTableInitialized(payload()))
456         visitor->markConservatively(heapObjectHeader());
457     else
458         visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
459 }
460
461 template<>
462 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
463 {
464     heapObjectHeader()->finalize();
465 }
466
467 template<>
468 void LargeHeapObject<HeapObjectHeader>::finalize()
469 {
470     ASSERT(gcInfo());
471     HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
472 }
473
474 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
475 {
476     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
477     FinalizedHeapObjectHeader* header =
478         reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
479     return header;
480 }
481
482 template<typename Header>
483 ThreadHeap<Header>::ThreadHeap(ThreadState* state)
484     : m_currentAllocationPoint(0)
485     , m_remainingAllocationSize(0)
486     , m_firstPage(0)
487     , m_firstLargeHeapObject(0)
488     , m_biggestFreeListIndex(0)
489     , m_threadState(state)
490     , m_pagePool(0)
491 {
492     clearFreeLists();
493 }
494
495 template<typename Header>
496 ThreadHeap<Header>::~ThreadHeap()
497 {
498     clearFreeLists();
499     if (!ThreadState::current()->isMainThread())
500         assertEmpty();
501     deletePages();
502 }
503
504 template<typename Header>
505 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
506 {
507     size_t allocationSize = allocationSizeFromSize(size);
508     if (threadState()->shouldGC()) {
509         if (threadState()->shouldForceConservativeGC())
510             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
511         else
512             threadState()->setGCRequested();
513     }
514     ensureCurrentAllocation(allocationSize, gcInfo);
515     return allocate(size, gcInfo);
516 }
517
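// Search the free lists, starting from the largest non-empty bucket, for an
// entry of at least minSize and, if one is found, install it as the current
// allocation area.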
518 template<typename Header>
519 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
520 {
521     size_t bucketSize = 1 << m_biggestFreeListIndex;
522     int i = m_biggestFreeListIndex;
523     for (; i > 0; i--, bucketSize >>= 1) {
524         if (bucketSize < minSize)
525             break;
526         FreeListEntry* entry = m_freeLists[i];
527         if (entry) {
528             m_biggestFreeListIndex = i;
529             entry->unlink(&m_freeLists[i]);
530             setAllocationPoint(entry->address(), entry->size());
531             ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
532             return true;
533         }
534     }
535     m_biggestFreeListIndex = i;
536     return false;
537 }
538
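// Ensure the current allocation area can hold at least minSize bytes: return
// any remaining tail to the free list, then refill from the free list, adding
// a new page to the heap if necessary.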
539 template<typename Header>
540 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
541 {
542     ASSERT(minSize >= allocationGranularity);
543     if (remainingAllocationSize() >= minSize)
544         return;
545
546     if (remainingAllocationSize() > 0)
547         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
548     if (allocateFromFreeList(minSize))
549         return;
550     addPageToHeap(gcInfo);
551     bool success = allocateFromFreeList(minSize);
552     RELEASE_ASSERT(success);
553 }
554
555 template<typename Header>
556 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
557 {
558     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
559         if (page->contains(address))
560             return page;
561     }
562     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
563         // Check that large pages are blinkPageSize aligned (modulo the
564         // osPageSize for the guard page).
565         ASSERT(reinterpret_cast<Address>(current) - osPageSize() == roundToBlinkPageStart(reinterpret_cast<Address>(current)));
566         if (current->contains(address))
567             return current;
568     }
569     return 0;
570 }
571
572 #if ENABLE(GC_TRACING)
573 template<typename Header>
574 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeHeapObject(Address address)
575 {
576     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
577         if (current->contains(address))
578             return current->gcInfo();
579     }
580     return 0;
581 }
582 #endif
583
584 template<typename Header>
585 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
586 {
587     ASSERT(heapPageFromAddress(address));
588     ASSERT(heapPageFromAddress(address + size - 1));
589     ASSERT(size < blinkPagePayloadSize());
590     // The free list entries are only pointer aligned (but when we allocate
591     // from them we are 8 byte aligned due to the header size).
592     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
593     ASSERT(!(size & allocationMask));
594     ASAN_POISON_MEMORY_REGION(address, size);
595     FreeListEntry* entry;
596     if (size < sizeof(*entry)) {
597         // Create a dummy header with only a size and freelist bit set.
598         ASSERT(size >= sizeof(BasicObjectHeader));
599         // Encode the size with the free list bit set to mark the lost memory as free list memory.
600         new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
601         // This memory gets lost. Sweeping can reclaim it.
602         return;
603     }
604     entry = new (NotNull, address) FreeListEntry(size);
605 #if defined(ADDRESS_SANITIZER)
606     // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
607     // reaches zero. However we always add entire pages to ensure that adding a new page will
608     // increase the allocation space.
609     if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
610         return;
611 #endif
612     int index = bucketIndexForSize(size);
613     entry->link(&m_freeLists[index]);
614     if (index > m_biggestFreeListIndex)
615         m_biggestFreeListIndex = index;
616 }
617
618 template<typename Header>
619 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
620 {
621     // The caller already added space for the object header and rounded up to allocation alignment.
622     ASSERT(!(size & allocationMask));
623
624     size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
625
626     // Ensure that there is enough space for alignment. If the header
627     // is not a multiple of 8 bytes we will allocate an extra
628     // headerPadding<Header> bytes to ensure it is 8 byte aligned.
629     allocationSize += headerPadding<Header>();
630
631     // If ASAN is supported we add allocationGranularity bytes to the allocated space and
632     // poison that region to detect overflows.
633 #if defined(ADDRESS_SANITIZER)
634     allocationSize += allocationGranularity;
635 #endif
636     if (threadState()->shouldGC())
637         threadState()->setGCRequested();
638     Heap::flushHeapDoesNotContainCache();
639     PageMemory* pageMemory = PageMemory::allocate(allocationSize);
640     Address largeObjectAddress = pageMemory->writableStart();
641     Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
642     memset(headerAddress, 0, size);
643     Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
644     Address result = headerAddress + sizeof(*header);
645     ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
646     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());
647
648     // Poison the object header and allocationGranularity bytes after the object
649     ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
650     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
651     largeObject->link(&m_firstLargeHeapObject);
652     stats().increaseAllocatedSpace(largeObject->size());
653     stats().increaseObjectSpace(largeObject->payloadSize());
654     return result;
655 }
656
657 template<typename Header>
658 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
659 {
660     flushHeapContainsCache();
661     object->unlink(previousNext);
662     object->finalize();
663
664     // Unpoison the object header and allocationGranularity bytes after the
665     // object before freeing.
666     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
667     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
668     delete object->storage();
669 }
670
671 template<>
672 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
673 {
674     // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
675     // the heap should be unused (i.e. 0).
676     allocatePage(0);
677 }
678
679 template<>
680 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
681 {
682     // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
683     // since it is the same for all objects
684     ASSERT(gcInfo);
685     allocatePage(gcInfo);
686 }
687
688 template<typename Header>
689 void ThreadHeap<Header>::clearPagePool()
690 {
691     while (takePageFromPool()) { }
692 }
693
694 template<typename Header>
695 PageMemory* ThreadHeap<Header>::takePageFromPool()
696 {
697     Heap::flushHeapDoesNotContainCache();
698     while (PagePoolEntry* entry = m_pagePool) {
699         m_pagePool = entry->next();
700         PageMemory* storage = entry->storage();
701         delete entry;
702
703         if (storage->commit())
704             return storage;
705
706         // Failed to commit pooled storage. Release it.
707         delete storage;
708     }
709
710     return 0;
711 }
712
713 template<typename Header>
714 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
715 {
716     flushHeapContainsCache();
717     PageMemory* storage = unused->storage();
718     PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
719     m_pagePool = entry;
720     storage->decommit();
721 }
722
723 template<typename Header>
724 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
725 {
726     Heap::flushHeapDoesNotContainCache();
727     PageMemory* pageMemory = takePageFromPool();
728     if (!pageMemory) {
729         pageMemory = PageMemory::allocate(blinkPagePayloadSize());
730         RELEASE_ASSERT(pageMemory);
731     }
732     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
733     // FIXME: Oilpan: Linking new pages into the front of the list is
734     // crucial when performing allocations during finalization because
735     // it ensures that those pages are not swept in the current GC
736     // round. We should create a separate page list for that to
737     // separate out the pages allocated during finalization clearly
738     // from the pages currently being swept.
739     page->link(&m_firstPage);
740     addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
741 }
742
743 #ifndef NDEBUG
744 template<typename Header>
745 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
746 {
747     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
748         page->getStats(scannedStats);
749     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
750         current->getStats(scannedStats);
751 }
752 #endif
753
754 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during
755 // sweeping to catch cases where dead objects touch each other. This is not
756 // turned on by default because it also triggers for cases that are safe.
757 // Examples of such safe cases are context life cycle observers and timers
758 // embedded in garbage collected objects.
759 #define STRICT_ASAN_FINALIZATION_CHECKING 0
760
761 template<typename Header>
762 void ThreadHeap<Header>::sweep()
763 {
764     ASSERT(isConsistentForGC());
765 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
766     // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
767     // calling their finalizer methods. This can catch the cases where one object's
768     // finalizer tries to modify another object as part of finalization.
769     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
770         page->poisonUnmarkedObjects();
771 #endif
772     HeapPage<Header>* page = m_firstPage;
773     HeapPage<Header>** previous = &m_firstPage;
774     bool pagesRemoved = false;
775     while (page) {
776         if (page->isEmpty()) {
777             flushHeapContainsCache();
778             HeapPage<Header>* unused = page;
779             page = page->next();
780             HeapPage<Header>::unlink(unused, previous);
781             pagesRemoved = true;
782         } else {
783             page->sweep();
784             previous = &page->m_next;
785             page = page->next();
786         }
787     }
788     if (pagesRemoved)
789         flushHeapContainsCache();
790
791     LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
792     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
793         if (current->isMarked()) {
794             stats().increaseAllocatedSpace(current->size());
795             stats().increaseObjectSpace(current->payloadSize());
796             current->unmark();
797             previousNext = &current->m_next;
798             current = current->next();
799         } else {
800             LargeHeapObject<Header>* next = current->next();
801             freeLargeObject(current, previousNext);
802             current = next;
803         }
804     }
805 }
806
807 template<typename Header>
808 void ThreadHeap<Header>::assertEmpty()
809 {
810     // No allocations are permitted. The thread is exiting.
811     NoAllocationScope<AnyThread> noAllocation;
812     makeConsistentForGC();
813     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
814         Address end = page->end();
815         Address headerAddress;
816         for (headerAddress = page->payload(); headerAddress < end; ) {
817             BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
818             ASSERT(basicHeader->size() < blinkPagePayloadSize());
819             // Live object is potentially a dangling pointer from some root.
820             // Treat it as critical bug both in release and debug mode.
821             RELEASE_ASSERT(basicHeader->isFree());
822             headerAddress += basicHeader->size();
823         }
824         ASSERT(headerAddress == end);
825         addToFreeList(page->payload(), end - page->payload());
826     }
827
828     RELEASE_ASSERT(!m_firstLargeHeapObject);
829 }
830
831 template<typename Header>
832 bool ThreadHeap<Header>::isConsistentForGC()
833 {
834     for (size_t i = 0; i < blinkPageSizeLog2; i++) {
835         if (m_freeLists[i])
836             return false;
837     }
838     return !ownsNonEmptyAllocationArea();
839 }
840
841 template<typename Header>
842 void ThreadHeap<Header>::makeConsistentForGC()
843 {
844     if (ownsNonEmptyAllocationArea())
845         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
846     setAllocationPoint(0, 0);
847     clearFreeLists();
848 }
849
850 template<typename Header>
851 void ThreadHeap<Header>::clearMarks()
852 {
853     ASSERT(isConsistentForGC());
854     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
855         page->clearMarks();
856     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
857         current->unmark();
858 }
859
860 template<typename Header>
861 void ThreadHeap<Header>::deletePages()
862 {
863     flushHeapContainsCache();
864     // Add all pages in the pool to the heap's list of pages before deleting
865     clearPagePool();
866
867     for (HeapPage<Header>* page = m_firstPage; page; ) {
868         HeapPage<Header>* dead = page;
869         page = page->next();
870         PageMemory* storage = dead->storage();
871         dead->~HeapPage();
872         delete storage;
873     }
874     m_firstPage = 0;
875
876     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
877         LargeHeapObject<Header>* dead = current;
878         current = current->next();
879         PageMemory* storage = dead->storage();
880         dead->~LargeHeapObject();
881         delete storage;
882     }
883     m_firstLargeHeapObject = 0;
884 }
885
886 template<typename Header>
887 void ThreadHeap<Header>::clearFreeLists()
888 {
889     for (size_t i = 0; i < blinkPageSizeLog2; i++)
890         m_freeLists[i] = 0;
891 }
892
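// Returns the index of the highest set bit in size, i.e. floor(log2(size)).
// Free list bucket i therefore holds entries with sizes in [2^i, 2^(i+1)).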
893 int BaseHeap::bucketIndexForSize(size_t size)
894 {
895     ASSERT(size > 0);
896     int index = -1;
897     while (size) {
898         size >>= 1;
899         index++;
900     }
901     return index;
902 }
903
904 template<typename Header>
905 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
906     : BaseHeapPage(storage, gcInfo, heap->threadState())
907     , m_next(0)
908     , m_heap(heap)
909 {
910     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
911     m_objectStartBitMapComputed = false;
912     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
913     heap->stats().increaseAllocatedSpace(blinkPageSize);
914 }
915
916 template<typename Header>
917 void HeapPage<Header>::link(HeapPage** prevNext)
918 {
919     m_next = *prevNext;
920     *prevNext = this;
921 }
922
923 template<typename Header>
924 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
925 {
926     *prevNext = unused->m_next;
927     unused->heap()->addPageToPool(unused);
928 }
929
930 template<typename Header>
931 void HeapPage<Header>::getStats(HeapStats& stats)
932 {
933     stats.increaseAllocatedSpace(blinkPageSize);
934     Address headerAddress = payload();
935     ASSERT(headerAddress != end());
936     do {
937         Header* header = reinterpret_cast<Header*>(headerAddress);
938         if (!header->isFree())
939             stats.increaseObjectSpace(header->payloadSize());
940         ASSERT(header->size() < blinkPagePayloadSize());
941         headerAddress += header->size();
942         ASSERT(headerAddress <= end());
943     } while (headerAddress < end());
944 }
945
946 template<typename Header>
947 bool HeapPage<Header>::isEmpty()
948 {
949     BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
950     return header->isFree() && (header->size() == payloadSize());
951 }
952
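// Sweep a single page: finalize unmarked objects, coalesce dead objects and
// existing free entries into new free list entries, and unmark surviving
// objects for the next GC cycle.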
953 template<typename Header>
954 void HeapPage<Header>::sweep()
955 {
956     clearObjectStartBitMap();
957     heap()->stats().increaseAllocatedSpace(blinkPageSize);
958     Address startOfGap = payload();
959     for (Address headerAddress = startOfGap; headerAddress < end(); ) {
960         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
961         ASSERT(basicHeader->size() < blinkPagePayloadSize());
962
963         if (basicHeader->isFree()) {
964             headerAddress += basicHeader->size();
965             continue;
966         }
967         // At this point we know this is a valid object of type Header
968         Header* header = static_cast<Header*>(basicHeader);
969
970         if (!header->isMarked()) {
971             // For ASAN we unpoison the specific object when calling the finalizer and
972             // poison it again when done to allow the object's own finalizer to operate
973             // on the object, but not allow other finalizers to access it.
974             ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
975             finalize(header);
976             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
977             headerAddress += header->size();
978             continue;
979         }
980
981         if (startOfGap != headerAddress)
982             heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
983         header->unmark();
984         headerAddress += header->size();
985         heap()->stats().increaseObjectSpace(header->payloadSize());
986         startOfGap = headerAddress;
987     }
988     if (startOfGap != end())
989         heap()->addToFreeList(startOfGap, end() - startOfGap);
990 }
991
992 template<typename Header>
993 void HeapPage<Header>::clearMarks()
994 {
995     for (Address headerAddress = payload(); headerAddress < end();) {
996         Header* header = reinterpret_cast<Header*>(headerAddress);
997         ASSERT(header->size() < blinkPagePayloadSize());
998         if (!header->isFree())
999             header->unmark();
1000         headerAddress += header->size();
1001     }
1002 }
1003
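// Build the object start bitmap for this page by walking the object headers.
// Bit n is set when an object starts at payload() + n * allocationGranularity.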
1004 template<typename Header>
1005 void HeapPage<Header>::populateObjectStartBitMap()
1006 {
1007     memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1008     Address start = payload();
1009     for (Address headerAddress = start; headerAddress < end();) {
1010         Header* header = reinterpret_cast<Header*>(headerAddress);
1011         size_t objectOffset = headerAddress - start;
1012         ASSERT(!(objectOffset & allocationMask));
1013         size_t objectStartNumber = objectOffset / allocationGranularity;
1014         size_t mapIndex = objectStartNumber / 8;
1015         ASSERT(mapIndex < objectStartBitMapSize);
1016         m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
1017         headerAddress += header->size();
1018         ASSERT(headerAddress <= end());
1019     }
1020     m_objectStartBitMapComputed = true;
1021 }
1022
1023 template<typename Header>
1024 void HeapPage<Header>::clearObjectStartBitMap()
1025 {
1026     m_objectStartBitMapComputed = false;
1027 }
1028
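// Count the leading zero bits in a byte with a three-step binary search;
// returns 8 for a zero byte.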
1029 static int numberOfLeadingZeroes(uint8_t byte)
1030 {
1031     if (!byte)
1032         return 8;
1033     int result = 0;
1034     if (byte <= 0x0F) {
1035         result += 4;
1036         byte = byte << 4;
1037     }
1038     if (byte <= 0x3F) {
1039         result += 2;
1040         byte = byte << 2;
1041     }
1042     if (byte <= 0x7F)
1043         result++;
1044     return result;
1045 }
1046
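// Map an address on this page to the header of the object containing it by
// scanning the object start bitmap backwards for the closest preceding object
// start. Returns 0 for addresses before the payload and for addresses that
// fall inside free list entries.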
1047 template<typename Header>
1048 Header* HeapPage<Header>::findHeaderFromAddress(Address address)
1049 {
1050     if (address < payload())
1051         return 0;
1052     if (!isObjectStartBitMapComputed())
1053         populateObjectStartBitMap();
1054     size_t objectOffset = address - payload();
1055     size_t objectStartNumber = objectOffset / allocationGranularity;
1056     size_t mapIndex = objectStartNumber / 8;
1057     ASSERT(mapIndex < objectStartBitMapSize);
1058     size_t bit = objectStartNumber & 7;
1059     uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
1060     while (!byte) {
1061         ASSERT(mapIndex > 0);
1062         byte = m_objectStartBitMap[--mapIndex];
1063     }
1064     int leadingZeroes = numberOfLeadingZeroes(byte);
1065     objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
1066     objectOffset = objectStartNumber * allocationGranularity;
1067     Address objectAddress = objectOffset + payload();
1068     Header* header = reinterpret_cast<Header*>(objectAddress);
1069     if (header->isFree())
1070         return 0;
1071     return header;
1072 }
1073
1074 template<typename Header>
1075 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1076 {
1077     ASSERT(contains(address));
1078     Header* header = findHeaderFromAddress(address);
1079     if (!header)
1080         return;
1081
1082 #if ENABLE(GC_TRACING)
1083     visitor->setHostInfo(&address, "stack");
1084 #endif
1085     if (hasVTable(header) && !vTableInitialized(header->payload()))
1086         visitor->markConservatively(header);
1087     else
1088         visitor->mark(header, traceCallback(header));
1089 }
1090
1091 #if ENABLE(GC_TRACING)
1092 template<typename Header>
1093 const GCInfo* HeapPage<Header>::findGCInfo(Address address)
1094 {
1095     if (address < payload())
1096         return 0;
1097
1098     if (gcInfo()) // for heaps not using FinalizedHeapObjectHeader
1099         return gcInfo();
1100
1101     Header* header = findHeaderFromAddress(address);
1102     if (!header)
1103         return 0;
1104
1105     return header->gcInfo();
1106 }
1107 #endif
1108
1109 #if defined(ADDRESS_SANITIZER)
1110 template<typename Header>
1111 void HeapPage<Header>::poisonUnmarkedObjects()
1112 {
1113     for (Address headerAddress = payload(); headerAddress < end(); ) {
1114         Header* header = reinterpret_cast<Header*>(headerAddress);
1115         ASSERT(header->size() < blinkPagePayloadSize());
1116
1117         if (!header->isFree() && !header->isMarked())
1118             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1119         headerAddress += header->size();
1120     }
1121 }
1122 #endif
1123
1124 template<>
1125 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1126 {
1127     header->finalize();
1128 }
1129
1130 template<>
1131 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1132 {
1133     ASSERT(gcInfo());
1134     HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1135 }
1136
1137 template<>
1138 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1139 {
1140     ASSERT(gcInfo());
1141     return gcInfo()->m_trace;
1142 }
1143
1144 template<>
1145 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1146 {
1147     return header->traceCallback();
1148 }
1149
1150 template<>
1151 inline bool HeapPage<HeapObjectHeader>::hasVTable(HeapObjectHeader* header)
1152 {
1153     ASSERT(gcInfo());
1154     return gcInfo()->hasVTable();
1155 }
1156
1157 template<>
1158 inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHeader* header)
1159 {
1160     return header->hasVTable();
1161 }
1162
1163 template<typename Header>
1164 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1165 {
1166     stats.increaseAllocatedSpace(size());
1167     stats.increaseObjectSpace(payloadSize());
1168 }
1169
1170 template<typename Entry>
1171 void HeapExtentCache<Entry>::flush()
1172 {
1173     if (m_hasEntries) {
1174         for (int i = 0; i < numberOfEntries; i++)
1175             m_entries[i] = Entry();
1176         m_hasEntries = false;
1177     }
1178 }
1179
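// Hash the blink page base of the address to an even slot index. Each even
// slot and the following odd slot form a two-entry set used by lookup() and
// addEntry() below.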
1180 template<typename Entry>
1181 size_t HeapExtentCache<Entry>::hash(Address address)
1182 {
1183     size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1184     value ^= value >> numberOfEntriesLog2;
1185     value ^= value >> (numberOfEntriesLog2 * 2);
1186     value &= numberOfEntries - 1;
1187     return value & ~1; // Returns only even numbers.
1188 }
1189
1190 template<typename Entry>
1191 typename Entry::LookupResult HeapExtentCache<Entry>::lookup(Address address)
1192 {
1193     size_t index = hash(address);
1194     ASSERT(!(index & 1));
1195     Address cachePage = roundToBlinkPageStart(address);
1196     if (m_entries[index].address() == cachePage)
1197         return m_entries[index].result();
1198     if (m_entries[index + 1].address() == cachePage)
1199         return m_entries[index + 1].result();
1200     return 0;
1201 }
1202
1203 template<typename Entry>
1204 void HeapExtentCache<Entry>::addEntry(Address address, typename Entry::LookupResult entry)
1205 {
1206     m_hasEntries = true;
1207     size_t index = hash(address);
1208     ASSERT(!(index & 1));
1209     Address cachePage = roundToBlinkPageStart(address);
1210     m_entries[index + 1] = m_entries[index];
1211     m_entries[index] = Entry(cachePage, entry);
1212 }
1213
1214 // These should not be needed, but it seems impossible to persuade clang to
1215 // instantiate the template functions and export them from a shared library, so
1216 // we add these in the non-templated subclass, which does not have that issue.
1217 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1218 {
1219     HeapExtentCache<PositiveEntry>::addEntry(address, page);
1220 }
1221
1222 BaseHeapPage* HeapContainsCache::lookup(Address address)
1223 {
1224     return HeapExtentCache<PositiveEntry>::lookup(address);
1225 }
1226
1227 void Heap::flushHeapDoesNotContainCache()
1228 {
1229     s_heapDoesNotContainCache->flush();
1230 }
1231
1232 void CallbackStack::init(CallbackStack** first)
1233 {
1234     // The stacks are chained, so we start by setting this to null as terminator.
1235     *first = 0;
1236     *first = new CallbackStack(first);
1237 }
1238
1239 void CallbackStack::shutdown(CallbackStack** first)
1240 {
1241     CallbackStack* next;
1242     for (CallbackStack* current = *first; current; current = next) {
1243         next = current->m_next;
1244         delete current;
1245     }
1246     *first = 0;
1247 }
1248
1249 CallbackStack::~CallbackStack()
1250 {
1251 #ifndef NDEBUG
1252     clearUnused();
1253 #endif
1254 }
1255
1256 void CallbackStack::clearUnused()
1257 {
1258     ASSERT(m_current == &(m_buffer[0]));
1259     for (size_t i = 0; i < bufferSize; i++)
1260         m_buffer[i] = Item(0, 0);
1261 }
1262
1263 void CallbackStack::assertIsEmpty()
1264 {
1265     ASSERT(m_current == &(m_buffer[0]));
1266     ASSERT(!m_next);
1267 }
1268
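// Pop the top item off the stack and invoke its callback. When this block is
// exhausted and a chained block exists, this block is deleted and popping
// continues there; returns false once the stack is empty.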
1269 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
1270 {
1271     if (m_current == &(m_buffer[0])) {
1272         if (!m_next) {
1273 #ifndef NDEBUG
1274             clearUnused();
1275 #endif
1276             return false;
1277         }
1278         CallbackStack* nextStack = m_next;
1279         *first = nextStack;
1280         delete this;
1281         return nextStack->popAndInvokeCallback(first, visitor);
1282     }
1283     Item* item = --m_current;
1284
1285     VisitorCallback callback = item->callback();
1286 #if ENABLE(GC_TRACING)
1287     if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
1288         visitor->setHostInfo(item->object(), classOf(item->object()));
1289 #endif
1290     callback(visitor, item->object());
1291
1292     return true;
1293 }
1294
1295 class MarkingVisitor : public Visitor {
1296 public:
1297 #if ENABLE(GC_TRACING)
1298     typedef HashSet<uintptr_t> LiveObjectSet;
1299     typedef HashMap<String, LiveObjectSet> LiveObjectMap;
1300     typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph;
1301 #endif
1302
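    // Mark the object if it is not already marked and, if it has a trace
    // callback, push it onto the marking stack so that its fields are traced
    // later.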
1303     inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1304     {
1305         ASSERT(header);
1306         ASSERT(objectPointer);
1307         if (header->isMarked())
1308             return;
1309         header->mark();
1310 #if ENABLE(GC_TRACING)
1311         String className(classOf(objectPointer));
1312         {
1313             LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
1314             result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
1315         }
1316         ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
1317         ASSERT(result.isNewEntry);
1318         // printf("%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
1319 #endif
1320         if (callback)
1321             Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
1322     }
1323
1324     virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1325     {
1326         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1327         // versions to correctly find the payload.
1328         visitHeader(header, header->payload(), callback);
1329     }
1330
1331     virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1332     {
1333         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1334         // versions to correctly find the payload.
1335         visitHeader(header, header->payload(), callback);
1336     }
1337
1338     virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1339     {
1340         if (!objectPointer)
1341             return;
1342         FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1343         visitHeader(header, header->payload(), callback);
1344     }
1345
1346
1347     inline void visitConservatively(HeapObjectHeader* header, void* objectPointer, size_t objectSize)
1348     {
1349         ASSERT(header);
1350         ASSERT(objectPointer);
1351         if (header->isMarked())
1352             return;
1353         header->mark();
1354
1355         // Scan through the object's fields and visit them conservatively.
1356         Address* objectFields = reinterpret_cast<Address*>(objectPointer);
1357         for (size_t i = 0; i < objectSize / sizeof(Address); ++i)
1358             Heap::checkAndMarkPointer(this, objectFields[i]);
1359     }
1360
1361     virtual void markConservatively(HeapObjectHeader* header)
1362     {
1363         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1364         // versions to correctly find the payload.
1365         visitConservatively(header, header->payload(), header->payloadSize());
1366     }
1367
1368     virtual void markConservatively(FinalizedHeapObjectHeader* header)
1369     {
1370         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1371         // versions to correctly find the payload.
1372         visitConservatively(header, header->payload(), header->payloadSize());
1373     }
1374
1375     virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
1376     {
1377         Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
1378     }
1379
1380     virtual bool isMarked(const void* objectPointer) OVERRIDE
1381     {
1382         return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1383     }
1384
1385     // This macro defines the necessary visitor methods for typed heaps
1386 #define DEFINE_VISITOR_METHODS(Type)                                              \
1387     virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1388     {                                                                             \
1389         if (!objectPointer)                                                       \
1390             return;                                                               \
1391         HeapObjectHeader* header =                                                \
1392             HeapObjectHeader::fromPayload(objectPointer);                         \
1393         visitHeader(header, header->payload(), callback);                         \
1394     }                                                                             \
1395     virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
1396     {                                                                             \
1397         return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
1398     }
1399
1400     FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1401 #undef DEFINE_VISITOR_METHODS
1402
1403 #if ENABLE(GC_TRACING)
1404     void reportStats()
1405     {
1406         printf("\n---------- AFTER MARKING -------------------\n");
1407         for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) {
1408             printf("%s %u", it->key.ascii().data(), it->value.size());
1409
1410             if (it->key == "WebCore::Document")
1411                 reportStillAlive(it->value, previouslyLive().get(it->key));
1412
1413             printf("\n");
1414         }
1415
1416         previouslyLive().swap(currentlyLive());
1417         currentlyLive().clear();
1418
1419         for (HashSet<uintptr_t>::iterator it = objectsToFindPath().begin(), end = objectsToFindPath().end(); it != end; ++it) {
1420             dumpPathToObjectFromObjectGraph(objectGraph(), *it);
1421         }
1422     }
1423
1424     static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
1425     {
1426         int count = 0;
1427
1428         printf(" [previously %u]", previous.size());
1429         for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1430             if (previous.find(*it) == previous.end())
1431                 continue;
1432             count++;
1433         }
1434
1435         if (!count)
1436             return;
1437
1438         printf(" {survived 2GCs %d: ", count);
1439         for (LiveObjectSet::iterator it = current.begin(), end = current.end(); it != end; ++it) {
1440             if (previous.find(*it) == previous.end())
1441                 continue;
1442             printf("%ld", *it);
1443             if (--count)
1444                 printf(", ");
1445         }
1446         ASSERT(!count);
1447         printf("}");
1448     }
1449
1450     static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target)
1451     {
1452         printf("Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data());
1453         ObjectGraph::const_iterator it = graph.find(target);
1454         while (it != graph.end()) {
1455             printf("<- %lx of %s\n", it->value.first, it->value.second.ascii().data());
1456             it = graph.find(it->value.first);
1457         }
1458         printf("\n");
1459     }
1460
1461     static void dumpPathToObjectOnNextGC(void* p)
1462     {
1463         objectsToFindPath().add(reinterpret_cast<uintptr_t>(p));
1464     }
1465
1466     static LiveObjectMap& previouslyLive()
1467     {
1468         DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1469         return map;
1470     }
1471
1472     static LiveObjectMap& currentlyLive()
1473     {
1474         DEFINE_STATIC_LOCAL(LiveObjectMap, map, ());
1475         return map;
1476     }
1477
1478     static ObjectGraph& objectGraph()
1479     {
1480         DEFINE_STATIC_LOCAL(ObjectGraph, graph, ());
1481         return graph;
1482     }
1483
1484     static HashSet<uintptr_t>& objectsToFindPath()
1485     {
1486         DEFINE_STATIC_LOCAL(HashSet<uintptr_t>, set, ());
1487         return set;
1488     }
1489 #endif
1490
1491 protected:
1492     virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
1493     {
1494         Heap::pushWeakCellPointerCallback(cell, callback);
1495     }
1496 };
1497
1498 void Heap::init()
1499 {
1500     ThreadState::init();
1501     CallbackStack::init(&s_markingStack);
1502     CallbackStack::init(&s_weakCallbackStack);
1503     s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1504     s_markingVisitor = new MarkingVisitor();
1505 }
1506
1507 void Heap::shutdown()
1508 {
1509     s_shutdownCalled = true;
1510     ThreadState::shutdownHeapIfNecessary();
1511 }
1512
1513 void Heap::doShutdown()
1514 {
1515     // We don't want to call doShutdown() twice.
1516     if (!s_markingVisitor)
1517         return;
1518
1519     ASSERT(!ThreadState::isAnyThreadInGC());
1520     ASSERT(!ThreadState::attachedThreads().size());
1521     delete s_markingVisitor;
1522     s_markingVisitor = 0;
1523     delete s_heapDoesNotContainCache;
1524     s_heapDoesNotContainCache = 0;
1525     CallbackStack::shutdown(&s_weakCallbackStack);
1526     CallbackStack::shutdown(&s_markingStack);
1527     ThreadState::shutdown();
1528 }
1529
1530 BaseHeapPage* Heap::contains(Address address)
1531 {
1532     ASSERT(ThreadState::isAnyThreadInGC());
1533     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1534     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1535         BaseHeapPage* page = (*it)->contains(address);
1536         if (page)
1537             return page;
1538     }
1539     return 0;
1540 }
1541
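// Conservatively check whether address points into any attached thread's heap
// and mark the containing object if it does. In release builds a negative
// cache is used to quickly skip addresses known not to be in the heap.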
1542 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1543 {
1544     ASSERT(ThreadState::isAnyThreadInGC());
1545
1546 #ifdef NDEBUG
1547     if (s_heapDoesNotContainCache->lookup(address))
1548         return 0;
1549 #endif
1550
1551     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1552     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1553         if ((*it)->checkAndMarkPointer(visitor, address)) {
1554             // The pointer was in a page of that thread. If it actually
1555             // pointed into an object, that object was found and marked.
1556             ASSERT(!s_heapDoesNotContainCache->lookup(address));
1557             return address;
1558         }
1559     }
1560
1561 #ifdef NDEBUG
1562     s_heapDoesNotContainCache->addEntry(address, true);
1563 #else
1564     if (!s_heapDoesNotContainCache->lookup(address))
1565         s_heapDoesNotContainCache->addEntry(address, true);
1566 #endif
1567     return 0;
1568 }
1569
1570 #if ENABLE(GC_TRACING)
1571 const GCInfo* Heap::findGCInfo(Address address)
1572 {
1573     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1574     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1575         if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
1576             return gcInfo;
1577         }
1578     }
1579     return 0;
1580 }
1581
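// Records |p| so that the next garbage collection dumps a retaining path to
// it, based on the object graph the marking visitor builds under GC_TRACING.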
1582 void Heap::dumpPathToObjectOnNextGC(void* p)
1583 {
1584     static_cast<MarkingVisitor*>(s_markingVisitor)->dumpPathToObjectOnNextGC(p);
1585 }
1586 #endif
1587
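// The marking stack holds deferred trace callbacks: each entry pairs a heap
// object with the callback that knows how to trace that object's pointers.
// Marking is therefore iterative; collectGarbage() keeps calling
// popAndInvokeTraceCallback() until the stack is empty. As a rough,
// illustrative sketch (the class and member below are hypothetical, not taken
// from this file), the callback registered for an object typically ends up
// running its trace() method:
//
//     class Example : public GarbageCollected<Example> {
//     public:
//         void trace(Visitor* visitor) { visitor->trace(m_other); }
//     private:
//         Member<Example> m_other;
//     };
//
// When marking reaches such an object, its (object, trace callback) pair is
// pushed here instead of being traced recursively.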
1588 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1589 {
1590     ASSERT(Heap::contains(object));
1591     CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1592     *slot = CallbackStack::Item(object, callback);
1593 }
1594
1595 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1596 {
1597     return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
1598 }
1599
1600 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
1601 {
1602     ASSERT(Heap::contains(cell));
1603     CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
1604     *slot = CallbackStack::Item(cell, callback);
1605 }
1606
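// Unlike weak cell callbacks, which go on the global stack above, weak object
// pointer callbacks are registered with the ThreadState that owns the
// object's heap page (located via the page header), keeping them per-thread.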
1607 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
1608 {
1609     ASSERT(Heap::contains(object));
1610     BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
1611     ASSERT(Heap::contains(object) == heapPageForObject);
1612     ThreadState* state = heapPageForObject->threadState();
1613     state->pushWeakObjectPointerCallback(closure, callback);
1614 }
1615
1616 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1617 {
1618     return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
1619 }
1620
1621 void Heap::prepareForGC()
1622 {
1623     ASSERT(ThreadState::isAnyThreadInGC());
1624     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1625     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1626         (*it)->prepareForGC();
1627 }
1628
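// Performs a stop-the-world collection: clear the pending GC request, park
// the other attached threads (re-requesting the GC and bailing out if parking
// fails), prepare every thread heap, mark everything reachable from the roots
// by draining the marking stack, and finally run the weak pointer callbacks
// for objects that may now point to dead objects.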
1629 void Heap::collectGarbage(ThreadState::StackState stackState)
1630 {
1631     ThreadState* state = ThreadState::current();
1632     state->clearGCRequested();
1633
1634     GCScope gcScope(stackState);
1635     // Check whether we successfully parked the other threads. If not, we re-request the GC and bail out.
1636     if (!gcScope.allThreadsParked()) {
1637         ThreadState::current()->setGCRequested();
1638         return;
1639     }
1640     TRACE_EVENT0("Blink", "Heap::collectGarbage");
1641     TRACE_EVENT_SCOPED_SAMPLING_STATE("Blink", "BlinkGC");
1642 #if ENABLE(GC_TRACING)
1643     static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1644 #endif
1645
1646     // Disallow allocation during garbage collection (but not
1647     // during the finalization that happens when the gcScope is
1648     // torn down).
1649     NoAllocationScope<AnyThread> noAllocationScope;
1650
1651     prepareForGC();
1652
1653     ThreadState::visitRoots(s_markingVisitor);
1654     // Recursively mark all objects that are reachable from the roots.
1655     while (popAndInvokeTraceCallback(s_markingVisitor)) { }
1656
1657     // Call weak callbacks on objects that may now be pointing to dead
1658     // objects.
1659     while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1660
1661     // It is not permitted to trace pointers of live objects in the weak
1662     // callback phase, so the marking stack should still be empty here.
1663     s_markingStack->assertIsEmpty();
1664
1665 #if ENABLE(GC_TRACING)
1666     static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1667 #endif
1668 }
1669
1670 void Heap::collectAllGarbage()
1671 {
1672     // FIXME: oilpan: we should be able to perform a single GC and have
1673     // everything die. Unfortunately that is not yet the case for all
1674     // objects, because the hierarchy has not been completely moved to
1675     // the heap and some heap-allocated objects own objects that contain
1676     // persistents pointing to other heap-allocated objects.
1677     for (int i = 0; i < 5; i++)
1678         collectGarbage(ThreadState::NoHeapPointersOnStack);
1679 }
1680
1681 void Heap::setForcePreciseGCForTesting()
1682 {
1683     ThreadState::current()->setForcePreciseGCForTesting(true);
1684 }
1685
1686 void Heap::getStats(HeapStats* stats)
1687 {
1688     stats->clear();
1689     ASSERT(ThreadState::isAnyThreadInGC());
1690     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1691     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1692     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1693         HeapStats temp;
1694         (*it)->getStats(temp);
1695         stats->add(&temp);
1696     }
1697 }
1698
1699 bool Heap::isConsistentForGC()
1700 {
1701     ASSERT(ThreadState::isAnyThreadInGC());
1702     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1703     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1704         if (!(*it)->isConsistentForGC())
1705             return false;
1706     }
1707     return true;
1708 }
1709
1710 void Heap::makeConsistentForGC()
1711 {
1712     ASSERT(ThreadState::isAnyThreadInGC());
1713     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1714     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1715         (*it)->makeConsistentForGC();
1716 }
1717
1718 // Force template instantiations for the types that we need.
1719 template class HeapPage<FinalizedHeapObjectHeader>;
1720 template class HeapPage<HeapObjectHeader>;
1721 template class ThreadHeap<FinalizedHeapObjectHeader>;
1722 template class ThreadHeap<HeapObjectHeader>;
1723
1724 Visitor* Heap::s_markingVisitor;
1725 CallbackStack* Heap::s_markingStack;
1726 CallbackStack* Heap::s_weakCallbackStack;
1727 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1728 bool Heap::s_shutdownCalled = false;
1729 } // namespace WebCore