/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "heap/Heap.h"

#include "heap/ThreadState.h"

#include "wtf/PassOwnPtr.h"

#if OS(POSIX)
#include <sys/mman.h>
#include <unistd.h>
#elif OS(WIN)
#include <windows.h>
#endif

namespace WebCore {

#if OS(WIN)
static bool IsPowerOf2(size_t power)
{
    return !((power - 1) & power);
}
#endif

static Address roundToBlinkPageBoundary(void* base)
{
    return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
}

static size_t roundToOsPageSize(size_t size)
{
    return (size + osPageSize() - 1) & ~(osPageSize() - 1);
}

size_t osPageSize()
{
#if OS(POSIX)
    static const size_t pageSize = getpagesize();
#else
    static size_t pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        pageSize = info.dwPageSize;
        ASSERT(IsPowerOf2(pageSize));
    }
#endif
    return pageSize;
}

class MemoryRegion {
public:
    MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }

    bool contains(Address addr) const
    {
        return m_base <= addr && addr < (m_base + m_size);
    }

    bool contains(const MemoryRegion& other) const
    {
        return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
    }

    void release()
    {
#if OS(POSIX)
        int err = munmap(m_base, m_size);
        RELEASE_ASSERT(!err);
#else
        bool success = VirtualFree(m_base, 0, MEM_RELEASE);
        RELEASE_ASSERT(success);
#endif
    }

    WARN_UNUSED_RETURN bool commit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
        if (!err) {
            madvise(m_base, m_size, MADV_NORMAL);
            return true;
        }
        return false;
#else
        void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
        return !!result;
#endif
    }

    void decommit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_NONE);
        RELEASE_ASSERT(!err);
        // FIXME: Consider using MADV_FREE on MacOS.
        madvise(m_base, m_size, MADV_DONTNEED);
#else
        bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
        RELEASE_ASSERT(success);
#endif
    }

    Address base() const { return m_base; }

private:
    Address m_base;
    size_t m_size;
};

// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
class PageMemory {
public:
    ~PageMemory() { m_reserved.release(); }

    bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
    void decommit() { m_writable.decommit(); }

    Address writableStart() { return m_writable.base(); }

    // Allocate a virtual address space for the blink page with the
    // following layout:
    //
    //    [ guard os page | ... payload ... | guard os page ]
    //    ^---{ aligned to blink page size }
    //
    static PageMemory* allocate(size_t payloadSize)
    {
        ASSERT(payloadSize > 0);

        // Virtual memory allocation routines operate in OS page sizes.
        // Round up the requested size to the nearest OS page size.
        payloadSize = roundToOsPageSize(payloadSize);

        // Overallocate by blinkPageSize and 2 times OS page size to
        // ensure a chunk of memory which is blinkPageSize aligned and
        // has a system page before and after to use for guarding. We
        // unmap the excess memory before returning.
        size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;

#if OS(POSIX)
        Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
        RELEASE_ASSERT(base != MAP_FAILED);

        Address end = base + allocationSize;
        Address alignedBase = roundToBlinkPageBoundary(base);
        Address payloadBase = alignedBase + osPageSize();
        Address payloadEnd = payloadBase + payloadSize;
        Address blinkPageEnd = payloadEnd + osPageSize();

        // If the allocated memory was not blink page aligned, release
        // the memory before the aligned address.
        if (alignedBase != base)
            MemoryRegion(base, alignedBase - base).release();

        // Create guard pages by decommitting an OS page before and
        // after the payload.
        MemoryRegion(alignedBase, osPageSize()).decommit();
        MemoryRegion(payloadEnd, osPageSize()).decommit();

        // Free the additional memory at the end of the page if any.
        if (blinkPageEnd < end)
            MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();

        return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
#else
        Address base = 0;
        Address alignedBase = 0;

        // On Windows it is impossible to partially release a region
        // of memory allocated by VirtualAlloc. To avoid wasting
        // virtual address space we reserve a large region, release it
        // as a whole, and then attempt to reserve an aligned region
        // inside the range that was just released.
        for (int attempt = 0; attempt < 3; attempt++) {
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);
            VirtualFree(base, 0, MEM_RELEASE);

            alignedBase = roundToBlinkPageBoundary(base);
            base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
            if (base) {
                RELEASE_ASSERT(base == alignedBase);
                allocationSize = payloadSize + 2 * osPageSize();
                break;
            }
        }

        if (!base) {
            // We failed to avoid wasting virtual address space after
            // several attempts.
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);

            // FIXME: If base is by accident blink page size aligned
            // here then we can create two pages out of reserved
            // space. Do this.
            alignedBase = roundToBlinkPageBoundary(base);
        }

        Address payloadBase = alignedBase + osPageSize();
        PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
        bool res = storage->commit();
        RELEASE_ASSERT(res);
        return storage;
#endif
    }

private:
    PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
        : m_reserved(reserved)
        , m_writable(writable)
    {
        ASSERT(reserved.contains(writable));
    }

    MemoryRegion m_reserved;
    MemoryRegion m_writable;
};

class GCScope {
public:
    explicit GCScope(ThreadState::StackState stackState)
        : m_state(ThreadState::current())
        , m_safePointScope(stackState)
    {
        m_state->checkThread();

        // FIXME: in the unlikely event that two threads decide to
        // collect garbage at the same time, avoid doing two GCs in
        // a row.
        RELEASE_ASSERT(!m_state->isInGC());
        RELEASE_ASSERT(!m_state->isSweepInProgress());
        ThreadState::stopThreads();
        m_state->enterGC();
    }

    ~GCScope()
    {
        m_state->leaveGC();
        ASSERT(!m_state->isInGC());
        ThreadState::resumeThreads();
    }

private:
    ThreadState* m_state;
    ThreadState::SafePointScope m_safePointScope;
};

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::isMarked() const
{
    checkHeader();
    return m_size & markBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::unmark()
{
    checkHeader();
    m_size &= ~markBitMask;
}

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::hasDebugMark() const
{
    checkHeader();
    return m_size & debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::clearDebugMark()
{
    checkHeader();
    m_size &= ~debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::setDebugMark()
{
    checkHeader();
    m_size |= debugBitMask;
}

#ifndef NDEBUG
NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
{
    m_magic = zappedMagic;
}
#endif

HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
    return header;
}

void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
{
    ASSERT(gcInfo);
    if (gcInfo->hasFinalizer()) {
        gcInfo->m_finalize(object);
    }
#ifndef NDEBUG
    for (size_t i = 0; i < objectSize; i++)
        object[i] = finalizedZapValue;
#endif
    // Zap the primary vTable entry (secondary vTable entries are not zapped)
    *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
}

NO_SANITIZE_ADDRESS
void FinalizedHeapObjectHeader::finalize()
{
    HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
}

template<typename Header>
void LargeHeapObject<Header>::unmark()
{
    return heapObjectHeader()->unmark();
}

template<typename Header>
bool LargeHeapObject<Header>::isMarked()
{
    return heapObjectHeader()->isMarked();
}

template<typename Header>
bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
    if (contains(address)) {
        mark(visitor);
        return true;
    }
    return false;
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
{
    visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
}

template<>
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
{
    ASSERT(gcInfo());
    visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
{
    heapObjectHeader()->finalize();
}

template<>
void LargeHeapObject<HeapObjectHeader>::finalize()
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
}

FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    FinalizedHeapObjectHeader* header =
        reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
    return header;
}

template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
    : m_currentAllocationPoint(0)
    , m_remainingAllocationSize(0)
    , m_firstPage(0)
    , m_firstLargeHeapObject(0)
    , m_biggestFreeListIndex(0)
    , m_threadState(state)
    , m_pagePool(0)
{
    clearFreeLists();
}

template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
    clearFreeLists();
    if (!ThreadState::isMainThread())
        assertEmpty();
    deletePages();
}

template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (threadState()->shouldGC()) {
        if (threadState()->shouldForceConservativeGC())
            Heap::collectGarbage(ThreadState::HeapPointersOnStack);
        else
            threadState()->setGCRequested();
    }
    ensureCurrentAllocation(allocationSize, gcInfo);
    return allocate(size, gcInfo);
}

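// Try to satisfy an allocation of at least minSize from the free lists: search
// the buckets from the largest non-empty one downwards (only buckets whose
// minimum entry size is >= minSize are considered) and, on success, turn the
// entry found into the current bump-pointer allocation area.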
template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
{
    size_t bucketSize = 1 << m_biggestFreeListIndex;
    int i = m_biggestFreeListIndex;
    for (; i > 0; i--, bucketSize >>= 1) {
        if (bucketSize < minSize)
            break;
        FreeListEntry* entry = m_freeLists[i];
        if (entry) {
            m_biggestFreeListIndex = i;
            entry->unlink(&m_freeLists[i]);
            setAllocationPoint(entry->address(), entry->size());
            ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
            return true;
        }
    }
    m_biggestFreeListIndex = i;
    return false;
}

template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
{
    ASSERT(minSize >= allocationGranularity);
    if (remainingAllocationSize() >= minSize)
        return;

    if (remainingAllocationSize() > 0)
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    if (allocateFromFreeList(minSize))
        return;
    addPageToHeap(gcInfo);
    bool success = allocateFromFreeList(minSize);
    RELEASE_ASSERT(success);
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        if (page->contains(address))
            return page;
    }
    return 0;
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->contains(address))
            return current;
    }
    return 0;
}

template<typename Header>
bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->checkAndMarkPointer(visitor, address))
            return true;
    }
    return false;
}

template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
{
    ASSERT(heapPageFromAddress(address));
    ASSERT(heapPageFromAddress(address + size - 1));
    ASSERT(size < blinkPagePayloadSize());
    // The free list entries are only pointer aligned (but when we allocate
    // from them we are 8 byte aligned due to the header size).
    ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
    ASSERT(!(size & allocationMask));
    ASAN_POISON_MEMORY_REGION(address, size);
    FreeListEntry* entry;
    if (size < sizeof(*entry)) {
        // Create a dummy header with only a size and the freelist bit set.
        ASSERT(size >= sizeof(BasicObjectHeader));
        // Free-list encode the size to mark the lost memory as freelist memory.
        new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
        // This memory gets lost. Sweeping can reclaim it.
        return;
    }
    entry = new (NotNull, address) FreeListEntry(size);
#if defined(ADDRESS_SANITIZER)
    // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
    // reaches zero. However we always add entire pages to ensure that adding a new page will
    // increase the allocation space.
    if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
        return;
#endif
    int index = bucketIndexForSize(size);
    entry->link(&m_freeLists[index]);
    if (index > m_biggestFreeListIndex)
        m_biggestFreeListIndex = index;
}

template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
{
    // The caller already added space for the object header and rounded up to
    // the allocation alignment.
    ASSERT(!(size & allocationMask));

    size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;

    // Ensure that there is enough space for alignment. If the header
    // is not a multiple of 8 bytes we will allocate an extra
    // headerPadding<Header> bytes to ensure it is 8 byte aligned.
    allocationSize += headerPadding<Header>();

    // If ASAN is supported we add allocationGranularity bytes to the allocated
    // space and poison that to detect overflows.
#if defined(ADDRESS_SANITIZER)
    allocationSize += allocationGranularity;
#endif
    if (threadState()->shouldGC())
        threadState()->setGCRequested();
    PageMemory* pageMemory = PageMemory::allocate(allocationSize);
    Address largeObjectAddress = pageMemory->writableStart();
    Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    memset(headerAddress, 0, size);
    Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo, threadState());

    // Poison the object header and allocationGranularity bytes after the object.
    ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
    ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
    largeObject->link(&m_firstLargeHeapObject);
    stats().increaseAllocatedSpace(largeObject->size());
    stats().increaseObjectSpace(largeObject->payloadSize());
    return result;
}

template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
{
    object->unlink(previousNext);
    object->finalize();

    // Unpoison the object header and allocationGranularity bytes after the
    // object before freeing.
    ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
    ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
    delete object->storage();
}

template<>
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the
    // GCInfo on the heap should be unused (i.e. 0).
    allocatePage(0);
}

template<>
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using HeapObjectHeaders store the
    // GCInfo on the heap since it is the same for all objects.
    ASSERT(gcInfo);
    allocatePage(gcInfo);
}

template<typename Header>
void ThreadHeap<Header>::clearPagePool()
{
    while (takePageFromPool()) { }
}

template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
{
    while (PagePoolEntry* entry = m_pagePool) {
        m_pagePool = entry->next();
        PageMemory* storage = entry->storage();
        delete entry;

        if (storage->commit())
            return storage;

        // Failed to commit pooled storage. Release it.
        delete storage;
    }

    return 0;
}

template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
{
    PageMemory* storage = unused->storage();
    PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
    m_pagePool = entry;
    storage->decommit();
}

template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
{
    heapContainsCache()->flush();
    PageMemory* pageMemory = takePageFromPool();
    if (!pageMemory) {
        pageMemory = PageMemory::allocate(blinkPagePayloadSize());
        RELEASE_ASSERT(pageMemory);
    }
    HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
    // FIXME: Oilpan: Linking new pages into the front of the list is
    // crucial when performing allocations during finalization because
    // it ensures that those pages are not swept in the current GC
    // round. We should create a separate page list for that to
    // separate out the pages allocated during finalization clearly
    // from the pages currently being swept.
    page->link(&m_firstPage);
    addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
}

#ifndef NDEBUG
template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->getStats(scannedStats);
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->getStats(scannedStats);
}
#endif

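// Sweep this heap: return completely empty pages to the page pool, finalize
// unmarked objects, coalesce the resulting gaps into free-list entries, and
// unmark the surviving objects while rebuilding the heap statistics.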
template<typename Header>
void ThreadHeap<Header>::sweep()
{
    ASSERT(isConsistentForGC());
#if defined(ADDRESS_SANITIZER)
    // When using ASAN do a pre-sweep where all unmarked objects are poisoned
    // before calling their finalizer methods. This can catch the cases where
    // one object's finalizer tries to modify another object as part of
    // finalization.
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->poisonUnmarkedObjects();
#endif
    HeapPage<Header>* page = m_firstPage;
    HeapPage<Header>** previous = &m_firstPage;
    bool pagesRemoved = false;
    while (page) {
        if (page->isEmpty()) {
            HeapPage<Header>* unused = page;
            page = page->next();
            HeapPage<Header>::unlink(unused, previous);
            pagesRemoved = true;
        } else {
            page->sweep();
            previous = &page->m_next;
            page = page->next();
        }
    }
    if (pagesRemoved)
        heapContainsCache()->flush();

    LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        if (current->isMarked()) {
            stats().increaseAllocatedSpace(current->size());
            stats().increaseObjectSpace(current->payloadSize());
            current->unmark();
            previousNext = &current->m_next;
            current = current->next();
        } else {
            LargeHeapObject<Header>* next = current->next();
            freeLargeObject(current, previousNext);
            current = next;
        }
    }
}

template<typename Header>
void ThreadHeap<Header>::assertEmpty()
{
    // No allocations are permitted. The thread is exiting.
    NoAllocationScope<AnyThread> noAllocation;
    makeConsistentForGC();
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        Address end = page->end();
        Address headerAddress;
        for (headerAddress = page->payload(); headerAddress < end; ) {
            BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
            ASSERT(basicHeader->size() < blinkPagePayloadSize());
            // A live object here is potentially a dangling pointer from some root.
            // Treat it as a critical bug in both release and debug mode.
            RELEASE_ASSERT(basicHeader->isFree());
            headerAddress += basicHeader->size();
        }
        ASSERT(headerAddress == end);
        addToFreeList(page->payload(), end - page->payload());
    }

    RELEASE_ASSERT(!m_firstLargeHeapObject);
}

template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
        if (m_freeLists[i])
            return false;
    }
    return !ownsNonEmptyAllocationArea();
}

template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
{
    if (ownsNonEmptyAllocationArea())
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    setAllocationPoint(0, 0);
    clearFreeLists();
}

template<typename Header>
void ThreadHeap<Header>::clearMarks()
{
    ASSERT(isConsistentForGC());
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->clearMarks();
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->unmark();
}

template<typename Header>
void ThreadHeap<Header>::deletePages()
{
    heapContainsCache()->flush();
    // Add all pages in the pool to the heap's list of pages before deleting
    clearPagePool();

    for (HeapPage<Header>* page = m_firstPage; page; ) {
        HeapPage<Header>* dead = page;
        page = page->next();
        PageMemory* storage = dead->storage();
        dead->~HeapPage();
        delete storage;
    }
    m_firstPage = 0;

    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        LargeHeapObject<Header>* dead = current;
        current = current->next();
        PageMemory* storage = dead->storage();
        dead->~LargeHeapObject();
        delete storage;
    }
    m_firstLargeHeapObject = 0;
}

template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++)
        m_freeLists[i] = 0;
}

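// Returns floor(log2(size)), i.e. the index of the largest power-of-two bucket
// whose minimum entry size does not exceed size.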
int BaseHeap::bucketIndexForSize(size_t size)
{
    ASSERT(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}

template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
    : BaseHeapPage(storage, gcInfo, heap->threadState())
    , m_next(0)
    , m_heap(heap)
{
    COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
    m_objectStartBitMapComputed = false;
    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    heap->stats().increaseAllocatedSpace(blinkPageSize);
}

template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
{
    m_next = *prevNext;
    *prevNext = this;
}

template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
{
    *prevNext = unused->m_next;
    unused->heap()->addPageToPool(unused);
}

template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(blinkPageSize);
    Address headerAddress = payload();
    ASSERT(headerAddress != end());
    do {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        if (!header->isFree())
            stats.increaseObjectSpace(header->payloadSize());
        ASSERT(header->size() < blinkPagePayloadSize());
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    } while (headerAddress < end());
}

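// A page is empty when its whole payload is covered by a single free-list entry.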
template<typename Header>
bool HeapPage<Header>::isEmpty()
{
    BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
    return header->isFree() && (header->size() == payloadSize());
}

template<typename Header>
void HeapPage<Header>::sweep()
{
    clearObjectStartBitMap();
    heap()->stats().increaseAllocatedSpace(blinkPageSize);
    Address startOfGap = payload();
    for (Address headerAddress = startOfGap; headerAddress < end(); ) {
        BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
        ASSERT(basicHeader->size() < blinkPagePayloadSize());

        if (basicHeader->isFree()) {
            headerAddress += basicHeader->size();
            continue;
        }
        // At this point we know this is a valid object of type Header.
        Header* header = static_cast<Header*>(basicHeader);

        if (!header->isMarked()) {
            // For ASAN we unpoison the specific object when calling the finalizer
            // and poison it again when done. This allows the object's own finalizer
            // to operate on the object, while other finalizers are not allowed to
            // access it.
            ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
            finalize(header);
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
            headerAddress += header->size();
            continue;
        }

        if (startOfGap != headerAddress)
            heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
        header->unmark();
        headerAddress += header->size();
        heap()->stats().increaseObjectSpace(header->payloadSize());
        startOfGap = headerAddress;
    }
    if (startOfGap != end())
        heap()->addToFreeList(startOfGap, end() - startOfGap);
}

template<typename Header>
void HeapPage<Header>::clearMarks()
{
    for (Address headerAddress = payload(); headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());
        if (!header->isFree())
            header->unmark();
        headerAddress += header->size();
    }
}

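// Build the object start bitmap for this page: one bit per allocationGranularity
// slot of the payload, set when an object header starts at that slot. The bitmap
// lets conservative pointer scanning map an interior address back to the start
// of the containing object.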
template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
{
    memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
    Address start = payload();
    for (Address headerAddress = start; headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        size_t objectOffset = headerAddress - start;
        ASSERT(!(objectOffset & allocationMask));
        size_t objectStartNumber = objectOffset / allocationGranularity;
        size_t mapIndex = objectStartNumber / 8;
        ASSERT(mapIndex < objectStartBitMapSize);
        m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    }
    m_objectStartBitMapComputed = true;
}

template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
{
    m_objectStartBitMapComputed = false;
}

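// Count the number of leading zero bits in a byte (returns 8 for zero). Used
// below to find the most significant set bit when scanning the object start
// bitmap backwards from a candidate address.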
static int numberOfLeadingZeroes(uint8_t byte)
{
    if (!byte)
        return 8;
    int result = 0;
    if (byte <= 0x0F) {
        result += 4;
        byte = byte << 4;
    }
    if (byte <= 0x3F) {
        result += 2;
        byte = byte << 2;
    }
    if (byte <= 0x7F)
        result++;
    return result;
}

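// Conservatively handle a potential interior pointer into this page: use the
// object start bitmap to find the start of the object containing addr and, if
// that object is not a free-list entry, mark it via the visitor.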
template<typename Header>
bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
{
    if (addr < payload())
        return false;
    if (!isObjectStartBitMapComputed())
        populateObjectStartBitMap();
    size_t objectOffset = addr - payload();
    size_t objectStartNumber = objectOffset / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    ASSERT(mapIndex < objectStartBitMapSize);
    size_t bit = objectStartNumber & 7;
    uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
    while (!byte) {
        ASSERT(mapIndex > 0);
        byte = m_objectStartBitMap[--mapIndex];
    }
    int leadingZeroes = numberOfLeadingZeroes(byte);
    objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
    objectOffset = objectStartNumber * allocationGranularity;
    Address objectAddress = objectOffset + payload();
    Header* header = reinterpret_cast<Header*>(objectAddress);
    if (header->isFree())
        return false;

    visitor->mark(header, traceCallback(header));
    return true;
}

#if defined(ADDRESS_SANITIZER)
template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
{
    for (Address headerAddress = payload(); headerAddress < end(); ) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());

        if (!header->isFree() && !header->isMarked())
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
        headerAddress += header->size();
    }
}
#endif

template<>
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
{
    header->finalize();
}

template<>
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
}

template<>
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    return gcInfo()->m_trace;
}

template<>
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
{
    return header->traceCallback();
}

template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(size());
    stats.increaseObjectSpace(payloadSize());
}

HeapContainsCache::HeapContainsCache()
    : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
{
}

void HeapContainsCache::flush()
{
    for (int i = 0; i < numberOfEntries; i++)
        m_entries[i] = Entry();
}

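// Hash a blink page address to an even table index; together with the entry at
// index + 1 it forms a two-entry bucket used by lookup() and addEntry().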
size_t HeapContainsCache::hash(Address address)
{
    size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
    value ^= value >> numberOfEntriesLog2;
    value ^= value >> (numberOfEntriesLog2 * 2);
    value &= numberOfEntries - 1;
    return value & ~1; // Returns only even numbers.
}

bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
{
    ASSERT(page);
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    if (m_entries[index].address() == cachePage) {
        *page = m_entries[index].containingPage();
        return true;
    }
    if (m_entries[index + 1].address() == cachePage) {
        *page = m_entries[index + 1].containingPage();
        return true;
    }
    *page = 0;
    return false;
}

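// Insert a page into the bucket for address: the previous first entry is moved
// to the second slot, so the bucket keeps its two most recently inserted entries.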
void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
{
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    m_entries[index + 1] = m_entries[index];
    m_entries[index] = Entry(cachePage, page);
}

void CallbackStack::init(CallbackStack** first)
{
    // The stacks are chained, so we start by setting this to null as a terminator.
    *first = 0;
    *first = new CallbackStack(first);
}

void CallbackStack::shutdown(CallbackStack** first)
{
    CallbackStack* next;
    for (CallbackStack* current = *first; current; current = next) {
        next = current->m_next;
        delete current;
    }
    *first = 0;
}

CallbackStack::~CallbackStack()
{
#ifndef NDEBUG
    clearUnused();
#endif
}

void CallbackStack::clearUnused()
{
    ASSERT(m_current == &(m_buffer[0]));
    for (size_t i = 0; i < bufferSize; i++)
        m_buffer[i] = Item(0, 0);
}

void CallbackStack::assertIsEmpty()
{
    ASSERT(m_current == &(m_buffer[0]));
    ASSERT(!m_next);
}

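// Pop the top item off this chained callback stack and invoke its callback.
// When the current block is exhausted it is deleted and popping continues in
// the next block; returns false once the whole chain is empty.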
bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
{
    if (m_current == &(m_buffer[0])) {
        if (!m_next) {
#ifndef NDEBUG
            clearUnused();
#endif
            return false;
        }
        CallbackStack* nextStack = m_next;
        *first = nextStack;
        delete this;
        return nextStack->popAndInvokeCallback(first, visitor);
    }
    Item* item = --m_current;

    VisitorCallback callback = item->callback();
    callback(visitor, item->object());

    return true;
}

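// Visitor used during the marking phase of a GC: it marks object headers and,
// when an object has a trace callback, pushes the object onto the global
// marking stack so that its fields are traced iteratively rather than by
// recursive calls.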
class MarkingVisitor : public Visitor {
public:
    inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
    {
        ASSERT(header);
        ASSERT(objectPointer);
        if (header->isMarked())
            return;
        header->mark();
        if (callback)
            Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
    }

    virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
    {
        if (!objectPointer)
            return;
        FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
        visitHeader(header, header->payload(), callback);
    }

    virtual void registerWeakMembers(const void* closure, const void* containingObject, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakObjectPointerCallback(const_cast<void*>(closure), const_cast<void*>(containingObject), callback);
    }

    virtual bool isMarked(const void* objectPointer) OVERRIDE
    {
        return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
    }

    // This macro defines the necessary visitor methods for typed heaps.
#define DEFINE_VISITOR_METHODS(Type)                                              \
    virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
    {                                                                             \
        if (!objectPointer)                                                       \
            return;                                                               \
        HeapObjectHeader* header =                                                \
            HeapObjectHeader::fromPayload(objectPointer);                         \
        visitHeader(header, header->payload(), callback);                         \
    }                                                                             \
    virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
    {                                                                             \
        return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
    }

    FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
#undef DEFINE_VISITOR_METHODS

protected:
    virtual void registerWeakCell(void** cell, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakCellPointerCallback(cell, callback);
    }
};

void Heap::init()
{
    ThreadState::init();
    CallbackStack::init(&s_markingStack);
    CallbackStack::init(&s_weakCallbackStack);
    s_markingVisitor = new MarkingVisitor();
}

void Heap::shutdown()
{
    delete s_markingVisitor;
    CallbackStack::shutdown(&s_weakCallbackStack);
    CallbackStack::shutdown(&s_markingStack);
    ThreadState::shutdown();
}

BaseHeapPage* Heap::contains(Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        BaseHeapPage* page = (*it)->contains(address);
        if (page)
            return page;
    }
    return 0;
}

Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->checkAndMarkPointer(visitor, address)) {
            // Pointer found and marked.
            return address;
        }
    }
    return 0;
}

void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
    return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
}

void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(cell));
    CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
    *slot = CallbackStack::Item(cell, callback);
}

void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(object));
    BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
    ASSERT(Heap::contains(object) == heapPageForObject);
    ThreadState* state = heapPageForObject->threadState();
    state->pushWeakObjectPointerCallback(closure, callback);
}

bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
}

void Heap::prepareForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->prepareForGC();
}

void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
{
    ThreadState::current()->clearGCRequested();
    GCScope gcScope(stackState);

    // Disallow allocation during garbage collection (but not
    // during the finalization that happens when the gcScope is
    // torn down).
    NoAllocationScope<AnyThread> noAllocationScope;

    prepareForGC();

    ThreadState::visitRoots(s_markingVisitor);
    // Recursively mark all objects that are reachable from the roots.
    while (popAndInvokeTraceCallback(s_markingVisitor)) { }

    // Call weak callbacks on objects that may now be pointing to dead
    // objects.
    while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }

    // It is not permitted to trace pointers of live objects in the weak
    // callback phase, so the marking stack should still be empty here.
    s_markingStack->assertIsEmpty();
}

void Heap::collectAllGarbage(ThreadState::StackState stackState, GCType gcType)
{
    // FIXME: oilpan: we should perform a single GC and everything
    // should die. Unfortunately that is not the case for all objects
    // because the hierarchy was not completely moved to the heap and
    // some heap allocated objects own objects that contain persistents
    // pointing to other heap allocated objects.
    for (int i = 0; i < 5; i++)
        collectGarbage(stackState, gcType);
}

void Heap::getStats(HeapStats* stats)
{
    stats->clear();
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        HeapStats temp;
        (*it)->getStats(temp);
        stats->add(&temp);
    }
}

bool Heap::isConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if (!(*it)->isConsistentForGC())
            return false;
    }
    return true;
}

void Heap::makeConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->makeConsistentForGC();
}

// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;

Visitor* Heap::s_markingVisitor;
CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;
}