/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "heap/Heap.h"

#include "heap/ThreadState.h"

#include "wtf/PassOwnPtr.h"

#if OS(POSIX)
#include <sys/mman.h>
#include <unistd.h>
#elif OS(WIN)
#include <windows.h>
#endif

namespace WebCore {

#if OS(WIN)
static bool IsPowerOf2(size_t power)
{
    return !((power - 1) & power);
}
#endif

static Address roundToBlinkPageBoundary(void* base)
{
    return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
}

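// Worked example (assuming a 4 KiB OS page): roundToOsPageSize(5000) yields
// 8192 and roundToOsPageSize(4096) yields 4096, since the size is rounded up
// to the next multiple of osPageSize().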
static size_t roundToOsPageSize(size_t size)
{
    return (size + osPageSize() - 1) & ~(osPageSize() - 1);
}

size_t osPageSize()
{
#if OS(POSIX)
    static const size_t pageSize = getpagesize();
#else
    static size_t pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        pageSize = info.dwPageSize;
        ASSERT(IsPowerOf2(pageSize));
    }
#endif
    return pageSize;
}

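// A MemoryRegion describes a contiguous address range [base, base + size)
// and wraps the platform-specific calls (mmap/mprotect/madvise on POSIX,
// VirtualAlloc/VirtualFree on Windows) used to release, commit and decommit
// that range.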
class MemoryRegion {
public:
    MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }

    bool contains(Address addr) const
    {
        return m_base <= addr && addr < (m_base + m_size);
    }

    bool contains(const MemoryRegion& other) const
    {
        return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
    }

    void release()
    {
#if OS(POSIX)
        int err = munmap(m_base, m_size);
        RELEASE_ASSERT(!err);
#else
        bool success = VirtualFree(m_base, 0, MEM_RELEASE);
        RELEASE_ASSERT(success);
#endif
    }

    WARN_UNUSED_RETURN bool commit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
        if (!err) {
            madvise(m_base, m_size, MADV_NORMAL);
            return true;
        }
        return false;
#else
        void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
        return !!result;
#endif
    }

    void decommit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_NONE);
        RELEASE_ASSERT(!err);
        // FIXME: Consider using MADV_FREE on MacOS.
        madvise(m_base, m_size, MADV_DONTNEED);
#else
        bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
        RELEASE_ASSERT(success);
#endif
    }

    Address base() const { return m_base; }

private:
    Address m_base;
    size_t m_size;
};

// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
class PageMemory {
public:
    ~PageMemory() { m_reserved.release(); }

    bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
    void decommit() { m_writable.decommit(); }

    Address writableStart() { return m_writable.base(); }

    // Allocate a virtual address space for the blink page with the
    // following layout:
    //
    //    [ guard os page | ... payload ... | guard os page ]
    //    ^---{ aligned to blink page size }
    //
    static PageMemory* allocate(size_t payloadSize)
    {
        ASSERT(payloadSize > 0);

        // Virtual memory allocation routines operate in OS page sizes.
        // Round up the requested size to nearest os page size.
        payloadSize = roundToOsPageSize(payloadSize);

        // Overallocate by blinkPageSize and 2 times OS page size to
        // ensure a chunk of memory which is blinkPageSize aligned and
        // has a system page before and after to use for guarding. We
        // unmap the excess memory before returning.
        size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;

#if OS(POSIX)
        Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
        RELEASE_ASSERT(base != MAP_FAILED);

        Address end = base + allocationSize;
        Address alignedBase = roundToBlinkPageBoundary(base);
        Address payloadBase = alignedBase + osPageSize();
        Address payloadEnd = payloadBase + payloadSize;
        Address blinkPageEnd = payloadEnd + osPageSize();

        // If the allocated memory was not blink page aligned, release
        // the memory before the aligned address.
        if (alignedBase != base)
            MemoryRegion(base, alignedBase - base).release();

        // Create guard pages by decommitting an OS page before and
        // after the payload.
        MemoryRegion(alignedBase, osPageSize()).decommit();
        MemoryRegion(payloadEnd, osPageSize()).decommit();

        // Free the additional memory at the end of the page if any.
        if (blinkPageEnd < end)
            MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();

        return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
#else
        Address base = 0;
        Address alignedBase = 0;

        // On Windows it is impossible to partially release a region
        // of memory allocated by VirtualAlloc. To avoid wasting
        // virtual address space we attempt to release a large region
        // of memory returned as a whole and then allocate an aligned
        // region inside this larger region.
        for (int attempt = 0; attempt < 3; attempt++) {
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);
            VirtualFree(base, 0, MEM_RELEASE);

            alignedBase = roundToBlinkPageBoundary(base);
            base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
            if (base) {
                RELEASE_ASSERT(base == alignedBase);
                allocationSize = payloadSize + 2 * osPageSize();
                break;
            }
        }

        if (!base) {
            // We failed to avoid wasting virtual address space after
            // several attempts.
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);

            // FIXME: If base is by accident blink page size aligned
            // here then we can create two pages out of reserved
            // space. Do this.
            alignedBase = roundToBlinkPageBoundary(base);
        }

        Address payloadBase = alignedBase + osPageSize();
        PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
        bool res = storage->commit();
        RELEASE_ASSERT(res);
        return storage;
#endif
    }

private:
    PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
        : m_reserved(reserved)
        , m_writable(writable)
    {
        ASSERT(reserved.contains(writable));
    }

    MemoryRegion m_reserved;
    MemoryRegion m_writable;
};
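
// Typical lifecycle (see ThreadHeap below): PageMemory::allocate() reserves
// the region, decommit() returns the payload memory to the OS when a page is
// pooled, commit() makes it usable again when the page is reused, and
// deleting the PageMemory releases the whole reservation.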

class GCScope {
public:
    explicit GCScope(ThreadState::StackState stackState)
        : m_state(ThreadState::current())
        , m_safePointScope(stackState)
    {
        m_state->checkThread();

        // FIXME: In the unlikely event that two threads decide to
        // collect garbage at the same time, avoid doing two GCs in
        // a row.
        ASSERT(!m_state->isInGC());
        ThreadState::stopThreads();
        m_state->enterGC();
    }

    ~GCScope()
    {
        m_state->leaveGC();
        ASSERT(!m_state->isInGC());
        ThreadState::resumeThreads();
    }

private:
    ThreadState* m_state;
    ThreadState::SafePointScope m_safePointScope;
};

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::isMarked() const
{
    checkHeader();
    return m_size & markBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::unmark()
{
    checkHeader();
    m_size &= ~markBitMask;
}

NO_SANITIZE_ADDRESS
bool HeapObjectHeader::hasDebugMark() const
{
    checkHeader();
    return m_size & debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::clearDebugMark()
{
    checkHeader();
    m_size &= ~debugBitMask;
}

NO_SANITIZE_ADDRESS
void HeapObjectHeader::setDebugMark()
{
    checkHeader();
    m_size |= debugBitMask;
}

#ifndef NDEBUG
NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
{
    m_magic = zappedMagic;
}
#endif

HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    HeapObjectHeader* header =
        reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
    return header;
}

void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
{
    ASSERT(gcInfo);
    if (gcInfo->hasFinalizer()) {
        gcInfo->m_finalize(object);
    }
#ifndef NDEBUG
    for (size_t i = 0; i < objectSize; i++)
        object[i] = finalizedZapValue;
#endif
    // Zap the primary vTable entry (secondary vTable entries are not zapped)
    *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
}

NO_SANITIZE_ADDRESS
void FinalizedHeapObjectHeader::finalize()
{
    HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
}

template<typename Header>
void LargeHeapObject<Header>::unmark()
{
    return heapObjectHeader()->unmark();
}

template<typename Header>
bool LargeHeapObject<Header>::isMarked()
{
    return heapObjectHeader()->isMarked();
}

template<typename Header>
bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
    if (contains(address)) {
        mark(visitor);
        return true;
    }
    return false;
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
{
    visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
}

template<>
void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
{
    ASSERT(gcInfo());
    visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
}

template<>
void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
{
    heapObjectHeader()->finalize();
}

template<>
void LargeHeapObject<HeapObjectHeader>::finalize()
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
}

FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
{
    Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
    FinalizedHeapObjectHeader* header =
        reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
    return header;
}

template<typename Header>
ThreadHeap<Header>::ThreadHeap(ThreadState* state)
    : m_currentAllocationPoint(0)
    , m_remainingAllocationSize(0)
    , m_firstPage(0)
    , m_firstLargeHeapObject(0)
    , m_biggestFreeListIndex(0)
    , m_inFinalizeAll(false)
    , m_threadState(state)
    , m_pagePool(0)
{
    clearFreeLists();
}

template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
    clearFreeLists();
    // FIXME: at the moment we can't finalize all objects owned by the
    // main thread eagerly because there are tangled destruction order
    // dependencies there.
    if (!ThreadState::isMainThread())
        finalizeAll();
    deletePages();
}

template<typename Header>
Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
{
    size_t allocationSize = allocationSizeFromSize(size);
    if (threadState()->shouldGC()) {
        if (threadState()->shouldForceConservativeGC())
            Heap::collectGarbage(ThreadState::HeapPointersOnStack);
        else
            threadState()->setGCRequested();
    }
    ensureCurrentAllocation(allocationSize, gcInfo);
    return allocate(size, gcInfo);
}

template<typename Header>
bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
{
    size_t bucketSize = 1 << m_biggestFreeListIndex;
    int i = m_biggestFreeListIndex;
    for (; i > 0; i--, bucketSize >>= 1) {
        if (bucketSize < minSize)
            break;
        FreeListEntry* entry = m_freeLists[i];
        if (entry) {
            m_biggestFreeListIndex = i;
            entry->unlink(&m_freeLists[i]);
            setAllocationPoint(entry->address(), entry->size());
            ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
            return true;
        }
    }
    m_biggestFreeListIndex = i;
    return false;
}

template<typename Header>
void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
{
    ASSERT(minSize >= allocationGranularity);
    if (remainingAllocationSize() >= minSize)
        return;

    if (remainingAllocationSize() > 0)
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    if (allocateFromFreeList(minSize))
        return;
    addPageToHeap(gcInfo);
    bool success = allocateFromFreeList(minSize);
    RELEASE_ASSERT(success);
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        if (page->contains(address))
            return page;
    }
    return 0;
}

template<typename Header>
BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->contains(address))
            return current;
    }
    return 0;
}

template<typename Header>
bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
{
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
        if (current->checkAndMarkPointer(visitor, address))
            return true;
    }
    return false;
}

template<typename Header>
void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
{
    ASSERT(heapPageFromAddress(address));
    ASSERT(heapPageFromAddress(address + size - 1));
    ASSERT(size < blinkPagePayloadSize());
    // The free list entries are only pointer aligned (but when we allocate
    // from them we are 8 byte aligned due to the header size).
    ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
    ASSERT(!(size & allocationMask));
    ASAN_POISON_MEMORY_REGION(address, size);
    FreeListEntry* entry;
    if (size < sizeof(*entry)) {
        // Create a dummy header with only a size and freelist bit set.
        ASSERT(size >= sizeof(BasicObjectHeader));
        // Freelist-encode the size to mark the lost memory as freelist memory.
        new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
        // This memory gets lost. Sweeping can reclaim it.
        return;
    }
    entry = new (NotNull, address) FreeListEntry(size);
#if USE_ASAN
    // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
    // reaches zero. However we always add entire pages to ensure that adding a new page will
    // increase the allocation space.
    if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
        return;
#endif
    int index = bucketIndexForSize(size);
    entry->link(&m_freeLists[index]);
    if (index > m_biggestFreeListIndex)
        m_biggestFreeListIndex = index;
}

template<typename Header>
Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
{
    // Caller already added space for object header and rounded up to allocation alignment.
    ASSERT(!(size & allocationMask));

    size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;

    // Ensure that there is enough space for alignment. If the header
    // is not a multiple of 8 bytes we will allocate an extra
    // headerPadding<Header> bytes to ensure it is 8 byte aligned.
    allocationSize += headerPadding<Header>();

    // If ASAN is supported we add allocationGranularity bytes to the allocated space and
    // poison that to detect overflows.
#if USE_ASAN
    allocationSize += allocationGranularity;
#endif
    if (threadState()->shouldGC())
        threadState()->setGCRequested();
    PageMemory* pageMemory = PageMemory::allocate(allocationSize);
    Address largeObjectAddress = pageMemory->writableStart();
    Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
    memset(headerAddress, 0, size);
    Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
    Address result = headerAddress + sizeof(*header);
    ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
    LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo);

    // Poison the object header and allocationGranularity bytes after the object.
    ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
    ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
    largeObject->link(&m_firstLargeHeapObject);
    stats().increaseAllocatedSpace(largeObject->size());
    stats().increaseObjectSpace(largeObject->payloadSize());
    return result;
}

template<typename Header>
void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
{
    object->unlink(previousNext);
    object->finalize();

    // Unpoison the object header and allocationGranularity bytes after the
    // object before freeing.
    ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
    ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
    delete object->storage();
}

template<>
void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
    // the heap should be unused (i.e. 0).
    allocatePage(0);
}

template<>
void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
{
    // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
    // since it is the same for all objects.
    ASSERT(gcInfo);
    allocatePage(gcInfo);
}

template<typename Header>
void ThreadHeap<Header>::clearPagePool()
{
    while (takePageFromPool()) { }
}

template<typename Header>
PageMemory* ThreadHeap<Header>::takePageFromPool()
{
    while (PagePoolEntry* entry = m_pagePool) {
        m_pagePool = entry->next();
        PageMemory* storage = entry->storage();
        delete entry;

        if (storage->commit())
            return storage;

        // Failed to commit pooled storage. Release it.
        delete storage;
    }

    return 0;
}

template<typename Header>
void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
{
    PageMemory* storage = unused->storage();
    PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
    m_pagePool = entry;
    storage->decommit();
}

template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
{
    heapContainsCache()->flush();
    PageMemory* pageMemory = takePageFromPool();
    if (!pageMemory) {
        pageMemory = PageMemory::allocate(blinkPagePayloadSize());
        RELEASE_ASSERT(pageMemory);
    }
    HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
    page->link(&m_firstPage);
    addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
}

#ifndef NDEBUG
template<typename Header>
void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
{
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->getStats(scannedStats);
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->getStats(scannedStats);
}
#endif

template<typename Header>
void ThreadHeap<Header>::sweep()
{
    ASSERT(isConsistentForGC());
#if USE_ASAN
    // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
    // calling their finalizer methods. This can catch the cases where one object's
    // finalizer tries to modify another object as part of finalization.
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->poisonUnmarkedObjects();
#endif
    HeapPage<Header>* page = m_firstPage;
    HeapPage<Header>** previous = &m_firstPage;
    bool pagesRemoved = false;
    while (page) {
        if (page->isEmpty()) {
            HeapPage<Header>* unused = page;
            page = page->next();
            HeapPage<Header>::unlink(unused, previous);
            pagesRemoved = true;
        } else {
            page->sweep();
            previous = &page->m_next;
            page = page->next();
        }
    }
    if (pagesRemoved)
        heapContainsCache()->flush();

    LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        if (current->isMarked()) {
            stats().increaseAllocatedSpace(current->size());
            stats().increaseObjectSpace(current->payloadSize());
            current->unmark();
            previousNext = &current->m_next;
            current = current->next();
        } else {
            LargeHeapObject<Header>* next = current->next();
            freeLargeObject(current, previousNext);
            current = next;
        }
    }
}

template<typename Header>
void ThreadHeap<Header>::finalizeAll(const void* except)
{
    if (inFinalizeAll())
        return;
    setFinalizeAll(true);

    // No nested GCs are permitted. The thread is exiting.
    NoAllocationScope<AnyThread> noAllocation;
    makeConsistentForGC();
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
        Address startOfGap = page->payload();
        Address end = page->end();
        Address headerAddress;
        for (headerAddress = page->payload(); headerAddress < end; ) {
            BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
            ASSERT(basicHeader->size() < blinkPagePayloadSize());

            if (basicHeader->isFree()) {
                headerAddress += basicHeader->size();
                continue;
            }
            // At this point we know this is a valid object of type Header
            Header* header = static_cast<Header*>(basicHeader);

            if (header->payload() == except) {
                if (startOfGap != headerAddress)
                    addToFreeList(startOfGap, headerAddress - startOfGap);
                headerAddress += header->size();
                startOfGap = headerAddress;
                continue;
            }

            page->finalize(header);
            headerAddress += header->size();
        }
        ASSERT(headerAddress == end);
        if (startOfGap != end)
            addToFreeList(startOfGap, end - startOfGap);
    }

    LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        LargeHeapObject<Header>* next = current->next();
        if (current->heapObjectHeader()->payload() != except)
            freeLargeObject(current, previousNext);
        else
            previousNext = &current->m_next;
        current = next;
    }
    setFinalizeAll(false);
}

template<typename Header>
bool ThreadHeap<Header>::isConsistentForGC()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
        if (m_freeLists[i])
            return false;
    }
    return !ownsNonEmptyAllocationArea();
}

template<typename Header>
void ThreadHeap<Header>::makeConsistentForGC()
{
    if (ownsNonEmptyAllocationArea())
        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
    setAllocationPoint(0, 0);
    clearFreeLists();
}

template<typename Header>
void ThreadHeap<Header>::clearMarks()
{
    ASSERT(isConsistentForGC());
    for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
        page->clearMarks();
    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
        current->unmark();
}

template<typename Header>
void ThreadHeap<Header>::deletePages()
{
    heapContainsCache()->flush();
    // Add all pages in the pool to the heap's list of pages before deleting
    clearPagePool();

    for (HeapPage<Header>* page = m_firstPage; page; ) {
        HeapPage<Header>* dead = page;
        page = page->next();
        PageMemory* storage = dead->storage();
        dead->~HeapPage();
        delete storage;
    }
    m_firstPage = 0;

    for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
        LargeHeapObject<Header>* dead = current;
        current = current->next();
        PageMemory* storage = dead->storage();
        dead->~LargeHeapObject();
        delete storage;
    }
    m_firstLargeHeapObject = 0;
}

template<typename Header>
void ThreadHeap<Header>::clearFreeLists()
{
    for (size_t i = 0; i < blinkPageSizeLog2; i++)
        m_freeLists[i] = 0;
}

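// Computes floor(log2(size)): for example, bucketIndexForSize(1) == 0,
// bucketIndexForSize(8) == 3 and bucketIndexForSize(4096) == 12, so free list
// bucket i holds entries whose size is at least 2^i.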
int BaseHeap::bucketIndexForSize(size_t size)
{
    ASSERT(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}

template<typename Header>
HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
    : BaseHeapPage(storage, gcInfo)
    , m_next(0)
    , m_heap(heap)
{
    COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
    m_objectStartBitMapComputed = false;
    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    heap->stats().increaseAllocatedSpace(blinkPageSize);
}

template<typename Header>
void HeapPage<Header>::link(HeapPage** prevNext)
{
    m_next = *prevNext;
    *prevNext = this;
}

template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
{
    *prevNext = unused->m_next;
    unused->heap()->addPageToPool(unused);
}

template<typename Header>
void HeapPage<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(blinkPageSize);
    Address headerAddress = payload();
    ASSERT(headerAddress != end());
    do {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        if (!header->isFree())
            stats.increaseObjectSpace(header->payloadSize());
        ASSERT(header->size() < blinkPagePayloadSize());
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    } while (headerAddress < end());
}

template<typename Header>
bool HeapPage<Header>::isEmpty()
{
    BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
    return header->isFree() && (header->size() == payloadSize());
}

template<typename Header>
void HeapPage<Header>::sweep()
{
    clearObjectStartBitMap();
    heap()->stats().increaseAllocatedSpace(blinkPageSize);
    Address startOfGap = payload();
    for (Address headerAddress = startOfGap; headerAddress < end(); ) {
        BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
        ASSERT(basicHeader->size() < blinkPagePayloadSize());

        if (basicHeader->isFree()) {
            headerAddress += basicHeader->size();
            continue;
        }
        // At this point we know this is a valid object of type Header
        Header* header = static_cast<Header*>(basicHeader);

        if (!header->isMarked()) {
            // For ASAN we unpoison the specific object when calling the finalizer
            // and poison it again when done, so that the object's own finalizer can
            // operate on the object while other finalizers are kept from accessing it.
            ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
            finalize(header);
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
            headerAddress += header->size();
            continue;
        }

        if (startOfGap != headerAddress)
            heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
        header->unmark();
        headerAddress += header->size();
        heap()->stats().increaseObjectSpace(header->payloadSize());
        startOfGap = headerAddress;
    }
    if (startOfGap != end())
        heap()->addToFreeList(startOfGap, end() - startOfGap);
}

template<typename Header>
void HeapPage<Header>::clearMarks()
{
    for (Address headerAddress = payload(); headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());
        if (!header->isFree())
            header->unmark();
        headerAddress += header->size();
    }
}

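// The object start bitmap keeps one bit per allocationGranularity-sized slot
// in the page payload; a set bit marks an address where an object header
// begins. It is built lazily here and invalidated by sweeping (see
// clearObjectStartBitMap), so conservative pointer checks can locate object
// starts without walking the whole page each time.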
template<typename Header>
void HeapPage<Header>::populateObjectStartBitMap()
{
    memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
    Address start = payload();
    for (Address headerAddress = start; headerAddress < end();) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        size_t objectOffset = headerAddress - start;
        ASSERT(!(objectOffset & allocationMask));
        size_t objectStartNumber = objectOffset / allocationGranularity;
        size_t mapIndex = objectStartNumber / 8;
        ASSERT(mapIndex < objectStartBitMapSize);
        m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
        headerAddress += header->size();
        ASSERT(headerAddress <= end());
    }
    m_objectStartBitMapComputed = true;
}

template<typename Header>
void HeapPage<Header>::clearObjectStartBitMap()
{
    m_objectStartBitMapComputed = false;
}

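// Counts the leading zero bits of a byte with a small binary search over the
// 4, 2 and 1 bit positions. For example, numberOfLeadingZeroes(0x10) == 3 and
// numberOfLeadingZeroes(0) == 8.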
static int numberOfLeadingZeroes(uint8_t byte)
{
    if (!byte)
        return 8;
    int result = 0;
    if (byte <= 0x0F) {
        result += 4;
        byte = byte << 4;
    }
    if (byte <= 0x3F) {
        result += 2;
        byte = byte << 2;
    }
    if (byte <= 0x7F)
        result++;
    return result;
}

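// Conservative pointer check: given an address that might point into this
// page, find the start of the object containing it by masking off bitmap bits
// above the address, scanning backwards to the nearest set bit, and using the
// leading-zero count to recover the object start. The object is marked only
// if its header is not a free list entry.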
template<typename Header>
bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
{
    if (addr < payload())
        return false;
    if (!isObjectStartBitMapComputed())
        populateObjectStartBitMap();
    size_t objectOffset = addr - payload();
    size_t objectStartNumber = objectOffset / allocationGranularity;
    size_t mapIndex = objectStartNumber / 8;
    ASSERT(mapIndex < objectStartBitMapSize);
    size_t bit = objectStartNumber & 7;
    uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
    while (!byte) {
        ASSERT(mapIndex > 0);
        byte = m_objectStartBitMap[--mapIndex];
    }
    int leadingZeroes = numberOfLeadingZeroes(byte);
    objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
    objectOffset = objectStartNumber * allocationGranularity;
    Address objectAddress = objectOffset + payload();
    Header* header = reinterpret_cast<Header*>(objectAddress);
    if (header->isFree())
        return false;

    visitor->mark(header, traceCallback(header));
    return true;
}

#if USE_ASAN
template<typename Header>
void HeapPage<Header>::poisonUnmarkedObjects()
{
    for (Address headerAddress = payload(); headerAddress < end(); ) {
        Header* header = reinterpret_cast<Header*>(headerAddress);
        ASSERT(header->size() < blinkPagePayloadSize());

        if (!header->isFree() && !header->isMarked())
            ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
        headerAddress += header->size();
    }
}
#endif

template<>
inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
{
    header->finalize();
}

template<>
inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
}

template<>
inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
{
    ASSERT(gcInfo());
    return gcInfo()->m_trace;
}

template<>
inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
{
    return header->traceCallback();
}

template<typename Header>
void LargeHeapObject<Header>::getStats(HeapStats& stats)
{
    stats.increaseAllocatedSpace(size());
    stats.increaseObjectSpace(payloadSize());
}

HeapContainsCache::HeapContainsCache()
    : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
{
}

void HeapContainsCache::flush()
{
    for (int i = 0; i < numberOfEntries; i++)
        m_entries[i] = Entry();
}

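// The cache is effectively two-way set associative: hash() always returns an
// even index, and each logical slot is the pair of entries at index and
// index + 1 (see lookup() and addEntry() below).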
size_t HeapContainsCache::hash(Address address)
{
    size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
    value ^= value >> numberOfEntriesLog2;
    value ^= value >> (numberOfEntriesLog2 * 2);
    value &= numberOfEntries - 1;
    return value & ~1; // Returns only even numbers.
}

bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
{
    ASSERT(page);
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    if (m_entries[index].address() == cachePage) {
        *page = m_entries[index].containingPage();
        return true;
    }
    if (m_entries[index + 1].address() == cachePage) {
        *page = m_entries[index + 1].containingPage();
        return true;
    }
    *page = 0;
    return false;
}

void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
{
    size_t index = hash(address);
    ASSERT(!(index & 1));
    Address cachePage = roundToBlinkPageStart(address);
    m_entries[index + 1] = m_entries[index];
    m_entries[index] = Entry(cachePage, page);
}

void CallbackStack::init(CallbackStack** first)
{
    // The stacks are chained, so we start by setting this to null as terminator.
    *first = 0;
    *first = new CallbackStack(first);
}

void CallbackStack::shutdown(CallbackStack** first)
{
    CallbackStack* next;
    for (CallbackStack* current = *first; current; current = next) {
        next = current->m_next;
        delete current;
    }
    *first = 0;
}

CallbackStack::~CallbackStack()
{
#ifndef NDEBUG
    clearUnused();
#endif
}

void CallbackStack::clearUnused()
{
    ASSERT(m_current == &(m_buffer[0]));
    for (size_t i = 0; i < bufferSize; i++)
        m_buffer[i] = Item(0, 0);
}

void CallbackStack::assertIsEmpty()
{
    ASSERT(m_current == &(m_buffer[0]));
    ASSERT(!m_next);
}

bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
{
    if (m_current == &(m_buffer[0])) {
        if (!m_next) {
#ifndef NDEBUG
            clearUnused();
#endif
            return false;
        }
        CallbackStack* nextStack = m_next;
        *first = nextStack;
        delete this;
        return nextStack->popAndInvokeCallback(first, visitor);
    }
    Item* item = --m_current;

    VisitorCallback callback = item->callback();
    callback(visitor, item->object());

    return true;
}

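// Marking works by setting the mark bit on the object's header and, if the
// object has a trace callback, pushing the object onto Heap::s_markingStack so
// its fields are traced later when the stack is drained in collectGarbage().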
class MarkingVisitor : public Visitor {
public:
    inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
    {
        ASSERT(header);
        ASSERT(objectPointer);
        if (header->isMarked())
            return;
        header->mark();
        if (callback)
            Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
    }

    virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
    {
        // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
        // versions to correctly find the payload.
        visitHeader(header, header->payload(), callback);
    }

    virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
    {
        if (!objectPointer)
            return;
        FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
        visitHeader(header, header->payload(), callback);
    }

    virtual void registerWeakMembers(const void* containingObject, WeakPointerCallback callback) OVERRIDE
    {
        Heap::pushWeakPointerCallback(const_cast<void*>(containingObject), callback);
    }

    virtual bool isMarked(const void* objectPointer) OVERRIDE
    {
        return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
    }

    // This macro defines the necessary visitor methods for typed heaps
#define DEFINE_VISITOR_METHODS(Type)                                              \
    virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
    {                                                                             \
        if (!objectPointer)                                                       \
            return;                                                               \
        HeapObjectHeader* header =                                                \
            HeapObjectHeader::fromPayload(objectPointer);                         \
        visitHeader(header, header->payload(), callback);                         \
    }                                                                             \
    virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
    {                                                                             \
        return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
    }

    FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
#undef DEFINE_VISITOR_METHODS
};

void Heap::init()
{
    ThreadState::init();
    CallbackStack::init(&s_markingStack);
    CallbackStack::init(&s_weakCallbackStack);
}

void Heap::shutdown()
{
    ThreadState::shutdown();
    CallbackStack::shutdown(&s_markingStack);
    CallbackStack::shutdown(&s_weakCallbackStack);
}

bool Heap::contains(Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->contains(address))
            return true;
    }
    return false;
}

Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if ((*it)->checkAndMarkPointer(visitor, address)) {
            // Pointer found and marked.
            return address;
        }
    }
    return 0;
}

void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
    return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
}

void Heap::pushWeakPointerCallback(void* object, WeakPointerCallback callback)
{
    ASSERT(Heap::contains(object));
    CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
    *slot = CallbackStack::Item(object, callback);
}

bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
    return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
}

void Heap::prepareForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->prepareForGC();
}

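// A full collection: GCScope stops all attached threads at safepoints, the
// roots are traced into the marking stack, the stack is drained to mark
// everything reachable, and finally weak pointer callbacks run for objects
// that may now point to dead memory.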
void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
{
    ThreadState::current()->clearGCRequested();
    GCScope gcScope(stackState);

    // Disallow allocation during garbage collection.
    NoAllocationScope<AnyThread> noAllocationScope;
    prepareForGC();
    MarkingVisitor marker;

    ThreadState::visitRoots(&marker);
    // Recursively mark all objects that are reachable from the roots.
    while (popAndInvokeTraceCallback(&marker)) { }

    // Call weak callbacks on objects that may now be pointing to dead
    // objects.
    while (popAndInvokeWeakPointerCallback(&marker)) { }

    // It is not permitted to trace pointers of live objects in the weak
    // callback phase, so the marking stack should still be empty here.
    s_markingStack->assertIsEmpty();
}

void Heap::getStats(HeapStats* stats)
{
    stats->clear();
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
    for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        HeapStats temp;
        (*it)->getStats(temp);
        stats->add(&temp);
    }
}

bool Heap::isConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        return (*it)->isConsistentForGC();
    return true;
}

void Heap::makeConsistentForGC()
{
    ASSERT(ThreadState::isAnyThreadInGC());
    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->makeConsistentForGC();
}

// Force template instantiations for the types that we need.
template class HeapPage<FinalizedHeapObjectHeader>;
template class HeapPage<HeapObjectHeader>;
template class ThreadHeap<FinalizedHeapObjectHeader>;
template class ThreadHeap<HeapObjectHeader>;

CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_weakCallbackStack;
}