platform/framework/web/crosswalk.git: src/third_party/WebKit/Source/heap/Heap.cpp (upstream version 5.34.104.0)
1 /*
2  * Copyright (C) 2013 Google Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are
6  * met:
7  *
8  *     * Redistributions of source code must retain the above copyright
9  * notice, this list of conditions and the following disclaimer.
10  *     * Redistributions in binary form must reproduce the above
11  * copyright notice, this list of conditions and the following disclaimer
12  * in the documentation and/or other materials provided with the
13  * distribution.
14  *     * Neither the name of Google Inc. nor the names of its
15  * contributors may be used to endorse or promote products derived from
16  * this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 #include "config.h"
32 #include "heap/Heap.h"
33
34 #include "heap/ThreadState.h"
35
36 #include "wtf/PassOwnPtr.h"
37
38 #if OS(POSIX)
39 #include <sys/mman.h>
40 #include <unistd.h>
41 #elif OS(WIN)
42 #include <windows.h>
43 #endif
44
45 namespace WebCore {
46
47 #if OS(WIN)
48 static bool IsPowerOf2(size_t power)
49 {
50     return !((power - 1) & power);
51 }
52 #endif
53
54 static Address roundToBlinkPageBoundary(void* base)
55 {
56     return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
57 }
58
59 static size_t roundToOsPageSize(size_t size)
60 {
61     return (size + osPageSize() - 1) & ~(osPageSize() - 1);
62 }
63
64 size_t osPageSize()
65 {
66 #if OS(POSIX)
67     static const size_t pageSize = getpagesize();
68 #else
69     static size_t pageSize = 0;
70     if (!pageSize) {
71         SYSTEM_INFO info;
72         GetSystemInfo(&info);
73         pageSize = info.dwPageSize;
74         ASSERT(IsPowerOf2(pageSize));
75     }
76 #endif
77     return pageSize;
78 }
79
80 class MemoryRegion {
81 public:
82     MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }
83
84     bool contains(Address addr) const
85     {
86         return m_base <= addr && addr < (m_base + m_size);
87     }
88
89
90     bool contains(const MemoryRegion& other) const
91     {
92         return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
93     }
94
95     void release()
96     {
97 #if OS(POSIX)
98         int err = munmap(m_base, m_size);
99         RELEASE_ASSERT(!err);
100 #else
101         bool success = VirtualFree(m_base, 0, MEM_RELEASE);
102         RELEASE_ASSERT(success);
103 #endif
104     }
105
106     WARN_UNUSED_RETURN bool commit()
107     {
108 #if OS(POSIX)
109         int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
110         if (!err) {
111             madvise(m_base, m_size, MADV_NORMAL);
112             return true;
113         }
114         return false;
115 #else
116         void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
117         return !!result;
118 #endif
119     }
120
121     void decommit()
122     {
123 #if OS(POSIX)
124         int err = mprotect(m_base, m_size, PROT_NONE);
125         RELEASE_ASSERT(!err);
126         // FIXME: Consider using MADV_FREE on MacOS.
127         madvise(m_base, m_size, MADV_DONTNEED);
128 #else
129         bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
130         RELEASE_ASSERT(success);
131 #endif
132     }
133
134     Address base() const { return m_base; }
135
136 private:
137     Address m_base;
138     size_t m_size;
139 };
140
141 // Representation of the memory used for a Blink heap page.
142 //
143 // The representation keeps track of two memory regions:
144 //
145 // 1. The virtual memory reserved from the system in order to be able
146 //    to free all the virtual memory reserved on destruction.
147 //
148 // 2. The writable memory (a sub-region of the reserved virtual
149 //    memory region) that is used for the actual heap page payload.
150 //
151 // Guard pages are created before and after the writable memory.
152 class PageMemory {
153 public:
154     ~PageMemory() { m_reserved.release(); }
155
156     bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
157     void decommit() { m_writable.decommit(); }
158
159     Address writableStart() { return m_writable.base(); }
160
161     // Allocate a virtual address space for the blink page with the
162     // following layout:
163     //
164     //    [ guard os page | ... payload ... | guard os page ]
165     //    ^---{ aligned to blink page size }
166     //
167     static PageMemory* allocate(size_t payloadSize)
168     {
169         ASSERT(payloadSize > 0);
170
171         // Virtual memory allocation routines operate in OS page sizes.
172         // Round up the requested size to the nearest OS page size.
173         payloadSize = roundToOsPageSize(payloadSize);
174
175         // Overallocate by blinkPageSize plus two OS pages so that we can
176         // find a blinkPageSize-aligned chunk with an OS page before and
177         // after it to use for guard pages. We unmap the excess memory
178         // before returning.
179         size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
180
181 #if OS(POSIX)
182         Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
183         RELEASE_ASSERT(base != MAP_FAILED);
184
185         Address end = base + allocationSize;
186         Address alignedBase = roundToBlinkPageBoundary(base);
187         Address payloadBase = alignedBase + osPageSize();
188         Address payloadEnd = payloadBase + payloadSize;
189         Address blinkPageEnd = payloadEnd + osPageSize();
190
191         // If the allocated memory was not blink page aligned, release
192         // the memory before the aligned address.
193         if (alignedBase != base)
194             MemoryRegion(base, alignedBase - base).release();
195
196         // Create guard pages by decommitting an OS page before and
197         // after the payload.
198         MemoryRegion(alignedBase, osPageSize()).decommit();
199         MemoryRegion(payloadEnd, osPageSize()).decommit();
200
201         // Free the additional memory at the end of the page if any.
202         if (blinkPageEnd < end)
203             MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
204
205         return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
206 #else
207         Address base = 0;
208         Address alignedBase = 0;
209
210         // On Windows it is impossible to partially release a region
211         // of memory allocated by VirtualAlloc. To avoid wasting
212         // virtual address space we reserve a large region, release it
213         // as a whole, and then attempt to reserve an aligned region
214         // within the address range it occupied.
215         for (int attempt = 0; attempt < 3; attempt++) {
216             base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
217             RELEASE_ASSERT(base);
218             VirtualFree(base, 0, MEM_RELEASE);
219
220             alignedBase = roundToBlinkPageBoundary(base);
221             base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
222             if (base) {
223                 RELEASE_ASSERT(base == alignedBase);
224                 allocationSize = payloadSize + 2 * osPageSize();
225                 break;
226             }
227         }
228
229         if (!base) {
230             // We failed to avoid wasting virtual address space after
231             // several attempts.
232             base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
233             RELEASE_ASSERT(base);
234
235             // FIXME: If base happens to be blink page size aligned
236             // here then we could create two pages out of the reserved
237             // space. Do this.
238             alignedBase = roundToBlinkPageBoundary(base);
239         }
240
241         Address payloadBase = alignedBase + osPageSize();
242         PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
243         bool res = storage->commit();
244         RELEASE_ASSERT(res);
245         return storage;
246 #endif
247     }
248
249 private:
250     PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
251         : m_reserved(reserved)
252         , m_writable(writable)
253     {
254         ASSERT(reserved.contains(writable));
255     }
256
257     MemoryRegion m_reserved;
258     MemoryRegion m_writable;
259 };
260
261 class GCScope {
262 public:
263     explicit GCScope(ThreadState::StackState stackState)
264         : m_state(ThreadState::current())
265         , m_safePointScope(stackState)
266     {
267         m_state->checkThread();
268
269         // FIXME: in the unlikely event that two threads decide to
270         // collect garbage at the same time, avoid doing two GCs in
271         // a row.
272         ASSERT(!m_state->isInGC());
273         ThreadState::stopThreads();
274         m_state->enterGC();
275     }
276
277     ~GCScope()
278     {
279         m_state->leaveGC();
280         ASSERT(!m_state->isInGC());
281         ThreadState::resumeThreads();
282     }
283
284 private:
285     ThreadState* m_state;
286     ThreadState::SafePointScope m_safePointScope;
287 };
288
289 NO_SANITIZE_ADDRESS
290 bool HeapObjectHeader::isMarked() const
291 {
292     checkHeader();
293     return m_size & markBitMask;
294 }
295
296 NO_SANITIZE_ADDRESS
297 void HeapObjectHeader::unmark()
298 {
299     checkHeader();
300     m_size &= ~markBitMask;
301 }
302
303 NO_SANITIZE_ADDRESS
304 bool HeapObjectHeader::hasDebugMark() const
305 {
306     checkHeader();
307     return m_size & debugBitMask;
308 }
309
310 NO_SANITIZE_ADDRESS
311 void HeapObjectHeader::clearDebugMark()
312 {
313     checkHeader();
314     m_size &= ~debugBitMask;
315 }
316
317 NO_SANITIZE_ADDRESS
318 void HeapObjectHeader::setDebugMark()
319 {
320     checkHeader();
321     m_size |= debugBitMask;
322 }
323
324 #ifndef NDEBUG
325 NO_SANITIZE_ADDRESS
326 void HeapObjectHeader::zapMagic()
327 {
328     m_magic = zappedMagic;
329 }
330 #endif
331
332 HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload)
333 {
334     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
335     HeapObjectHeader* header =
336         reinterpret_cast<HeapObjectHeader*>(addr - objectHeaderSize);
337     return header;
338 }
339
340 void HeapObjectHeader::finalize(const GCInfo* gcInfo, Address object, size_t objectSize)
341 {
342     ASSERT(gcInfo);
343     if (gcInfo->hasFinalizer()) {
344         gcInfo->m_finalize(object);
345     }
346 #ifndef NDEBUG
347     for (size_t i = 0; i < objectSize; i++)
348         object[i] = finalizedZapValue;
349 #endif
350     // Zap the primary vTable entry (secondary vTable entries are not zapped)
351     *(reinterpret_cast<uintptr_t*>(object)) = zappedVTable;
352 }
353
354 NO_SANITIZE_ADDRESS
355 void FinalizedHeapObjectHeader::finalize()
356 {
357     HeapObjectHeader::finalize(m_gcInfo, payload(), payloadSize());
358 }
359
360 template<typename Header>
361 void LargeHeapObject<Header>::unmark()
362 {
363     return heapObjectHeader()->unmark();
364 }
365
366 template<typename Header>
367 bool LargeHeapObject<Header>::isMarked()
368 {
369     return heapObjectHeader()->isMarked();
370 }
371
372 template<typename Header>
373 bool LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
374 {
375     if (contains(address)) {
376         mark(visitor);
377         return true;
378     }
379     return false;
380 }
381
382 template<>
383 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
384 {
385     visitor->mark(heapObjectHeader(), heapObjectHeader()->traceCallback());
386 }
387
388 template<>
389 void LargeHeapObject<HeapObjectHeader>::mark(Visitor* visitor)
390 {
391     ASSERT(gcInfo());
392     visitor->mark(heapObjectHeader(), gcInfo()->m_trace);
393 }
394
395 template<>
396 void LargeHeapObject<FinalizedHeapObjectHeader>::finalize()
397 {
398     heapObjectHeader()->finalize();
399 }
400
401 template<>
402 void LargeHeapObject<HeapObjectHeader>::finalize()
403 {
404     ASSERT(gcInfo());
405     HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize());
406 }
407
408 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
409 {
410     Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
411     FinalizedHeapObjectHeader* header =
412         reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
413     return header;
414 }
415
416 template<typename Header>
417 ThreadHeap<Header>::ThreadHeap(ThreadState* state)
418     : m_currentAllocationPoint(0)
419     , m_remainingAllocationSize(0)
420     , m_firstPage(0)
421     , m_firstLargeHeapObject(0)
422     , m_biggestFreeListIndex(0)
423     , m_threadState(state)
424     , m_pagePool(0)
425 {
426     clearFreeLists();
427 }
428
429 template<typename Header>
430 ThreadHeap<Header>::~ThreadHeap()
431 {
432     clearFreeLists();
433     if (!ThreadState::isMainThread())
434         assertEmpty();
435     deletePages();
436 }
437
438 template<typename Header>
439 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
440 {
441     size_t allocationSize = allocationSizeFromSize(size);
442     if (threadState()->shouldGC()) {
443         if (threadState()->shouldForceConservativeGC())
444             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
445         else
446             threadState()->setGCRequested();
447     }
448     ensureCurrentAllocation(allocationSize, gcInfo);
449     return allocate(size, gcInfo);
450 }
451
452 template<typename Header>
453 bool ThreadHeap<Header>::allocateFromFreeList(size_t minSize)
454 {
455     size_t bucketSize = 1 << m_biggestFreeListIndex;
456     int i = m_biggestFreeListIndex;
457     for (; i > 0; i--, bucketSize >>= 1) {
458         if (bucketSize < minSize)
459             break;
460         FreeListEntry* entry = m_freeLists[i];
461         if (entry) {
462             m_biggestFreeListIndex = i;
463             entry->unlink(&m_freeLists[i]);
464             setAllocationPoint(entry->address(), entry->size());
465             ASSERT(currentAllocationPoint() && remainingAllocationSize() >= minSize);
466             return true;
467         }
468     }
469     m_biggestFreeListIndex = i;
470     return false;
471 }
472
473 template<typename Header>
474 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
475 {
476     ASSERT(minSize >= allocationGranularity);
477     if (remainingAllocationSize() >= minSize)
478         return;
479
480     if (remainingAllocationSize() > 0)
481         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
482     if (allocateFromFreeList(minSize))
483         return;
484     addPageToHeap(gcInfo);
485     bool success = allocateFromFreeList(minSize);
486     RELEASE_ASSERT(success);
487 }
488
489 template<typename Header>
490 BaseHeapPage* ThreadHeap<Header>::heapPageFromAddress(Address address)
491 {
492     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
493         if (page->contains(address))
494             return page;
495     }
496     return 0;
497 }
498
499 template<typename Header>
500 BaseHeapPage* ThreadHeap<Header>::largeHeapObjectFromAddress(Address address)
501 {
502     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
503         if (current->contains(address))
504             return current;
505     }
506     return 0;
507 }
508
509 template<typename Header>
510 bool ThreadHeap<Header>::checkAndMarkLargeHeapObject(Visitor* visitor, Address address)
511 {
512     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
513         if (current->checkAndMarkPointer(visitor, address))
514             return true;
515     }
516     return false;
517 }
518
519 template<typename Header>
520 void ThreadHeap<Header>::addToFreeList(Address address, size_t size)
521 {
522     ASSERT(heapPageFromAddress(address));
523     ASSERT(heapPageFromAddress(address + size - 1));
524     ASSERT(size < blinkPagePayloadSize());
525     // The free list entries are only pointer aligned (but when we allocate
526     // from them we are 8 byte aligned due to the header size).
527     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(Header)) & allocationMask));
528     ASSERT(!(size & allocationMask));
529     ASAN_POISON_MEMORY_REGION(address, size);
530     FreeListEntry* entry;
531     if (size < sizeof(*entry)) {
532         // Create a dummy header with only a size and freelist bit set.
533         ASSERT(size >= sizeof(BasicObjectHeader));
534         // Encode the size as a free list entry size to mark the lost memory as freelist memory.
535         new (NotNull, address) BasicObjectHeader(BasicObjectHeader::freeListEncodedSize(size));
536         // This memory gets lost. Sweeping can reclaim it.
537         return;
538     }
539     entry = new (NotNull, address) FreeListEntry(size);
540 #if defined(ADDRESS_SANITIZER)
541     // For ASAN we don't add the entry to the free lists until the asanDeferMemoryReuseCount
542     // reaches zero. However we always add entire pages to ensure that adding a new page will
543     // increase the allocation space.
544     if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList())
545         return;
546 #endif
547     int index = bucketIndexForSize(size);
548     entry->link(&m_freeLists[index]);
549     if (index > m_biggestFreeListIndex)
550         m_biggestFreeListIndex = index;
551 }
552
553 template<typename Header>
554 Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInfo)
555 {
556     // The caller already added space for the object header and rounded up to allocation alignment.
557     ASSERT(!(size & allocationMask));
558
559     size_t allocationSize = sizeof(LargeHeapObject<Header>) + size;
560
561     // Ensure that there is enough space for alignment. If the header
562     // is not a multiple of 8 bytes we will allocate an extra
563     // headerPadding<Header> bytes to ensure it is 8 byte aligned.
564     allocationSize += headerPadding<Header>();
565
566     // If ASAN is supported we add allocationGranularity bytes to the allocated space and
567     // poison that region to detect overflows.
568 #if defined(ADDRESS_SANITIZER)
569     allocationSize += allocationGranularity;
570 #endif
571     if (threadState()->shouldGC())
572         threadState()->setGCRequested();
573     PageMemory* pageMemory = PageMemory::allocate(allocationSize);
574     Address largeObjectAddress = pageMemory->writableStart();
575     Address headerAddress = largeObjectAddress + sizeof(LargeHeapObject<Header>) + headerPadding<Header>();
576     memset(headerAddress, 0, size);
577     Header* header = new (NotNull, headerAddress) Header(size, gcInfo);
578     Address result = headerAddress + sizeof(*header);
579     ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
580     LargeHeapObject<Header>* largeObject = new (largeObjectAddress) LargeHeapObject<Header>(pageMemory, gcInfo);
581
582     // Poison the object header and allocationGranularity bytes after the object
583     ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
584     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
585     largeObject->link(&m_firstLargeHeapObject);
586     stats().increaseAllocatedSpace(largeObject->size());
587     stats().increaseObjectSpace(largeObject->payloadSize());
588     return result;
589 }
590
591 template<typename Header>
592 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
593 {
594     object->unlink(previousNext);
595     object->finalize();
596
597     // Unpoison the object header and allocationGranularity bytes after the
598     // object before freeing.
599     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
600     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
601     delete object->storage();
602 }
603
604 template<>
605 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
606 {
607     // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
608     // the heap should be unused (i.e. 0).
609     allocatePage(0);
610 }
611
612 template<>
613 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
614 {
615     // When adding a page to the ThreadHeap using HeapObjectHeaders, store the GCInfo on the heap
616     // page since it is the same for all objects on the page.
617     ASSERT(gcInfo);
618     allocatePage(gcInfo);
619 }
620
621 template<typename Header>
622 void ThreadHeap<Header>::clearPagePool()
623 {
624     while (takePageFromPool()) { }
625 }
626
627 template<typename Header>
628 PageMemory* ThreadHeap<Header>::takePageFromPool()
629 {
630     while (PagePoolEntry* entry = m_pagePool) {
631         m_pagePool = entry->next();
632         PageMemory* storage = entry->storage();
633         delete entry;
634
635         if (storage->commit())
636             return storage;
637
638         // Failed to commit pooled storage. Release it.
639         delete storage;
640     }
641
642     return 0;
643 }
644
645 template<typename Header>
646 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
647 {
648     PageMemory* storage = unused->storage();
649     PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
650     m_pagePool = entry;
651     storage->decommit();
652 }
653
654 template<typename Header>
655 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
656 {
657     heapContainsCache()->flush();
658     PageMemory* pageMemory = takePageFromPool();
659     if (!pageMemory) {
660         pageMemory = PageMemory::allocate(blinkPagePayloadSize());
661         RELEASE_ASSERT(pageMemory);
662     }
663     HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
664     page->link(&m_firstPage);
665     addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
666 }
667
668 #ifndef NDEBUG
669 template<typename Header>
670 void ThreadHeap<Header>::getScannedStats(HeapStats& scannedStats)
671 {
672     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
673         page->getStats(scannedStats);
674     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
675         current->getStats(scannedStats);
676 }
677 #endif
678
679 template<typename Header>
680 void ThreadHeap<Header>::sweep()
681 {
682     ASSERT(isConsistentForGC());
683 #if defined(ADDRESS_SANITIZER)
684     // When using ASAN do a pre-sweep where all unmarked objects are poisoned before
685     // calling their finalizer methods. This can catch the cases where one object's
686     // finalizer tries to modify another object as part of finalization.
687     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
688         page->poisonUnmarkedObjects();
689 #endif
690     HeapPage<Header>* page = m_firstPage;
691     HeapPage<Header>** previous = &m_firstPage;
692     bool pagesRemoved = false;
693     while (page) {
694         if (page->isEmpty()) {
695             HeapPage<Header>* unused = page;
696             page = page->next();
697             HeapPage<Header>::unlink(unused, previous);
698             pagesRemoved = true;
699         } else {
700             page->sweep();
701             previous = &page->m_next;
702             page = page->next();
703         }
704     }
705     if (pagesRemoved)
706         heapContainsCache()->flush();
707
708     LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
709     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
710         if (current->isMarked()) {
711             stats().increaseAllocatedSpace(current->size());
712             stats().increaseObjectSpace(current->payloadSize());
713             current->unmark();
714             previousNext = &current->m_next;
715             current = current->next();
716         } else {
717             LargeHeapObject<Header>* next = current->next();
718             freeLargeObject(current, previousNext);
719             current = next;
720         }
721     }
722 }
723
724 template<typename Header>
725 void ThreadHeap<Header>::assertEmpty()
726 {
727     // No nested GCs are permitted. The thread is exiting.
728     NoAllocationScope<AnyThread> noAllocation;
729     makeConsistentForGC();
730     for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
731         Address end = page->end();
732         Address headerAddress;
733         for (headerAddress = page->payload(); headerAddress < end; ) {
734             BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
735             ASSERT(basicHeader->size() < blinkPagePayloadSize());
736             // A live object here is potentially a dangling pointer from some root.
737             // Treat it as a critical bug in both release and debug mode.
738             RELEASE_ASSERT(basicHeader->isFree());
739             headerAddress += basicHeader->size();
740         }
741         ASSERT(headerAddress == end);
742         addToFreeList(page->payload(), end - page->payload());
743     }
744
745     RELEASE_ASSERT(!m_firstLargeHeapObject);
746 }
747
748 template<typename Header>
749 bool ThreadHeap<Header>::isConsistentForGC()
750 {
751     for (size_t i = 0; i < blinkPageSizeLog2; i++) {
752         if (m_freeLists[i])
753             return false;
754     }
755     return !ownsNonEmptyAllocationArea();
756 }
757
758 template<typename Header>
759 void ThreadHeap<Header>::makeConsistentForGC()
760 {
761     if (ownsNonEmptyAllocationArea())
762         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
763     setAllocationPoint(0, 0);
764     clearFreeLists();
765 }
766
767 template<typename Header>
768 void ThreadHeap<Header>::clearMarks()
769 {
770     ASSERT(isConsistentForGC());
771     for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
772         page->clearMarks();
773     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
774         current->unmark();
775 }
776
777 template<typename Header>
778 void ThreadHeap<Header>::deletePages()
779 {
780     heapContainsCache()->flush();
781     // Add all pages in the pool to the heap's list of pages before deleting
782     clearPagePool();
783
784     for (HeapPage<Header>* page = m_firstPage; page; ) {
785         HeapPage<Header>* dead = page;
786         page = page->next();
787         PageMemory* storage = dead->storage();
788         dead->~HeapPage();
789         delete storage;
790     }
791     m_firstPage = 0;
792
793     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
794         LargeHeapObject<Header>* dead = current;
795         current = current->next();
796         PageMemory* storage = dead->storage();
797         dead->~LargeHeapObject();
798         delete storage;
799     }
800     m_firstLargeHeapObject = 0;
801 }
802
803 template<typename Header>
804 void ThreadHeap<Header>::clearFreeLists()
805 {
806     for (size_t i = 0; i < blinkPageSizeLog2; i++)
807         m_freeLists[i] = 0;
808 }
809
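// Free list entries are bucketed by the index of the highest set bit of their
// size, so bucket i holds entries with size in [2^i, 2^(i+1)). Allocation only
// takes from buckets where 2^i >= minSize, so any entry found is large enough.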
810 int BaseHeap::bucketIndexForSize(size_t size)
811 {
812     ASSERT(size > 0);
813     int index = -1;
814     while (size) {
815         size >>= 1;
816         index++;
817     }
818     return index;
819 }
820
821 template<typename Header>
822 HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const GCInfo* gcInfo)
823     : BaseHeapPage(storage, gcInfo)
824     , m_next(0)
825     , m_heap(heap)
826 {
827     COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
828     m_objectStartBitMapComputed = false;
829     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
830     heap->stats().increaseAllocatedSpace(blinkPageSize);
831 }
832
833 template<typename Header>
834 void HeapPage<Header>::link(HeapPage** prevNext)
835 {
836     m_next = *prevNext;
837     *prevNext = this;
838 }
839
840 template<typename Header>
841 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
842 {
843     *prevNext = unused->m_next;
844     unused->heap()->addPageToPool(unused);
845 }
846
847 template<typename Header>
848 void HeapPage<Header>::getStats(HeapStats& stats)
849 {
850     stats.increaseAllocatedSpace(blinkPageSize);
851     Address headerAddress = payload();
852     ASSERT(headerAddress != end());
853     do {
854         Header* header = reinterpret_cast<Header*>(headerAddress);
855         if (!header->isFree())
856             stats.increaseObjectSpace(header->payloadSize());
857         ASSERT(header->size() < blinkPagePayloadSize());
858         headerAddress += header->size();
859         ASSERT(headerAddress <= end());
860     } while (headerAddress < end());
861 }
862
863 template<typename Header>
864 bool HeapPage<Header>::isEmpty()
865 {
866     BasicObjectHeader* header = reinterpret_cast<BasicObjectHeader*>(payload());
867     return header->isFree() && (header->size() == payloadSize());
868 }
869
870 template<typename Header>
871 void HeapPage<Header>::sweep()
872 {
873     clearObjectStartBitMap();
874     heap()->stats().increaseAllocatedSpace(blinkPageSize);
875     Address startOfGap = payload();
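    // startOfGap tracks the beginning of the current run of free or dead
    // (finalized) memory; the whole run is coalesced into a single free list
    // entry when the next live (marked) object or the end of the page is reached.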
876     for (Address headerAddress = startOfGap; headerAddress < end(); ) {
877         BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
878         ASSERT(basicHeader->size() < blinkPagePayloadSize());
879
880         if (basicHeader->isFree()) {
881             headerAddress += basicHeader->size();
882             continue;
883         }
884         // At this point we know this is a valid object of type Header
885         Header* header = static_cast<Header*>(basicHeader);
886
887         if (!header->isMarked()) {
888             // For ASAN we unpoison the specific object when calling the finalizer and
889             // poison it again when done, so that the object's own finalizer can operate
890             // on the object while other finalizers are not allowed to access it.
891             ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize());
892             finalize(header);
893             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
894             headerAddress += header->size();
895             continue;
896         }
897
898         if (startOfGap != headerAddress)
899             heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
900         header->unmark();
901         headerAddress += header->size();
902         heap()->stats().increaseObjectSpace(header->payloadSize());
903         startOfGap = headerAddress;
904     }
905     if (startOfGap != end())
906         heap()->addToFreeList(startOfGap, end() - startOfGap);
907 }
908
909 template<typename Header>
910 void HeapPage<Header>::clearMarks()
911 {
912     for (Address headerAddress = payload(); headerAddress < end();) {
913         Header* header = reinterpret_cast<Header*>(headerAddress);
914         ASSERT(header->size() < blinkPagePayloadSize());
915         if (!header->isFree())
916             header->unmark();
917         headerAddress += header->size();
918     }
919 }
920
921 template<typename Header>
922 void HeapPage<Header>::populateObjectStartBitMap()
923 {
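    // One bit per allocationGranularity-sized granule of the page payload; a
    // set bit marks the granule where an object header starts. The bitmap is
    // cleared by sweep() and rebuilt lazily on the next conservative scan.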
924     memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
925     Address start = payload();
926     for (Address headerAddress = start; headerAddress < end();) {
927         Header* header = reinterpret_cast<Header*>(headerAddress);
928         size_t objectOffset = headerAddress - start;
929         ASSERT(!(objectOffset & allocationMask));
930         size_t objectStartNumber = objectOffset / allocationGranularity;
931         size_t mapIndex = objectStartNumber / 8;
932         ASSERT(mapIndex < objectStartBitMapSize);
933         m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
934         headerAddress += header->size();
935         ASSERT(headerAddress <= end());
936     }
937     m_objectStartBitMapComputed = true;
938 }
939
940 template<typename Header>
941 void HeapPage<Header>::clearObjectStartBitMap()
942 {
943     m_objectStartBitMapComputed = false;
944 }
945
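// Count the number of leading zero bits in a byte by successively halving the
// remaining bit range; used below to find the most significant set bit of a
// byte from the object start bitmap.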
946 static int numberOfLeadingZeroes(uint8_t byte)
947 {
948     if (!byte)
949         return 8;
950     int result = 0;
951     if (byte <= 0x0F) {
952         result += 4;
953         byte = byte << 4;
954     }
955     if (byte <= 0x3F) {
956         result += 2;
957         byte = byte << 2;
958     }
959     if (byte <= 0x7F)
960         result++;
961     return result;
962 }
963
964 template<typename Header>
965 bool HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address addr)
966 {
967     if (addr < payload())
968         return false;
969     if (!isObjectStartBitMapComputed())
970         populateObjectStartBitMap();
971     size_t objectOffset = addr - payload();
972     size_t objectStartNumber = objectOffset / allocationGranularity;
973     size_t mapIndex = objectStartNumber / 8;
974     ASSERT(mapIndex < objectStartBitMapSize);
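    // Locate the object start at or before the queried granule: mask off the
    // bits above the granule's bit and scan backwards through the bitmap until
    // a set bit, i.e. the start of the containing object, is found.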
975     size_t bit = objectStartNumber & 7;
976     uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
977     while (!byte) {
978         ASSERT(mapIndex > 0);
979         byte = m_objectStartBitMap[--mapIndex];
980     }
981     int leadingZeroes = numberOfLeadingZeroes(byte);
982     objectStartNumber = (mapIndex * 8) + 7 - leadingZeroes;
983     objectOffset = objectStartNumber * allocationGranularity;
984     Address objectAddress = objectOffset + payload();
985     Header* header = reinterpret_cast<Header*>(objectAddress);
986     if (header->isFree())
987         return false;
988
989     visitor->mark(header, traceCallback(header));
990     return true;
991 }
992
993 #if defined(ADDRESS_SANITIZER)
994 template<typename Header>
995 void HeapPage<Header>::poisonUnmarkedObjects()
996 {
997     for (Address headerAddress = payload(); headerAddress < end(); ) {
998         Header* header = reinterpret_cast<Header*>(headerAddress);
999         ASSERT(header->size() < blinkPagePayloadSize());
1000
1001         if (!header->isFree() && !header->isMarked())
1002             ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize());
1003         headerAddress += header->size();
1004     }
1005 }
1006 #endif
1007
1008 template<>
1009 inline void HeapPage<FinalizedHeapObjectHeader>::finalize(FinalizedHeapObjectHeader* header)
1010 {
1011     header->finalize();
1012 }
1013
1014 template<>
1015 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header)
1016 {
1017     ASSERT(gcInfo());
1018     HeapObjectHeader::finalize(gcInfo(), header->payload(), header->payloadSize());
1019 }
1020
1021 template<>
1022 inline TraceCallback HeapPage<HeapObjectHeader>::traceCallback(HeapObjectHeader* header)
1023 {
1024     ASSERT(gcInfo());
1025     return gcInfo()->m_trace;
1026 }
1027
1028 template<>
1029 inline TraceCallback HeapPage<FinalizedHeapObjectHeader>::traceCallback(FinalizedHeapObjectHeader* header)
1030 {
1031     return header->traceCallback();
1032 }
1033
1034 template<typename Header>
1035 void LargeHeapObject<Header>::getStats(HeapStats& stats)
1036 {
1037     stats.increaseAllocatedSpace(size());
1038     stats.increaseObjectSpace(payloadSize());
1039 }
1040
1041 HeapContainsCache::HeapContainsCache()
1042     : m_entries(adoptArrayPtr(new Entry[HeapContainsCache::numberOfEntries]))
1043 {
1044 }
1045
1046 void HeapContainsCache::flush()
1047 {
1048     for (int i = 0; i < numberOfEntries; i++)
1049         m_entries[i] = Entry();
1050 }
1051
1052 size_t HeapContainsCache::hash(Address address)
1053 {
1054     size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2);
1055     value ^= value >> numberOfEntriesLog2;
1056     value ^= value >> (numberOfEntriesLog2 * 2);
1057     value &= numberOfEntries - 1;
1058     return value & ~1; // Return only even indices; entries are stored in pairs.
1059 }
1060
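// The cache is two-way set-associative: hash() returns the even index of a
// two-entry set, lookup() probes both slots, and addEntry() demotes the entry
// in the even slot to the odd slot before writing the new entry.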
1061 bool HeapContainsCache::lookup(Address address, BaseHeapPage** page)
1062 {
1063     ASSERT(page);
1064     size_t index = hash(address);
1065     ASSERT(!(index & 1));
1066     Address cachePage = roundToBlinkPageStart(address);
1067     if (m_entries[index].address() == cachePage) {
1068         *page = m_entries[index].containingPage();
1069         return true;
1070     }
1071     if (m_entries[index + 1].address() == cachePage) {
1072         *page = m_entries[index + 1].containingPage();
1073         return true;
1074     }
1075     *page = 0;
1076     return false;
1077 }
1078
1079 void HeapContainsCache::addEntry(Address address, BaseHeapPage* page)
1080 {
1081     size_t index = hash(address);
1082     ASSERT(!(index & 1));
1083     Address cachePage = roundToBlinkPageStart(address);
1084     m_entries[index + 1] = m_entries[index];
1085     m_entries[index] = Entry(cachePage, page);
1086 }
1087
1088 void CallbackStack::init(CallbackStack** first)
1089 {
1090     // The stacks are chained, so we start by setting this to null as a terminator.
1091     *first = 0;
1092     *first = new CallbackStack(first);
1093 }
1094
1095 void CallbackStack::shutdown(CallbackStack** first)
1096 {
1097     CallbackStack* next;
1098     for (CallbackStack* current = *first; current; current = next) {
1099         next = current->m_next;
1100         delete current;
1101     }
1102     *first = 0;
1103 }
1104
1105 CallbackStack::~CallbackStack()
1106 {
1107 #ifndef NDEBUG
1108     clearUnused();
1109 #endif
1110 }
1111
1112 void CallbackStack::clearUnused()
1113 {
1114     ASSERT(m_current == &(m_buffer[0]));
1115     for (size_t i = 0; i < bufferSize; i++)
1116         m_buffer[i] = Item(0, 0);
1117 }
1118
1119 void CallbackStack::assertIsEmpty()
1120 {
1121     ASSERT(m_current == &(m_buffer[0]));
1122     ASSERT(!m_next);
1123 }
1124
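// Pop one item off the stack and invoke its callback. When the current block
// is exhausted it is unlinked and deleted, and popping continues in the next
// chained block; returns false once all blocks are empty.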
1125 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
1126 {
1127     if (m_current == &(m_buffer[0])) {
1128         if (!m_next) {
1129 #ifndef NDEBUG
1130             clearUnused();
1131 #endif
1132             return false;
1133         }
1134         CallbackStack* nextStack = m_next;
1135         *first = nextStack;
1136         delete this;
1137         return nextStack->popAndInvokeCallback(first, visitor);
1138     }
1139     Item* item = --m_current;
1140
1141     VisitorCallback callback = item->callback();
1142     callback(visitor, item->object());
1143
1144     return true;
1145 }
1146
1147 class MarkingVisitor : public Visitor {
1148 public:
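    // Mark the object's header and, if it has a trace callback, push the
    // object onto the marking stack so that its references are traced
    // iteratively instead of by direct recursion.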
1149     inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback)
1150     {
1151         ASSERT(header);
1152         ASSERT(objectPointer);
1153         if (header->isMarked())
1154             return;
1155         header->mark();
1156         if (callback)
1157             Heap::pushTraceCallback(const_cast<void*>(objectPointer), callback);
1158     }
1159
1160     virtual void mark(HeapObjectHeader* header, TraceCallback callback) OVERRIDE
1161     {
1162         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1163         // versions of mark() to correctly find the payload.
1164         visitHeader(header, header->payload(), callback);
1165     }
1166
1167     virtual void mark(FinalizedHeapObjectHeader* header, TraceCallback callback) OVERRIDE
1168     {
1169         // We need both the HeapObjectHeader and FinalizedHeapObjectHeader
1170         // versions of mark() to correctly find the payload.
1171         visitHeader(header, header->payload(), callback);
1172     }
1173
1174     virtual void mark(const void* objectPointer, TraceCallback callback) OVERRIDE
1175     {
1176         if (!objectPointer)
1177             return;
1178         FinalizedHeapObjectHeader* header = FinalizedHeapObjectHeader::fromPayload(objectPointer);
1179         visitHeader(header, header->payload(), callback);
1180     }
1181
1182     virtual void registerWeakMembers(const void* containingObject, WeakPointerCallback callback) OVERRIDE
1183     {
1184         Heap::pushWeakPointerCallback(const_cast<void*>(containingObject), callback);
1185     }
1186
1187     virtual bool isMarked(const void* objectPointer) OVERRIDE
1188     {
1189         return FinalizedHeapObjectHeader::fromPayload(objectPointer)->isMarked();
1190     }
1191
1192     // This macro defines the necessary visitor methods for typed heaps
1193 #define DEFINE_VISITOR_METHODS(Type)                                              \
1194     virtual void mark(const Type* objectPointer, TraceCallback callback) OVERRIDE \
1195     {                                                                             \
1196         if (!objectPointer)                                                       \
1197             return;                                                               \
1198         HeapObjectHeader* header =                                                \
1199             HeapObjectHeader::fromPayload(objectPointer);                         \
1200         visitHeader(header, header->payload(), callback);                         \
1201     }                                                                             \
1202     virtual bool isMarked(const Type* objectPointer) OVERRIDE                     \
1203     {                                                                             \
1204         return HeapObjectHeader::fromPayload(objectPointer)->isMarked();          \
1205     }
1206
1207     FOR_EACH_TYPED_HEAP(DEFINE_VISITOR_METHODS)
1208 #undef DEFINE_VISITOR_METHODS
1209 };
1210
1211 void Heap::init()
1212 {
1213     ThreadState::init();
1214     CallbackStack::init(&s_markingStack);
1215     CallbackStack::init(&s_weakCallbackStack);
1216 }
1217
1218 void Heap::shutdown()
1219 {
1220     ThreadState::shutdown();
1221     CallbackStack::shutdown(&s_markingStack);
1222     CallbackStack::shutdown(&s_weakCallbackStack);
1223 }
1224
1225 bool Heap::contains(Address address)
1226 {
1227     ASSERT(ThreadState::isAnyThreadInGC());
1228     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1229     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1230         if ((*it)->contains(address))
1231             return true;
1232     }
1233     return false;
1234 }
1235
1236 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1237 {
1238     ASSERT(ThreadState::isAnyThreadInGC());
1239     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1240     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1241         if ((*it)->checkAndMarkPointer(visitor, address)) {
1242             // Pointer found and marked.
1243             return address;
1244         }
1245     }
1246     return 0;
1247 }
1248
1249 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1250 {
1251     ASSERT(Heap::contains(object));
1252     CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1253     *slot = CallbackStack::Item(object, callback);
1254 }
1255
1256 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1257 {
1258     return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
1259 }
1260
1261 void Heap::pushWeakPointerCallback(void* object, WeakPointerCallback callback)
1262 {
1263     ASSERT(Heap::contains(object));
1264     CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack);
1265     *slot = CallbackStack::Item(object, callback);
1266 }
1267
1268 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1269 {
1270     return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
1271 }
1272
1273 void Heap::prepareForGC()
1274 {
1275     ASSERT(ThreadState::isAnyThreadInGC());
1276     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1277     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1278         (*it)->prepareForGC();
1279 }
1280
1281 void Heap::collectGarbage(ThreadState::StackState stackState, GCType gcType)
1282 {
1283     ThreadState::current()->clearGCRequested();
1284     GCScope gcScope(stackState);
1285
1286     // Disallow allocation during garbage collection.
1287     NoAllocationScope<AnyThread> noAllocationScope;
1288     prepareForGC();
1289     MarkingVisitor marker;
1290
1291     ThreadState::visitRoots(&marker);
1292     // Recursively mark all objects that are reachable from the roots.
1293     while (popAndInvokeTraceCallback(&marker)) { }
1294
1295     // Call weak callbacks on objects that may now be pointing to dead
1296     // objects.
1297     while (popAndInvokeWeakPointerCallback(&marker)) { }
1298
1299     // It is not permitted to trace pointers of live objects in the weak
1300     // callback phase, so the marking stack should still be empty here.
1301     s_markingStack->assertIsEmpty();
1302 }
1303
1304 void Heap::getStats(HeapStats* stats)
1305 {
1306     stats->clear();
1307     ASSERT(ThreadState::isAnyThreadInGC());
1308     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1309     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1310     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1311         HeapStats temp;
1312         (*it)->getStats(temp);
1313         stats->add(&temp);
1314     }
1315 }
1316
1317 bool Heap::isConsistentForGC()
1318 {
1319     ASSERT(ThreadState::isAnyThreadInGC());
1320     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1321     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1322         return (*it)->isConsistentForGC();
1323     return true;
1324 }
1325
1326 void Heap::makeConsistentForGC()
1327 {
1328     ASSERT(ThreadState::isAnyThreadInGC());
1329     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
1330     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1331         (*it)->makeConsistentForGC();
1332 }
1333
1334 // Force template instantiations for the types that we need.
1335 template class HeapPage<FinalizedHeapObjectHeader>;
1336 template class HeapPage<HeapObjectHeader>;
1337 template class ThreadHeap<FinalizedHeapObjectHeader>;
1338 template class ThreadHeap<HeapObjectHeader>;
1339
1340 CallbackStack* Heap::s_markingStack;
1341 CallbackStack* Heap::s_weakCallbackStack;
1342 }