/*
 * Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 * Copyright (C) 2001 Peter Kelly (pmk@post.com)
 * Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include "CardSet.h"
#include "HeapBlock.h"
#include "WeakSet.h"
#include <wtf/Bitmap.h>
#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Vector.h>

// Set to log state transitions of blocks.
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0

#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do {     \
        dataLog(                                        \
            "%s:%d %s: block %s = %p, %d\n",            \
            __FILE__, __LINE__, __FUNCTION__,           \
            #block, (block), (block)->m_state);         \
    } while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
#endif

namespace JSC {

class Heap;
class HeapRootVisitor;
class JSCell;

typedef uintptr_t Bits;

static const size_t MB = 1024 * 1024;

bool isZapped(const JSCell*);

// A marked block is a page-aligned container for heap-allocated objects.
// Objects are allocated within cells of the marked block. For a given
// marked block, all cells have the same size. Objects smaller than the
// cell size may be allocated in the marked block, in which case the
// allocation suffers from internal fragmentation: wasted space whose
// size is equal to the difference between the cell size and the object
// size.

class MarkedBlock : public HeapBlock<MarkedBlock> {
public:
    // Ensure natural alignment for native types whilst recognizing that the smallest
    // object the heap will commonly allocate is four words.
    static const size_t atomSize = 4 * sizeof(void*);
    static const size_t atomShift = 5;
    static const size_t blockSize = 64 * KB;
    static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

    static const size_t atomsPerBlock = blockSize / atomSize; // ~0.4% overhead
    static const size_t atomMask = atomsPerBlock - 1;
    static const int cardShift = 8; // This is log2 of bytes per card.
    static const size_t bytesPerCard = 1 << cardShift;
    static const int cardCount = blockSize / bytesPerCard;
    static const int cardMask = cardCount - 1;
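
    // Illustrative compile-time checks, added here as a sketch (they are not
    // part of the original header): they restate the arithmetic behind the
    // constants above. A block is exactly covered by its atoms and by its
    // 256-byte cards, and blockMask only works because blockSize is a power
    // of two.
    COMPILE_ASSERT(!(blockSize & (blockSize - 1)), blockSize_is_a_power_of_two);
    COMPILE_ASSERT(atomsPerBlock * atomSize == blockSize, atoms_exactly_cover_a_block);
    COMPILE_ASSERT(bytesPerCard * cardCount == blockSize, cards_exactly_cover_a_block);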

    struct FreeCell {
        FreeCell* next;
    };

    struct FreeList {
        FreeCell* head;
        size_t bytes;

        FreeList();
        FreeList(FreeCell*, size_t);
    };

    struct VoidFunctor {
        typedef void ReturnType;
        void returnValue() { }
    };

    class CountFunctor {
    public:
        typedef size_t ReturnType;

        CountFunctor() : m_count(0) { }
        void count(size_t count) { m_count += count; }
        ReturnType returnValue() { return m_count; }

    private:
        ReturnType m_count;
    };

    static MarkedBlock* create(const PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures);

    static bool isAtomAligned(const void*);
    static MarkedBlock* blockFor(const void*);
    static size_t firstAtom();

    void lastChanceToFinalize();

    Heap* heap() const;
    WeakSet& weakSet();

    enum SweepMode { SweepOnly, SweepToFreeList };
    FreeList sweep(SweepMode = SweepOnly);

    void shrink();

    void visitWeakSet(HeapRootVisitor&);
    void reapWeakSet();

    // While allocating from a free list, MarkedBlock temporarily has bogus
    // cell liveness data. To restore accurate cell liveness data, call one
    // of these functions (see the usage sketch after the FreeList
    // constructors below):
    void didConsumeFreeList(); // Call this once you've allocated all the items in the free list.
    void zapFreeList(const FreeList&); // Call this to undo the free list.

    void clearMarks();
    size_t markCount();
    bool isEmpty();

    size_t cellSize();
    bool cellsNeedDestruction();
    bool onlyContainsStructures();

    size_t size();
    size_t capacity();

    bool isMarked(const void*);
    bool testAndSetMarked(const void*);
    bool isLive(const JSCell*);
    bool isLiveCell(const void*);
    void setMarked(const void*);

    bool needsSweeping();

    void setDirtyObject(const void* atom)
    {
        ASSERT(MarkedBlock::blockFor(atom) == this);
        m_cards.markCardForAtom(atom);
    }

    uint8_t* addressOfCardFor(const void* atom)
    {
        ASSERT(MarkedBlock::blockFor(atom) == this);
        return &m_cards.cardForAtom(atom);
    }

    static inline size_t offsetOfCards()
    {
        return OBJECT_OFFSETOF(MarkedBlock, m_cards);
    }

    static inline size_t offsetOfMarks()
    {
        return OBJECT_OFFSETOF(MarkedBlock, m_marks);
    }

    typedef Vector<JSCell*, 32> DirtyCellVector;
    inline void gatherDirtyCells(DirtyCellVector&);
    template <int size> inline void gatherDirtyCellsWithSize(DirtyCellVector&);

    template <typename Functor> void forEachCell(Functor&);

private:
    static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.

    enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
    template<bool destructorCallNeeded> FreeList sweepHelper(SweepMode = SweepOnly);

    typedef char Atom[atomSize];

    MarkedBlock(const PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures);
    Atom* atoms();
    size_t atomNumber(const void*);
    void callDestructor(JSCell*);
    template<BlockState, SweepMode, bool destructorCallNeeded> FreeList specializedSweep();

    CardSet<bytesPerCard, blockSize> m_cards;

    size_t m_atomsPerCell;
    size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
#if ENABLE(PARALLEL_GC)
    WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic> m_marks;
#else
    WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
    bool m_cellsNeedDestruction;
    bool m_onlyContainsStructures;
    BlockState m_state;
    WeakSet m_weakSet;
};

inline MarkedBlock::FreeList::FreeList()
    : head(0), bytes(0) { }

inline MarkedBlock::FreeList::FreeList(FreeCell* head, size_t bytes)
    : head(head), bytes(bytes) { }
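
// A minimal usage sketch of the free-list protocol described in the class
// comments above; it is illustrative only and not part of the original
// header, and allocateEveryCellForExample is a hypothetical caller. Cells are
// popped off the list the sweeper built; once the list is exhausted, the
// block must be notified so its cell liveness data becomes accurate again.
inline void allocateEveryCellForExample(MarkedBlock* block)
{
    MarkedBlock::FreeList freeList = block->sweep(MarkedBlock::SweepToFreeList);
    for (MarkedBlock::FreeCell* cell = freeList.head; cell; ) {
        MarkedBlock::FreeCell* next = cell->next; // Read before the cell is reused.
        // ... placement-new a JSCell into 'cell' here ...
        cell = next;
    }
    block->didConsumeFreeList(); // Liveness data is valid again.
}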

inline size_t MarkedBlock::firstAtom()
{
    return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
}

inline MarkedBlock::Atom* MarkedBlock::atoms()
{
    return reinterpret_cast<Atom*>(this);
}

inline bool MarkedBlock::isAtomAligned(const void* p)
{
    return !(reinterpret_cast<Bits>(p) & atomAlignmentMask);
}

inline MarkedBlock* MarkedBlock::blockFor(const void* p)
{
    return reinterpret_cast<MarkedBlock*>(reinterpret_cast<Bits>(p) & blockMask);
}
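
// Sketch of the address arithmetic above, added for illustration (the helper
// below is hypothetical and simply restates blockFor() for a cell pointer):
// because every MarkedBlock occupies a blockSize-aligned region, masking off
// the low bits of any pointer into the block recovers the block header.
inline MarkedBlock* blockContainingCellForExample(const JSCell* cell)
{
    ASSERT(MarkedBlock::isAtomAligned(cell));
    return MarkedBlock::blockFor(cell); // Clears the low 16 bits for a 64 KB block.
}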

inline void MarkedBlock::lastChanceToFinalize()
{
    m_weakSet.lastChanceToFinalize();
}

inline Heap* MarkedBlock::heap() const
{
    return m_weakSet.heap();
}

inline WeakSet& MarkedBlock::weakSet()
{
    return m_weakSet;
}

inline void MarkedBlock::shrink()
{
    m_weakSet.shrink();
}

inline void MarkedBlock::visitWeakSet(HeapRootVisitor& heapRootVisitor)
{
    m_weakSet.visit(heapRootVisitor);
}

inline void MarkedBlock::reapWeakSet()
{
    m_weakSet.reap();
}

inline void MarkedBlock::didConsumeFreeList()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state == FreeListed);
    m_state = Allocated;
}

inline void MarkedBlock::clearMarks()
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    ASSERT(m_state != New && m_state != FreeListed);
    m_marks.clearAll();

    // This will become true at the end of the mark phase. We set it now to
    // avoid an extra pass to do so later.
    m_state = Marked;
}

inline size_t MarkedBlock::markCount()
{
    return m_marks.count();
}

inline bool MarkedBlock::isEmpty()
{
    return m_marks.isEmpty() && m_weakSet.isEmpty();
}

inline size_t MarkedBlock::cellSize()
{
    return m_atomsPerCell * atomSize;
}
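
// Worked example of the internal-fragmentation arithmetic described in the
// class comment at the top of this file, added for illustration only
// (cellSizeNeededForExample is hypothetical and not part of the original
// header). Objects occupy a whole number of atoms, so on a 64-bit build
// (atomSize == 32) a 40-byte object needs a 64-byte cell and wastes 24 bytes.
inline size_t cellSizeNeededForExample(size_t objectSize)
{
    return WTF::roundUpToMultipleOf<MarkedBlock::atomSize>(objectSize);
}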

inline bool MarkedBlock::cellsNeedDestruction()
{
    return m_cellsNeedDestruction;
}

inline bool MarkedBlock::onlyContainsStructures()
{
    return m_onlyContainsStructures;
}

inline size_t MarkedBlock::size()
{
    return markCount() * cellSize();
}

inline size_t MarkedBlock::capacity()
{
    return allocation().size();
}

inline size_t MarkedBlock::atomNumber(const void* p)
{
    return (reinterpret_cast<Bits>(p) - reinterpret_cast<Bits>(this)) / atomSize;
}

inline bool MarkedBlock::isMarked(const void* p)
{
    return m_marks.get(atomNumber(p));
}

inline bool MarkedBlock::testAndSetMarked(const void* p)
{
    return m_marks.concurrentTestAndSet(atomNumber(p));
}

inline void MarkedBlock::setMarked(const void* p)
{
    m_marks.set(atomNumber(p));
}

inline bool MarkedBlock::isLive(const JSCell* cell)
{
    switch (m_state) {
    case Allocated:
        return true;
    case Zapped:
        if (isZapped(cell)) {
            // Object dead in previous collection, not allocated since previous collection: mark bit should not be set.
            ASSERT(!m_marks.get(atomNumber(cell)));
            return false;
        }
        // Newly allocated objects: mark bit not set.
        // Objects that survived prior collection: mark bit set.
        return true;
    case Marked:
        return m_marks.get(atomNumber(cell));
    case New:
    case FreeListed:
        ASSERT_NOT_REACHED();
        return false;
    }

    ASSERT_NOT_REACHED();
    return false;
}

inline bool MarkedBlock::isLiveCell(const void* p)
{
    ASSERT(MarkedBlock::isAtomAligned(p));
    size_t atomNumber = this->atomNumber(p);
    size_t firstAtom = this->firstAtom();
    if (atomNumber < firstAtom) // Filters pointers into MarkedBlock metadata.
        return false;
    if ((atomNumber - firstAtom) % m_atomsPerCell) // Filters pointers into cell middles.
        return false;
    if (atomNumber >= m_endAtom) // Filters pointers into invalid cells out of the range.
        return false;

    return isLive(static_cast<const JSCell*>(p));
}

template <typename Functor> inline void MarkedBlock::forEachCell(Functor& functor)
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (!isLive(cell))
            continue;

        functor(cell);
    }
}

inline bool MarkedBlock::needsSweeping()
{
    return m_state == Marked;
}

template <int _cellSize> void MarkedBlock::gatherDirtyCellsWithSize(DirtyCellVector& dirtyCells)
{
    if (m_cards.testAndClear(0)) {
        char* ptr = reinterpret_cast<char*>(&atoms()[firstAtom()]);
        const char* end = reinterpret_cast<char*>(this) + bytesPerCard;
        while (ptr < end) {
            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
            if (isMarked(cell))
                dirtyCells.append(cell);
            ptr += _cellSize;
        }
    }

    const size_t cellOffset = firstAtom() * atomSize % _cellSize;
    for (size_t i = 1; i < m_cards.cardCount; i++) {
        if (!m_cards.testAndClear(i))
            continue;
        char* ptr = reinterpret_cast<char*>(this) + i * bytesPerCard + cellOffset;
        char* end = reinterpret_cast<char*>(this) + (i + 1) * bytesPerCard;
        while (ptr < end) {
            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
            if (isMarked(cell))
                dirtyCells.append(cell);
            ptr += _cellSize;
        }
    }
}

void MarkedBlock::gatherDirtyCells(DirtyCellVector& dirtyCells)
{
    COMPILE_ASSERT((int)m_cards.cardCount == (int)cardCount, MarkedBlockCardCountsMatch);

    ASSERT(m_state != New && m_state != FreeListed);

    // This is an optimisation to avoid having to walk the set of marked
    // blocks twice during GC.
    m_state = Marked;

    size_t cellSize = this->cellSize();
    if (cellSize == 32) {
        gatherDirtyCellsWithSize<32>(dirtyCells);
        return;
    }
    if (cellSize == 64) {
        gatherDirtyCellsWithSize<64>(dirtyCells);
        return;
    }

    const size_t firstCellOffset = firstAtom() * atomSize % cellSize;

    if (m_cards.testAndClear(0)) {
        char* ptr = reinterpret_cast<char*>(this) + firstAtom() * atomSize;
        char* end = reinterpret_cast<char*>(this) + bytesPerCard;
        while (ptr < end) {
            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
            if (isMarked(cell))
                dirtyCells.append(cell);
            ptr += cellSize;
        }
    }
    for (size_t i = 1; i < m_cards.cardCount; i++) {
        if (!m_cards.testAndClear(i))
            continue;
        char* ptr = reinterpret_cast<char*>(this) + firstCellOffset + cellSize * ((i * bytesPerCard + cellSize - 1 - firstCellOffset) / cellSize);
        char* end = reinterpret_cast<char*>(this) + std::min((i + 1) * bytesPerCard, m_endAtom * atomSize);
        while (ptr < end) {
            JSCell* cell = reinterpret_cast<JSCell*>(ptr);
            if (isMarked(cell))
                dirtyCells.append(cell);
            ptr += cellSize;
        }
    }
}
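
// Sketch of how the card table used above gets dirtied, added for
// illustration (recordWriteForExample is a hypothetical caller and not part
// of this header). A generational write barrier marks the 256-byte card
// containing a mutated object; gatherDirtyCells() then rescans only the
// marked cells inside dirtied cards instead of the whole heap.
inline void recordWriteForExample(JSCell* mutatedObject)
{
    MarkedBlock* block = MarkedBlock::blockFor(mutatedObject);
    block->setDirtyObject(mutatedObject); // Marks the card for this object's address.
}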

} // namespace JSC

namespace WTF {

struct MarkedBlockHash : PtrHash<JSC::MarkedBlock*> {
    static unsigned hash(JSC::MarkedBlock* const& key)
    {
        // Aligned VM regions tend to be monotonically increasing integers,
        // which is a great hash function, but we have to remove the low bits,
        // since they're always zero, which is a terrible hash function!
        return reinterpret_cast<JSC::Bits>(key) / JSC::MarkedBlock::blockSize;
    }
};

template<> struct DefaultHash<JSC::MarkedBlock*> {
    typedef MarkedBlockHash Hash;
};

} // namespace WTF

#endif // MarkedBlock_h