/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "MarkStack.h"

#include "ConservativeRoots.h"
#include "Heap.h"
#include "Heuristics.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/MainThread.h>

namespace JSC {

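// MarkStackSegmentAllocator keeps a lock-protected free list of segments so
// that the mark stacks can recycle memory without hitting the OS on every
// expansion; pages are only given back to the OS in shrinkReserve(), between
// collections.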
MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        MutexLocker locker(m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Heuristics::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    MutexLocker locker(m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

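// Detach the whole free list under the lock, then decommit the segments with
// the lock released, so the OS calls don't stall concurrent allocate() and
// release() callers.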
void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        MutexLocker locker(m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Heuristics::gcMarkStackSegmentSize);
    }
}

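// MarkStackArray is a stack of cells chunked into fixed-size segments. Cells
// are only pushed to and popped from the top segment; the full segments
// beneath it hang off m_topSegment->m_previous. Judging by the
// !ASSERT_DISABLED guards below, each segment's own m_top is maintained for
// validation only, while the live top index is cached in m_top.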
MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Heuristics::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

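// Called when the top segment is full: chain it beneath a freshly allocated
// segment and restart the top index at zero.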
void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

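// Called when the top segment is empty: release it and resume popping from
// the full segment beneath, if any. Returns false once the whole stack is
// exhausted.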
bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

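// Work donation: relink all of our full segments onto the other stack (a
// pointer swap per segment, no cells copied), then top it off cell by cell,
// keeping at least numberOfCellsToKeep for ourselves. Returns true if
// anything was donated.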
bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // Fast check: see if the other mark stack already has enough segments.
    if (other.m_numberOfPreviousSegments + 1 >= Heuristics::maximumNumberOfSharedSegments)
        return false;

    size_t numberOfCellsToKeep = Heuristics::minimumNumberOfCellsToKeep;
    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);

    // Looks like we should donate! Give the other mark stack all of our
    // previous segments, and then top it off.
    MarkStackSegment* previous = m_topSegment->m_previous;
    while (previous) {
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    ASSERT(!m_numberOfPreviousSegments);
    m_topSegment->m_previous = 0;
    validatePrevious();
    other.validatePrevious();

    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
    // we really have a lot of work, we give up half.
    if (m_top > numberOfCellsToKeep * 2)
        numberOfCellsToKeep = m_top / 2;
    while (m_top > numberOfCellsToKeep)
        other.append(removeLast());

    return true;
}

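// Work stealing: prefer taking an entire full segment from the other stack
// in constant time; fall back to popping individual cells when the other
// stack only has a partial top segment.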
void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    // Otherwise drain 1/Nth of the shared array where N is the number of
    // workers, or Heuristics::minimumNumberOfCellsToKeep, whichever is bigger.
    size_t numberOfCellsToSteal = std::max((size_t)Heuristics::minimumNumberOfCellsToKeep, other.size() / Heuristics::numberOfGCMarkers);
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

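// Body of each parallel marking thread: park in drainFromShared() in
// SlaveDrain mode and process cells stolen from the shared mark stack until
// the VM tells the markers to exit.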
#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::markingThreadMain()
{
    WTF::registerGCThread();
    SlotVisitor slotVisitor(*this, m_globalData->jsArrayVPtr, m_globalData->jsFinalObjectVPtr, m_globalData->jsStringVPtr);
    ParallelModeEnabler enabler(slotVisitor);
    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}

void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
    return 0;
}
#endif

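// Marker 0 runs on the GC's own thread, so only markers 1 through
// numberOfGCMarkers - 1 get dedicated threads below.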
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Heuristics::numberOfGCMarkers; ++i) {
        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads. Set the exit flag and wake everyone under
    // the lock, then join with the lock released so the threads can acquire
    // it on their way out.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i], 0);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif

    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

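// Devirtualized fast paths: strings, final objects, and arrays dominate
// marking, so compare vptrs directly and call the corresponding
// visitChildren statically, falling back to the method table for all other
// cell types.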
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell, void* jsFinalObjectVPtr, void* jsArrayVPtr, void* jsStringVPtr)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (cell->vptr() == jsStringVPtr) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (cell->vptr() == jsFinalObjectVPtr) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (cell->vptr() == jsArrayVPtr) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateSlow()
{
    // Refuse to donate if shared has more entries than I do.
    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
        return;
    MutexLocker locker(m_shared.m_markingLock);
    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
        // Only wake up threads if the shared stack is big enough; otherwise assume that
        // it's more profitable for us to just scan this ourselves later.
        if (m_shared.m_sharedMarkStack.size() >= Heuristics::sharedStackWakeupThreshold)
            m_shared.m_markingCondition.broadcast();
    }
}

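// In parallel mode, pause scanning every minimumNumberOfScansBetweenRebalance
// cells to consider donating work to the shared stack; in single-threaded
// mode, just scan until the local stack is empty.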
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

    void* jsFinalObjectVPtr = m_jsFinalObjectVPtr;
    void* jsArrayVPtr = m_jsArrayVPtr;
    void* jsStringVPtr = m_jsStringVPtr;

#if ENABLE(PARALLEL_GC)
    if (Heuristics::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Heuristics::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
    }
}

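// Termination protocol: a marker is "active" while it holds work. The last
// marker to go idle with the shared stack empty broadcasts; the master
// returns once there are no active markers and no shared work, while slaves
// block until more work arrives or m_parallelMarkersShouldExit is set.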
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Heuristics::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Heuristics::numberOfGCMarkers > 1;
#else
    ASSERT(Heuristics::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        return;

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell)
        CRASH();

    if (!cell->structure())
        CRASH();

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
        CRASH();
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC