// Source/JavaScriptCore/heap/MarkStack.cpp
/*
 * Copyright (C) 2009, 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkStack.h"

#include "ConservativeRoots.h"
#include "Heap.h"
#include "Heuristics.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"
#include "Structure.h"
#include "WriteBarrier.h"
#include <wtf/MainThread.h>

namespace JSC {

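// MarkStackSegmentAllocator hands out fixed-size blocks of mark stack storage.
// Released segments are kept on a mutex-protected free list so later
// allocations can reuse them; shrinkReserve() returns the pooled segments to
// the OS. A minimal usage sketch (illustrative only, not a real call site):
//
//     MarkStackSegmentAllocator allocator;
//     MarkStackSegment* segment = allocator.allocate(); // pooled or freshly committed
//     allocator.release(segment);                        // back onto the free list
//     allocator.shrinkReserve();                         // decommit everything pooled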
MarkStackSegmentAllocator::MarkStackSegmentAllocator()
    : m_nextFreeSegment(0)
{
}

MarkStackSegmentAllocator::~MarkStackSegmentAllocator()
{
    shrinkReserve();
}

MarkStackSegment* MarkStackSegmentAllocator::allocate()
{
    {
        MutexLocker locker(m_lock);
        if (m_nextFreeSegment) {
            MarkStackSegment* result = m_nextFreeSegment;
            m_nextFreeSegment = result->m_previous;
            return result;
        }
    }

    return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Heuristics::gcMarkStackSegmentSize));
}

void MarkStackSegmentAllocator::release(MarkStackSegment* segment)
{
    MutexLocker locker(m_lock);
    segment->m_previous = m_nextFreeSegment;
    m_nextFreeSegment = segment;
}

void MarkStackSegmentAllocator::shrinkReserve()
{
    MarkStackSegment* segments;
    {
        MutexLocker locker(m_lock);
        segments = m_nextFreeSegment;
        m_nextFreeSegment = 0;
    }
    while (segments) {
        MarkStackSegment* toFree = segments;
        segments = segments->m_previous;
        OSAllocator::decommitAndRelease(toFree, Heuristics::gcMarkStackSegmentSize);
    }
}

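// MarkStackArray is a stack of cells backed by a chain of fixed-size segments.
// Only the top segment is partially filled; every segment below it is full.
// That invariant is what makes whole-segment donation and stealing cheap.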
MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator)
    : m_allocator(allocator)
    , m_segmentCapacity(MarkStackSegment::capacityFromSize(Heuristics::gcMarkStackSegmentSize))
    , m_top(0)
    , m_numberOfPreviousSegments(0)
{
    m_topSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    m_topSegment->m_top = 0;
#endif
    m_topSegment->m_previous = 0;
}

MarkStackArray::~MarkStackArray()
{
    ASSERT(!m_topSegment->m_previous);
    m_allocator.release(m_topSegment);
}

void MarkStackArray::expand()
{
    ASSERT(m_topSegment->m_top == m_segmentCapacity);

    m_numberOfPreviousSegments++;

    MarkStackSegment* nextSegment = m_allocator.allocate();
#if !ASSERT_DISABLED
    nextSegment->m_top = 0;
#endif
    nextSegment->m_previous = m_topSegment;
    m_topSegment = nextSegment;
    setTopForEmptySegment();
    validatePrevious();
}

bool MarkStackArray::refill()
{
    validatePrevious();
    if (top())
        return true;
    MarkStackSegment* toFree = m_topSegment;
    MarkStackSegment* previous = m_topSegment->m_previous;
    if (!previous)
        return false;
    ASSERT(m_numberOfPreviousSegments);
    m_numberOfPreviousSegments--;
    m_topSegment = previous;
    m_allocator.release(toFree);
    setTopForFullSegment();
    validatePrevious();
    return true;
}

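// Donation: if the receiving stack is not already well stocked with segments,
// hand over all of our full segments and then append individual cells until we
// are down to minimumNumberOfCellsToKeep (or half of our cells, if we are
// holding a lot of work). Returns false only when the fast check bails out.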
bool MarkStackArray::donateSomeCellsTo(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // Fast check: see if the other mark stack already has enough segments.
    if (other.m_numberOfPreviousSegments + 1 >= Heuristics::maximumNumberOfSharedSegments)
        return false;

    size_t numberOfCellsToKeep = Heuristics::minimumNumberOfCellsToKeep;
    ASSERT(m_top > numberOfCellsToKeep || m_topSegment->m_previous);

    // Looks like we should donate! Give the other mark stack all of our
    // previous segments, and then top it off.
    MarkStackSegment* previous = m_topSegment->m_previous;
    while (previous) {
        ASSERT(m_numberOfPreviousSegments);

        MarkStackSegment* current = previous;
        previous = current->m_previous;

        current->m_previous = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current;

        m_numberOfPreviousSegments--;
        other.m_numberOfPreviousSegments++;
    }
    ASSERT(!m_numberOfPreviousSegments);
    m_topSegment->m_previous = 0;
    validatePrevious();
    other.validatePrevious();

    // Now top off. We want to keep at a minimum numberOfCellsToKeep, but if
    // we really have a lot of work, we give up half.
    if (m_top > numberOfCellsToKeep * 2)
        numberOfCellsToKeep = m_top / 2;
    while (m_top > numberOfCellsToKeep)
        other.append(removeLast());

    return true;
}

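// Stealing: prefer to take one whole full segment from the other stack; if it
// has none, drain a share of its individual cells instead.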
void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other)
{
    ASSERT(m_segmentCapacity == other.m_segmentCapacity);
    validatePrevious();
    other.validatePrevious();

    // If other has an entire segment, steal it and return.
    if (other.m_topSegment->m_previous) {
        ASSERT(other.m_topSegment->m_previous->m_top == m_segmentCapacity);

        // First remove a segment from other.
        MarkStackSegment* current = other.m_topSegment->m_previous;
        other.m_topSegment->m_previous = current->m_previous;
        other.m_numberOfPreviousSegments--;

        ASSERT(!!other.m_numberOfPreviousSegments == !!other.m_topSegment->m_previous);

        // Now add it to this.
        current->m_previous = m_topSegment->m_previous;
        m_topSegment->m_previous = current;
        m_numberOfPreviousSegments++;

        validatePrevious();
        other.validatePrevious();
        return;
    }

    // Otherwise drain 1/Nth of the shared array where N is the number of
    // workers, or Heuristics::minimumNumberOfCellsToKeep, whichever is bigger.
    size_t numberOfCellsToSteal = std::max((size_t)Heuristics::minimumNumberOfCellsToKeep, other.size() / Heuristics::numberOfGCMarkers);
    while (numberOfCellsToSteal-- > 0 && other.canRemoveLast())
        append(other.removeLast());
}

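// Entry point for the helper marking threads. Each one runs a SlotVisitor in
// slave-drain mode: it blocks until work shows up on the shared mark stack,
// steals some of it, drains, and exits when m_parallelMarkersShouldExit is set.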
#if ENABLE(PARALLEL_GC)
void MarkStackThreadSharedData::markingThreadMain()
{
    WTF::registerGCThread();
    SlotVisitor slotVisitor(*this, m_globalData->jsArrayVPtr, m_globalData->jsFinalObjectVPtr, m_globalData->jsStringVPtr);
    ParallelModeEnabler enabler(slotVisitor);
    slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}

void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
    static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
    return 0;
}
#endif

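// MarkStackThreadSharedData owns the state shared by all markers: the shared
// mark stack, the merged opaque-root set, and the marking lock/condition.
// With PARALLEL_GC it spawns numberOfGCMarkers - 1 helper threads; the thread
// driving the collection serves as the remaining marker.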
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
    : m_globalData(globalData)
    , m_sharedMarkStack(m_segmentAllocator)
    , m_numberOfActiveParallelMarkers(0)
    , m_parallelMarkersShouldExit(false)
{
#if ENABLE(PARALLEL_GC)
    for (unsigned i = 1; i < Heuristics::numberOfGCMarkers; ++i) {
        m_markingThreads.append(createThread(markingThreadStartFunc, this, "JavaScriptCore::Marking"));
        ASSERT(m_markingThreads.last());
    }
#endif
}

MarkStackThreadSharedData::~MarkStackThreadSharedData()
{
#if ENABLE(PARALLEL_GC)
    // Destroy our marking threads.
    {
        MutexLocker locker(m_markingLock);
        m_parallelMarkersShouldExit = true;
        m_markingCondition.broadcast();
    }
    for (unsigned i = 0; i < m_markingThreads.size(); ++i)
        waitForThreadCompletion(m_markingThreads[i], 0);
#endif
}

void MarkStackThreadSharedData::reset()
{
    ASSERT(!m_numberOfActiveParallelMarkers);
    ASSERT(!m_parallelMarkersShouldExit);
    ASSERT(m_sharedMarkStack.isEmpty());

#if ENABLE(PARALLEL_GC)
    m_segmentAllocator.shrinkReserve();
    m_opaqueRoots.clear();
#else
    ASSERT(m_opaqueRoots.isEmpty());
#endif

    m_weakReferenceHarvesters.removeAll();
}

void MarkStack::reset()
{
    m_visitCount = 0;
    ASSERT(m_stack.isEmpty());
#if ENABLE(PARALLEL_GC)
    ASSERT(m_opaqueRoots.isEmpty()); // Should have merged by now.
#else
    m_opaqueRoots.clear();
#endif
}

void MarkStack::append(ConservativeRoots& conservativeRoots)
{
    JSCell** roots = conservativeRoots.roots();
    size_t size = conservativeRoots.size();
    for (size_t i = 0; i < size; ++i)
        internalAppend(roots[i]);
}

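// Dispatch on the cell's vtable pointer so the hottest cell types (strings,
// final objects, and arrays) bypass the generic method-table lookup when
// their children are visited.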
ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell, void* jsFinalObjectVPtr, void* jsArrayVPtr, void* jsStringVPtr)
{
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_visitedTypeCounts.count(cell);
#endif

    ASSERT(Heap::isMarked(cell));

    if (cell->vptr() == jsStringVPtr) {
        JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (cell->vptr() == jsFinalObjectVPtr) {
        JSObject::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    if (cell->vptr() == jsArrayVPtr) {
        JSArray::visitChildren(const_cast<JSCell*>(cell), visitor);
        return;
    }

    cell->methodTable()->visitChildren(const_cast<JSCell*>(cell), visitor);
}

void SlotVisitor::donateSlow()
{
    // Refuse to donate if shared has more entries than I do.
    if (m_shared.m_sharedMarkStack.size() > m_stack.size())
        return;
    MutexLocker locker(m_shared.m_markingLock);
    if (m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack)) {
        // Only wake up threads if the shared stack is big enough; otherwise assume that
        // it's more profitable for us to just scan this ourselves later.
        if (m_shared.m_sharedMarkStack.size() >= Heuristics::sharedStackWakeupThreshold)
            m_shared.m_markingCondition.broadcast();
    }
}

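// Drain this visitor's private mark stack. When running in parallel, pause
// every minimumNumberOfScansBetweenRebalance visits to consider donating work
// to the shared stack so idle markers can pick it up.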
void SlotVisitor::drain()
{
    ASSERT(m_isInParallelMode);

    void* jsFinalObjectVPtr = m_jsFinalObjectVPtr;
    void* jsArrayVPtr = m_jsArrayVPtr;
    void* jsStringVPtr = m_jsStringVPtr;

#if ENABLE(PARALLEL_GC)
    if (Heuristics::numberOfGCMarkers > 1) {
        while (!m_stack.isEmpty()) {
            m_stack.refill();
            for (unsigned countdown = Heuristics::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;)
                visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
            donateKnownParallel();
        }

        mergeOpaqueRootsIfNecessary();
        return;
    }
#endif

    while (!m_stack.isEmpty()) {
        m_stack.refill();
        while (m_stack.canRemoveLast())
            visitChildren(*this, m_stack.removeLast(), jsFinalObjectVPtr, jsArrayVPtr, jsStringVPtr);
    }
}

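// Alternate between draining the private stack and refilling it from the
// shared one. The master returns once every marker is idle and the shared
// stack is empty; slaves wake the master when they observe that state, and
// exit once m_parallelMarkersShouldExit is set during teardown.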
void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
{
    ASSERT(m_isInParallelMode);

    ASSERT(Heuristics::numberOfGCMarkers);

    bool shouldBeParallel;

#if ENABLE(PARALLEL_GC)
    shouldBeParallel = Heuristics::numberOfGCMarkers > 1;
#else
    ASSERT(Heuristics::numberOfGCMarkers == 1);
    shouldBeParallel = false;
#endif

    if (!shouldBeParallel) {
        // This call should be a no-op.
        ASSERT_UNUSED(sharedDrainMode, sharedDrainMode == MasterDrain);
        ASSERT(m_stack.isEmpty());
        ASSERT(m_shared.m_sharedMarkStack.isEmpty());
        return;
    }

#if ENABLE(PARALLEL_GC)
    {
        MutexLocker locker(m_shared.m_markingLock);
        m_shared.m_numberOfActiveParallelMarkers++;
    }
    while (true) {
        {
            MutexLocker locker(m_shared.m_markingLock);
            m_shared.m_numberOfActiveParallelMarkers--;

            // How we wait differs depending on drain mode.
            if (sharedDrainMode == MasterDrain) {
                // Wait until either termination is reached, or until there is some work
                // for us to do.
                while (true) {
                    // Did we reach termination?
                    if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                        return;

                    // Is there work to be done?
                    if (!m_shared.m_sharedMarkStack.isEmpty())
                        break;

                    // Otherwise wait.
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);
                }
            } else {
                ASSERT(sharedDrainMode == SlaveDrain);

                // Did we detect termination? If so, let the master know.
                if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
                    m_shared.m_markingCondition.broadcast();

                while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
                    m_shared.m_markingCondition.wait(m_shared.m_markingLock);

                // Is the VM exiting? If so, exit this thread.
                if (m_shared.m_parallelMarkersShouldExit)
                    return;
            }

            m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack);
            m_shared.m_numberOfActiveParallelMarkers++;
        }

        drain();
    }
#endif
}

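// Merge this visitor's locally collected opaque roots into the shared set
// under a lock, so that all markers agree on which opaque roots are live.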
void MarkStack::mergeOpaqueRoots()
{
    ASSERT(!m_opaqueRoots.isEmpty()); // Should only be called when opaque roots are non-empty.
    {
        MutexLocker locker(m_shared.m_opaqueRootsLock);
        HashSet<void*>::iterator begin = m_opaqueRoots.begin();
        HashSet<void*>::iterator end = m_opaqueRoots.end();
        for (HashSet<void*>::iterator iter = begin; iter != end; ++iter)
            m_shared.m_opaqueRoots.add(*iter);
    }
    m_opaqueRoots.clear();
}

void SlotVisitor::harvestWeakReferences()
{
    for (WeakReferenceHarvester* current = m_shared.m_weakReferenceHarvesters.head(); current; current = current->next())
        current->visitWeakReferences(*this);
}

void SlotVisitor::finalizeUnconditionalFinalizers()
{
    while (m_shared.m_unconditionalFinalizers.hasNext())
        m_shared.m_unconditionalFinalizers.removeNext()->finalizeUnconditionally();
}

#if ENABLE(GC_VALIDATION)
void MarkStack::validate(JSCell* cell)
{
    if (!cell)
        CRASH();

    if (!cell->structure())
        CRASH();

    // Both the cell's structure, and the cell's structure's structure should be the Structure Structure.
    // I hate this sentence.
    if (cell->structure()->structure()->JSCell::classInfo() != cell->structure()->JSCell::classInfo())
        CRASH();
}
#else
void MarkStack::validate(JSCell*)
{
}
#endif

} // namespace JSC