2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "heap/ThreadState.h"
34 #include "heap/Handle.h"
35 #include "heap/Heap.h"
36 #include "wtf/ThreadingPrimitives.h"
42 #elif defined(__GLIBC__)
43 extern "C" void* __libc_stack_end; // NOLINT
// Returns the start (highest address) of the current thread's stack,
// using a per-platform mechanism. Stacks grow downward, so the "start"
// is base + size. NOTE(review): several preprocessor guard lines of this
// function were dropped in this extract; platform attributions below are
// inferred from the visible #if lines and API names.
48 static void* getStackStart()
50 #if defined(__GLIBC__) || OS(ANDROID)
// Query the pthread attributes for the stack base and size.
52 if (!pthread_getattr_np(pthread_self(), &attr)) {
55 int error = pthread_attr_getstack(&attr, &base, &size);
56 RELEASE_ASSERT(!error);
57 pthread_attr_destroy(&attr);
58 return reinterpret_cast<Address>(base) + size;
60 #if defined(__GLIBC__)
61 // pthread_getattr_np can fail for the main thread. In this case
62 // just like NaCl we rely on the __libc_stack_end to give us
63 // the start of the stack.
64 // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
65 return __libc_stack_end;
// Mac: dedicated pthread API returns the stack start directly.
// (The enclosing #elif guard is elided in this extract.)
71 return pthread_get_stackaddr_np(pthread_self());
72 #elif OS(WIN) && COMPILER(MSVC)
73 // On Windows stack limits for the current thread are available in
74 // the thread information block (TIB). Its fields can be accessed through
75 // FS segment register on x86 and GS segment register on x86_64.
77 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
79 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
82 #error Unsupported getStackStart on this platform.
// Process-wide ThreadState bookkeeping; all of these are set up in
// ThreadState::init().
87 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
// Raw storage for the main thread's ThreadState; constructed via placement
// new in init() and destroyed explicitly in shutdown().
88 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
89 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
// NOTE(review): presumably true while a garbage collection is running;
// the code that sets it is not visible in this extract.
90 bool ThreadState::s_inGC = false;
// Mutex guarding thread attach/detach and the attachedThreads() set.
// Atomically lazily initialized and intentionally leaked (*new Mutex) so
// it remains valid until process exit.
92 static Mutex& threadAttachMutex()
94 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
// pushAllRegisters is implemented in assembly elsewhere: it saves register
// contents onto the stack and then invokes the callback with the resulting
// stack-end pointer, so conservative stack scanning also observes values
// that were held only in registers (see SafePointBarrier::doPark below).
98 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
99 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
// Coordinates parking every attached thread at a safe point so that one
// thread can run a garbage collection. NOTE(review): some method header
// lines (the park/resume entry points) and closing braces were dropped in
// this extract; comments on those regions are inferred from the bodies.
101 class SafePointBarrier {
// m_canResume starts at 1 (threads may run freely); m_unparkedThreadCount
// starts at 0.
103 SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
104 ~SafePointBarrier() { }
106 // Request other attached threads that are not at safe points to park themselves on safepoints.
// The caller must itself already be at a safe point. threadAttachMutex()
// is held from here until the matching resume call below, so no thread can
// attach or detach while a GC is in progress.
109 ASSERT(ThreadState::current()->isAtSafePoint());
111 // Lock threadAttachMutex() to prevent threads from attaching.
112 threadAttachMutex().lock();
114 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
// Count all attached threads as unparked and clear m_canResume so that
// threads reaching doPark() stay blocked until the resume broadcast.
116 MutexLocker locker(m_mutex);
117 atomicAdd(&m_unparkedThreadCount, threads.size());
118 atomicSetOneToZero(&m_canResume);
120 ThreadState* current = ThreadState::current();
// Ask each other thread's interruptors to drive that thread to a safe point.
121 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
125 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
126 for (size_t i = 0; i < interruptors.size(); i++)
127 interruptors[i]->requestInterrupt();
// Block until every other thread has parked; doPark() signals m_parked
// when the unparked count reaches zero.
130 while (m_unparkedThreadCount > 0)
131 m_parked.wait(m_mutex);
// --- Resume counterpart: restores the unparked count, re-enables
// resumption, wakes all parked threads, clears interrupt requests and
// finally releases threadAttachMutex() taken above. ---
136 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
137 atomicSubtract(&m_unparkedThreadCount, threads.size());
138 atomicTestAndSetToOne(&m_canResume);
140 // FIXME: Resumed threads will all contend for
141 // m_mutex just to unlock it later which is a waste of
143 MutexLocker locker(m_mutex);
144 m_resume.broadcast();
147 ThreadState* current = ThreadState::current();
148 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
152 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
153 for (size_t i = 0; i < interruptors.size(); i++)
154 interruptors[i]->clearInterrupt();
157 threadAttachMutex().unlock();
158 ASSERT(ThreadState::current()->isAtSafePoint());
// Called (via pushAllRegisters) on a thread that must park: records the
// stack end for conservative scanning, reports itself parked, and blocks
// until the resume broadcast; the count is restored on wake-up.
161 void doPark(ThreadState* state, intptr_t* stackEnd)
163 state->recordStackEnd(stackEnd);
164 MutexLocker locker(m_mutex);
// Last thread to park notifies the GC-ing thread waiting on m_parked.
// (The signal and the wait-loop condition lines are elided in this extract.)
165 if (!atomicDecrement(&m_unparkedThreadCount))
168 m_resume.wait(m_mutex);
169 atomicIncrement(&m_unparkedThreadCount);
// Park at this safe point if a GC is in progress; afterwards run any sweep
// the GC left pending for this thread.
172 void checkAndPark(ThreadState* state)
174 ASSERT(!state->isSweepInProgress());
176 pushAllRegisters(this, state, parkAfterPushRegisters);
177 state->performPendingSweep();
// Mark the thread as sitting at a safe point without blocking it; a GC may
// proceed while this thread keeps running outside the managed heap.
181 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
183 state->recordStackEnd(stackEnd);
184 // m_unparkedThreadCount tracks amount of unparked threads. It is
185 // positive if and only if we have requested other threads to park
186 // at safe-points in preparation for GC. The last thread to park
187 // itself will make the counter hit zero and should notify GC thread
188 // that it is safe to proceed.
189 // If no other thread is waiting for other threads to park then
190 // this counter can be negative: if N threads are at safe-points
191 // the counter will be -N.
192 if (!atomicDecrement(&m_unparkedThreadCount)) {
193 MutexLocker locker(m_mutex);
194 m_parked.signal(); // Safe point reached.
// Preserve the stack slice above the safe-point scope so the GC can scan
// values this thread might mutate while unparked.
196 state->copyStackUntilSafePointScope();
199 void enterSafePoint(ThreadState* state)
201 ASSERT(!state->isSweepInProgress());
202 pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
// Leave the safe point; afterwards run any sweep left pending for this
// thread. (A wait path for the count > 0 case is elided in this extract.)
205 void leaveSafePoint(ThreadState* state)
207 if (atomicIncrement(&m_unparkedThreadCount) > 0)
209 state->performPendingSweep();
// Trampolines matching the PushAllRegistersCallback signature.
213 static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
215 barrier->doPark(state, stackEnd);
218 static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
220 barrier->doEnterSafePoint(state, stackEnd);
// 1 when parked threads may resume; 0 while a park request is in flight.
223 volatile int m_canResume;
// See the comment at line 184 above for the sign convention.
224 volatile int m_unparkedThreadCount;
226 ThreadCondition m_parked;
227 ThreadCondition m_resume;
// Per-thread GC state for the calling thread: records stack bounds,
// registers itself in the thread-specific slot, and allocates the
// per-thread heaps and persistent-handle anchor.
230 ThreadState::ThreadState()
231 : m_thread(currentThread())
// Both bounds start at the stack start; m_endOfStack is updated later via
// recordStackEnd() when the thread reaches a safe point.
232 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
233 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
234 , m_safePointScopeMarker(0)
235 , m_atSafePoint(false)
237 , m_gcRequested(false)
238 , m_sweepRequested(0)
239 , m_sweepInProgress(false)
240 , m_noAllocationCount(0)
242 , m_heapContainsCache(new HeapContainsCache())
// At most one ThreadState per thread: the TLS slot must be empty.
244 ASSERT(!**s_threadSpecific);
245 **s_threadSpecific = this;
247 m_persistents = new PersistentAnchor();
249 m_statsAfterLastGC.clear();
250 // First allocate the general heap, second iterate through to
251 // allocate the type specific heaps
252 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this);
253 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
254 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this);
// Tears down per-thread state. NOTE(review): the per-heap deletion inside
// the loop (original line 261) is elided in this extract.
257 ThreadState::~ThreadState()
260 for (int i = GeneralHeap; i < NumberOfHeaps; i++)
262 delete m_persistents;
// The ThreadState owns its registered interruptors.
264 deleteAllValues(m_interruptors);
// Clear the TLS slot so ThreadState::current() no longer returns this.
265 **s_threadSpecific = 0;
// One-time process setup: create the TLS slot and the safe-point barrier,
// then construct and register the main thread's state.
268 void ThreadState::init()
270 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
271 s_safePointBarrier = new SafePointBarrier;
// Placement new into static storage; paired with the explicit destructor
// call in shutdown().
272 new(s_mainThreadStateStorage) ThreadState();
273 attachedThreads().add(mainThreadState());
// Process teardown counterpart of init(). The explicit destructor call
// pairs with the placement new into s_mainThreadStateStorage.
276 void ThreadState::shutdown()
278 mainThreadState()->~ThreadState();
// Register the calling thread with the garbage collector. Serialized
// against GC and detach via threadAttachMutex().
281 void ThreadState::attach()
283 MutexLocker locker(threadAttachMutex());
284 ThreadState* state = new ThreadState();
285 attachedThreads().add(state);
// Unregister the calling thread. NOTE(review): the deletion of `state`
// (original line ~293) is elided in this extract.
288 void ThreadState::detach()
290 ThreadState* state = current();
291 MutexLocker locker(threadAttachMutex());
292 attachedThreads().remove(state);
// Trace the GC roots (stack and persistents, see trace()) of every
// attached thread.
296 void ThreadState::visitRoots(Visitor* visitor)
298 AttachedThreadStateSet& threads = attachedThreads();
299 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
300 (*it)->trace(visitor);
// Conservatively scan this thread's stack: every word between the recorded
// stack end and the stack start is treated as a potential heap pointer.
304 void ThreadState::visitStack(Visitor* visitor)
306 Address* end = reinterpret_cast<Address*>(m_startOfStack);
// The stack grows down, so m_endOfStack is the lower address.
307 for (Address* current = reinterpret_cast<Address*>(m_endOfStack); current < end; ++current) {
308 Heap::checkAndMarkPointer(visitor, *current);
// Also scan the slice preserved by copyStackUntilSafePointScope().
311 for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it)
312 Heap::checkAndMarkPointer(visitor, *it);
// Trace all persistent handles. m_persistents is the anchor of a circular
// doubly-linked list, so iteration stops when it wraps back to the anchor.
315 void ThreadState::visitPersistents(Visitor* visitor)
317 for (PersistentNode* current = m_persistents->m_next; current != m_persistents; current = current->m_next) {
318 current->trace(visitor);
// Trace this thread's roots. The stack is scanned only when it may hold
// heap pointers. NOTE(review): the visitStack(visitor) call guarded by the
// condition (original line ~325) is elided in this extract.
322 void ThreadState::trace(Visitor* visitor)
324 if (m_stackState == HeapPointersOnStack)
326 visitPersistents(visitor);
// Conservative pointer check: if `address` points into one of this
// thread's heap pages or large objects, mark the containing object.
// NOTE(review): the null-check of `page` before the dereference on line 333
// (original line ~332) is elided in this extract.
329 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
331 BaseHeapPage* page = heapPageFromAddress(address);
333 return page->checkAndMarkPointer(visitor, address);
334 // Not in heap pages, check large objects
335 for (int i = 0; i < NumberOfHeaps; i++) {
336 if (m_heaps[i]->checkAndMarkLargeHeapObject(visitor, address))
342 // Trigger garbage collection on a 50% increase in size, but not for
343 // less than 2 pages.
344 static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
346 if (newSize < 2 * blinkPagePayloadSize())
348 return newSize > oldSize + (oldSize >> 1);
351 // FIXME: The heuristics are local for a thread at this
352 // point. Consider using heuristics that take memory for all threads
// into account. Returns true when this thread's object space grew enough
// since the last GC (see increasedEnoughToGC above).
354 bool ThreadState::shouldGC()
356 return increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
359 // Trigger conservative garbage collection on a 100% increase in size,
360 // but not for less than 2 pages.
361 static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
363 if (newSize < 2 * blinkPagePayloadSize())
365 return newSize > 2 * oldSize;
368 // FIXME: The heuristics are local for a thread at this
369 // point. Consider using heuristics that take memory for all threads
// into account. Returns true when the heap has roughly doubled since the
// last GC (see increasedEnoughToForceConservativeGC above).
371 bool ThreadState::shouldForceConservativeGC()
373 return increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
// Whether a GC has requested this thread to sweep. Readable either from
// the owning thread or from any thread while a GC is in progress — hence
// the relaxed assertion.
376 bool ThreadState::sweepRequested()
378 ASSERT(isAnyThreadInGC() || checkThread());
379 return m_sweepRequested;
382 void ThreadState::setSweepRequested()
384 // Sweep requested is set from the thread that initiates garbage
385 // collection which could be different from the thread for this
386 // thread state. Therefore the setting of m_sweepRequested needs a
// barrier — provided by the atomic test-and-set below.
388 atomicTestAndSetToOne(&m_sweepRequested);
// Clears the sweep request with a plain store. NOTE(review): a guard
// (original lines 392-393, likely checkThread()) is elided in this extract;
// presumably only the owning thread clears its own request — confirm.
391 void ThreadState::clearSweepRequested()
394 m_sweepRequested = 0;
// Whether a GC has been requested for this thread. NOTE(review): original
// line ~399 (likely a checkThread() guard) is elided in this extract.
397 bool ThreadState::gcRequested()
400 return m_gcRequested;
// Flags that a GC should run at this thread's next safe point.
403 void ThreadState::setGCRequested()
406 m_gcRequested = true;
// Clears a previously requested GC (called once the GC has run).
409 void ThreadState::clearGCRequested()
412 m_gcRequested = false;
// Returns false if any of this thread's sub-heaps reports itself
// inconsistent for GC; all heaps must be consistent before collecting.
415 bool ThreadState::isConsistentForGC()
417 for (int i = 0; i < NumberOfHeaps; i++) {
418 if (!m_heaps[i]->isConsistentForGC())
// Puts every sub-heap into a GC-consistent state.
424 void ThreadState::makeConsistentForGC()
426 for (int i = 0; i < NumberOfHeaps; i++)
427 m_heaps[i]->makeConsistentForGC();
// Prepares every sub-heap for an imminent GC.
430 void ThreadState::prepareForGC()
432 for (int i = 0; i < NumberOfHeaps; i++) {
433 BaseHeap* heap = m_heaps[i];
434 heap->makeConsistentForGC();
435 // If there are parked threads with outstanding sweep requests, clear their mark bits.
436 // This happens if a thread did not have time to wake up and sweep,
437 // before the next GC arrived.
438 if (sweepRequested())
// NOTE(review): the body of this `if` (original line ~439, presumably the
// mark-bit clearing described above) is elided in this extract.
// Maps an arbitrary address to the heap page containing it (0 if none),
// consulting a cache before scanning all sub-heaps. NOTE(review): the
// cache-hit early return after the lookup (original lines ~448-449) is
// elided in this extract.
444 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
447 bool found = heapContainsCache()->lookup(address, &page);
451 for (int i = 0; i < NumberOfHeaps; i++) {
452 page = m_heaps[i]->heapPageFromAddress(address);
454 Address blinkPageAddr = roundToBlinkPageStart(address);
// Sanity check: every address within the same blink page must map to the
// same heap page.
456 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr));
457 ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1));
// Record the result in the cache; `page` may be 0.
461 heapContainsCache()->addEntry(address, page);
462 return page; // 0 if not found.
// Whether `address` lies inside this thread's managed heap (regular pages
// or large objects).
465 bool ThreadState::contains(Address address)
467 // Check heap contains cache first.
468 BaseHeapPage* page = heapPageFromAddress(address);
471 // If no heap page was found check large objects.
472 for (int i = 0; i < NumberOfHeaps; i++) {
473 if (m_heaps[i]->largeHeapObjectFromAddress(address))
// Copies this thread's heap statistics into `stats`. NOTE(review): the
// actual assignment from m_stats (original line ~481) and the debug-build
// guard around the block below are elided in this extract.
479 void ThreadState::getStats(HeapStats& stats)
// Debug cross-check: statistics recomputed by scanning the heaps must
// agree with the incrementally tracked stats — only valid when the heaps
// are in a GC-consistent state.
483 if (isConsistentForGC()) {
484 HeapStats scannedStats;
485 scannedStats.clear();
486 for (int i = 0; i < NumberOfHeaps; i++)
487 m_heaps[i]->getScannedStats(scannedStats);
488 ASSERT(scannedStats == stats);
// Parks all other attached threads at safe points (blocks until they are).
493 void ThreadState::stopThreads()
495 s_safePointBarrier->parkOthers();
// Releases threads previously parked by stopThreads().
498 void ThreadState::resumeThreads()
500 s_safePointBarrier->resumeOthers();
// Momentary safe point: opportunistically runs a requested GC when the
// caller guarantees no heap pointers live on its stack, then parks here if
// another thread is currently collecting.
503 void ThreadState::safePoint(StackState stackState)
506 if (stackState == NoHeapPointersOnStack && gcRequested())
507 Heap::collectGarbage(NoHeapPointersOnStack);
508 m_stackState = stackState;
509 s_safePointBarrier->checkAndPark(this);
// Outside the safe point, conservatively assume the stack holds pointers.
510 m_stackState = HeapPointersOnStack;
// Enter an extended safe point (scope-based): GC may proceed while this
// thread keeps running. `scopeMarker` bounds the stack slice preserved for
// scanning; it is required unless the stack is declared pointer-free.
513 void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
515 ASSERT(stackState == NoHeapPointersOnStack || scopeMarker);
// Opportunistic GC, same as in safePoint().
516 if (stackState == NoHeapPointersOnStack && gcRequested())
517 Heap::collectGarbage(NoHeapPointersOnStack);
// Safe-point scopes of this kind do not nest directly.
519 ASSERT(!m_atSafePoint);
520 m_atSafePoint = true;
521 m_stackState = stackState;
522 m_safePointScopeMarker = scopeMarker;
523 s_safePointBarrier->enterSafePoint(this);
// Leave the extended safe point entered by enterSafePoint(); blocks via
// the barrier if a GC is still in progress.
526 void ThreadState::leaveSafePoint()
529 ASSERT(m_atSafePoint);
530 m_atSafePoint = false;
531 m_stackState = HeapPointersOnStack;
532 clearSafePointScopeMarker();
533 s_safePointBarrier->leaveSafePoint(this);
// Copies the stack slice between the recorded stack end and the safe-point
// scope marker into m_safePointStackCopy, so a GC can scan values this
// thread may overwrite while running outside the managed heap. No-op when
// there is no marker or the stack is declared pointer-free.
536 void ThreadState::copyStackUntilSafePointScope()
538 if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack)
541 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
542 Address* from = reinterpret_cast<Address*>(m_endOfStack);
// Stack grows down: the stack end must lie below the scope marker, which
// in turn lies below the stack start.
543 RELEASE_ASSERT(from < to);
544 RELEASE_ASSERT(to < reinterpret_cast<Address*>(m_startOfStack));
545 size_t slotCount = static_cast<size_t>(to - from);
546 ASSERT(slotCount < 1024); // Catch potential performance issues.
// The previous copy must have been cleared before taking a new one.
548 ASSERT(!m_safePointStackCopy.size());
549 m_safePointStackCopy.append(reinterpret_cast<Address*>(from), slotCount);
// Runs the sweep a GC requested for this thread, if any: sweeps every
// sub-heap with allocation disabled, then records the post-GC statistics.
552 void ThreadState::performPendingSweep()
554 if (sweepRequested()) {
555 m_sweepInProgress = true;
557 // Disallow allocation during sweeping and finalization.
558 enterNoAllocationScope();
559 m_stats.clear(); // Sweeping will recalculate the stats
560 for (int i = 0; i < NumberOfHeaps; i++)
// NOTE(review): the per-heap sweep call (original line 561) is elided in
// this extract.
562 leaveNoAllocationScope();
// Post-sweep stats become the new "size at last GC" baseline used by the
// shouldGC()/shouldForceConservativeGC() heuristics.
564 getStats(m_statsAfterLastGC);
565 m_sweepInProgress = false;
567 clearSweepRequested();
// Registers an interruptor that can drive this thread to a safe point.
// Entered under a safe-point scope because acquiring threadAttachMutex()
// may block behind a thread that is performing a GC.
571 void ThreadState::addInterruptor(Interruptor* interruptor)
573 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
576 MutexLocker locker(threadAttachMutex());
577 m_interruptors.append(interruptor);
581 void ThreadState::removeInterruptor(Interruptor* interruptor)
583 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
586 MutexLocker locker(threadAttachMutex());
587 size_t index = m_interruptors.find(interruptor);
588 RELEASE_ASSERT(index >= 0);
589 m_interruptors.remove(index);
// Default interruption handler: bring the interrupted thread to a safe
// point so a pending GC can proceed.
593 void ThreadState::Interruptor::onInterrupted()
595 ThreadState* state = ThreadState::current();
// An already-safe thread should never be interrupted again.
597 ASSERT(!state->isAtSafePoint());
598 state->safePoint(HeapPointersOnStack);
601 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
603 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());