/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32 #include "platform/heap/ThreadState.h"
34 #include "platform/TraceEvent.h"
35 #include "platform/heap/AddressSanitizer.h"
36 #include "platform/heap/Handle.h"
37 #include "platform/heap/Heap.h"
38 #include "public/platform/Platform.h"
39 #include "wtf/ThreadingPrimitives.h"
45 #elif defined(__GLIBC__)
46 extern "C" void* __libc_stack_end; // NOLINT
49 #if defined(MEMORY_SANITIZER)
50 #include <sanitizer/msan_interface.h>
55 static void* getStackStart()
57 #if defined(__GLIBC__) || OS(ANDROID)
59 if (!pthread_getattr_np(pthread_self(), &attr)) {
62 int error = pthread_attr_getstack(&attr, &base, &size);
63 RELEASE_ASSERT(!error);
64 pthread_attr_destroy(&attr);
65 return reinterpret_cast<Address>(base) + size;
67 #if defined(__GLIBC__)
68 // pthread_getattr_np can fail for the main thread. In this case
69 // just like NaCl we rely on the __libc_stack_end to give us
70 // the start of the stack.
71 // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
72 return __libc_stack_end;
78 return pthread_get_stackaddr_np(pthread_self());
79 #elif OS(WIN) && COMPILER(MSVC)
80 // On Windows stack limits for the current thread are available in
81 // the thread information block (TIB). Its fields can be accessed through
82 // FS segment register on x86 and GS segment register on x86_64.
84 return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
86 return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
89 #error Unsupported getStackStart on this platform.
94 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
96 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
97 bool ThreadState::s_inGC = false;
99 static Mutex& threadAttachMutex()
101 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
105 static double lockingTimeout()
107 // Wait time for parking all threads is at most 100 MS.
112 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
113 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
115 class SafePointBarrier {
117 SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
118 ~SafePointBarrier() { }
120 // Request other attached threads that are not at safe points to park themselves on safepoints.
123 ASSERT(ThreadState::current()->isAtSafePoint());
125 // Lock threadAttachMutex() to prevent threads from attaching.
126 threadAttachMutex().lock();
128 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
130 MutexLocker locker(m_mutex);
131 atomicAdd(&m_unparkedThreadCount, threads.size());
132 releaseStore(&m_canResume, 0);
134 ThreadState* current = ThreadState::current();
135 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
139 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
140 for (size_t i = 0; i < interruptors.size(); i++)
141 interruptors[i]->requestInterrupt();
144 while (acquireLoad(&m_unparkedThreadCount) > 0) {
145 double expirationTime = currentTime() + lockingTimeout();
146 if (!m_parked.timedWait(m_mutex, expirationTime)) {
147 // One of the other threads did not return to a safepoint within the maximum
148 // time we allow for threads to be parked. Abandon the GC and resume the
149 // currently parked threads.
157 void resumeOthers(bool barrierLocked = false)
159 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
160 atomicSubtract(&m_unparkedThreadCount, threads.size());
161 releaseStore(&m_canResume, 1);
163 // FIXME: Resumed threads will all contend for m_mutex just to unlock it
164 // later which is a waste of resources.
165 if (UNLIKELY(barrierLocked)) {
166 m_resume.broadcast();
168 // FIXME: Resumed threads will all contend for
169 // m_mutex just to unlock it later which is a waste of
171 MutexLocker locker(m_mutex);
172 m_resume.broadcast();
175 ThreadState* current = ThreadState::current();
176 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
180 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
181 for (size_t i = 0; i < interruptors.size(); i++)
182 interruptors[i]->clearInterrupt();
185 threadAttachMutex().unlock();
186 ASSERT(ThreadState::current()->isAtSafePoint());
189 void checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
191 ASSERT(!state->isSweepInProgress());
192 if (!acquireLoad(&m_canResume)) {
193 // If we are leaving the safepoint from a SafePointAwareMutexLocker
194 // call out to release the lock before going to sleep. This enables the
195 // lock to be acquired in the sweep phase, e.g. during weak processing
196 // or finalization. The SafePointAwareLocker will reenter the safepoint
197 // and reacquire the lock after leaving this safepoint.
200 pushAllRegisters(this, state, parkAfterPushRegisters);
201 state->performPendingSweep();
205 void enterSafePoint(ThreadState* state)
207 ASSERT(!state->isSweepInProgress());
208 pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
211 void leaveSafePoint(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
213 if (atomicIncrement(&m_unparkedThreadCount) > 0)
214 checkAndPark(state, locker);
218 void doPark(ThreadState* state, intptr_t* stackEnd)
220 state->recordStackEnd(stackEnd);
221 MutexLocker locker(m_mutex);
222 if (!atomicDecrement(&m_unparkedThreadCount))
224 while (!acquireLoad(&m_canResume))
225 m_resume.wait(m_mutex);
226 atomicIncrement(&m_unparkedThreadCount);
229 static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
231 barrier->doPark(state, stackEnd);
234 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
236 state->recordStackEnd(stackEnd);
237 state->copyStackUntilSafePointScope();
238 // m_unparkedThreadCount tracks amount of unparked threads. It is
239 // positive if and only if we have requested other threads to park
240 // at safe-points in preparation for GC. The last thread to park
241 // itself will make the counter hit zero and should notify GC thread
242 // that it is safe to proceed.
243 // If no other thread is waiting for other threads to park then
244 // this counter can be negative: if N threads are at safe-points
245 // the counter will be -N.
246 if (!atomicDecrement(&m_unparkedThreadCount)) {
247 MutexLocker locker(m_mutex);
248 m_parked.signal(); // Safe point reached.
252 static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
254 barrier->doEnterSafePoint(state, stackEnd);
257 volatile int m_canResume;
258 volatile int m_unparkedThreadCount;
260 ThreadCondition m_parked;
261 ThreadCondition m_resume;
264 ThreadState::ThreadState()
265 : m_thread(currentThread())
266 , m_persistents(adoptPtr(new PersistentAnchor()))
267 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
268 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
269 , m_safePointScopeMarker(0)
270 , m_atSafePoint(false)
272 , m_gcRequested(false)
273 , m_forcePreciseGCForTesting(false)
274 , m_sweepRequested(0)
275 , m_sweepInProgress(false)
276 , m_noAllocationCount(0)
278 , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
279 , m_isCleaningUp(false)
280 #if defined(ADDRESS_SANITIZER)
281 , m_asanFakeStack(__asan_get_current_fake_stack())
284 ASSERT(!**s_threadSpecific);
285 **s_threadSpecific = this;
288 m_statsAfterLastGC.clear();
289 // First allocate the general heap, second iterate through to
290 // allocate the type specific heaps
291 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this);
292 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
293 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this);
295 CallbackStack::init(&m_weakCallbackStack);
298 ThreadState::~ThreadState()
301 CallbackStack::shutdown(&m_weakCallbackStack);
302 for (int i = GeneralHeap; i < NumberOfHeaps; i++)
304 deleteAllValues(m_interruptors);
305 **s_threadSpecific = 0;
308 void ThreadState::init()
310 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
311 s_safePointBarrier = new SafePointBarrier;
314 void ThreadState::shutdown()
316 delete s_safePointBarrier;
317 s_safePointBarrier = 0;
319 // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
322 void ThreadState::attachMainThread()
324 RELEASE_ASSERT(!Heap::s_shutdownCalled);
325 MutexLocker locker(threadAttachMutex());
326 ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
327 attachedThreads().add(state);
330 void ThreadState::detachMainThread()
332 // Enter a safe point before trying to acquire threadAttachMutex
333 // to avoid dead lock if another thread is preparing for GC, has acquired
334 // threadAttachMutex and waiting for other threads to pause or reach a
336 ThreadState* state = mainThreadState();
337 if (!state->isAtSafePoint())
338 state->enterSafePointWithoutPointers();
341 MutexLocker locker(threadAttachMutex());
342 state->leaveSafePoint();
343 ASSERT(attachedThreads().contains(state));
344 attachedThreads().remove(state);
345 state->~ThreadState();
347 shutdownHeapIfNecessary();
350 void ThreadState::shutdownHeapIfNecessary()
352 // We don't need to enter a safe point before acquiring threadAttachMutex
353 // because this thread is already detached.
355 MutexLocker locker(threadAttachMutex());
356 // We start shutting down the heap if there is no running thread
357 // and Heap::shutdown() is already called.
358 if (!attachedThreads().size() && Heap::s_shutdownCalled)
362 void ThreadState::attach()
364 RELEASE_ASSERT(!Heap::s_shutdownCalled);
365 MutexLocker locker(threadAttachMutex());
366 ThreadState* state = new ThreadState();
367 attachedThreads().add(state);
370 void ThreadState::cleanup()
372 // From here on ignore all conservatively discovered
373 // pointers into the heap owned by this thread.
374 m_isCleaningUp = true;
376 // After this GC we expect heap to be empty because
377 // preCleanup tasks should have cleared all persistent
378 // handles that were externally owned.
379 Heap::collectAllGarbage();
381 // Verify that all heaps are empty now.
382 for (int i = 0; i < NumberOfHeaps; i++)
383 m_heaps[i]->assertEmpty();
386 void ThreadState::preCleanup()
388 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
389 m_cleanupTasks[i]->preCleanup();
392 void ThreadState::postCleanup()
394 for (size_t i = 0; i < m_cleanupTasks.size(); i++)
395 m_cleanupTasks[i]->postCleanup();
396 m_cleanupTasks.clear();
399 void ThreadState::detach()
401 ThreadState* state = current();
405 // Enter a safe point before trying to acquire threadAttachMutex
406 // to avoid dead lock if another thread is preparing for GC, has acquired
407 // threadAttachMutex and waiting for other threads to pause or reach a
409 if (!state->isAtSafePoint())
410 state->enterSafePointWithoutPointers();
413 MutexLocker locker(threadAttachMutex());
414 state->leaveSafePoint();
415 state->postCleanup();
416 ASSERT(attachedThreads().contains(state));
417 attachedThreads().remove(state);
420 shutdownHeapIfNecessary();
423 void ThreadState::visitRoots(Visitor* visitor)
426 // All threads are at safepoints so this is not strictly necessary.
427 // However we acquire the mutex to make mutation and traversal of this
429 MutexLocker locker(globalRootsMutex());
430 globalRoots()->trace(visitor);
433 AttachedThreadStateSet& threads = attachedThreads();
434 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
435 (*it)->trace(visitor);
439 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
441 #if defined(ADDRESS_SANITIZER)
442 Address* start = reinterpret_cast<Address*>(m_startOfStack);
443 Address* end = reinterpret_cast<Address*>(m_endOfStack);
444 Address* fakeFrameStart = 0;
445 Address* fakeFrameEnd = 0;
446 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
447 Address* realFrameForFakeFrame =
448 reinterpret_cast<Address*>(
449 __asan_addr_is_in_fake_stack(
450 m_asanFakeStack, maybeFakeFrame,
451 reinterpret_cast<void**>(&fakeFrameStart),
452 reinterpret_cast<void**>(&fakeFrameEnd)));
453 if (realFrameForFakeFrame) {
454 // This is a fake frame from the asan fake stack.
455 if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) {
456 // The real stack address for the asan fake frame is
457 // within the stack range that we need to scan so we need
458 // to visit the values in the fake frame.
459 for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++)
460 Heap::checkAndMarkPointer(visitor, *p);
467 void ThreadState::visitStack(Visitor* visitor)
469 Address* start = reinterpret_cast<Address*>(m_startOfStack);
470 // If there is a safepoint scope marker we should stop the stack
471 // scanning there to not touch active parts of the stack. Anything
472 // interesting beyond that point is in the safepoint stack copy.
473 // If there is no scope marker the thread is blocked and we should
474 // scan all the way to the recorded end stack pointer.
475 Address* end = reinterpret_cast<Address*>(m_endOfStack);
476 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker);
477 Address* current = safePointScopeMarker ? safePointScopeMarker : end;
479 // Ensure that current is aligned by address size otherwise the loop below
480 // will read past start address.
481 current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) & ~(sizeof(Address) - 1));
483 for (; current < start; ++current) {
484 Address ptr = *current;
485 #if defined(MEMORY_SANITIZER)
486 // |ptr| may be uninitialized by design. Mark it as initialized to keep
487 // MSan from complaining.
488 // Note: it may be tempting to get rid of |ptr| and simply use |current|
489 // here, but that would be incorrect. We intentionally use a local
490 // variable because we don't want to unpoison the original stack.
491 __msan_unpoison(&ptr, sizeof(ptr));
493 Heap::checkAndMarkPointer(visitor, ptr);
494 visitAsanFakeStackForPointer(visitor, ptr);
497 for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it) {
499 #if defined(MEMORY_SANITIZER)
500 // See the comment above.
501 __msan_unpoison(&ptr, sizeof(ptr));
503 Heap::checkAndMarkPointer(visitor, ptr);
504 visitAsanFakeStackForPointer(visitor, ptr);
508 void ThreadState::visitPersistents(Visitor* visitor)
510 m_persistents->trace(visitor);
513 void ThreadState::trace(Visitor* visitor)
515 if (m_stackState == HeapPointersOnStack)
517 visitPersistents(visitor);
520 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
522 // If thread is cleaning up ignore conservative pointers.
526 // This checks for normal pages and for large objects which span the extent
527 // of several normal pages.
528 BaseHeapPage* page = heapPageFromAddress(address);
530 page->checkAndMarkPointer(visitor, address);
531 // Whether or not the pointer was within an object it was certainly
532 // within a page that is part of the heap, so we don't want to ask the
533 // other other heaps or put this address in the
534 // HeapDoesNotContainCache.
541 #if ENABLE(GC_TRACING)
542 const GCInfo* ThreadState::findGCInfo(Address address)
544 BaseHeapPage* page = heapPageFromAddress(address);
546 return page->findGCInfo(address);
552 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
554 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
555 *slot = CallbackStack::Item(object, callback);
558 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
560 return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor);
563 PersistentNode* ThreadState::globalRoots()
565 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
569 Mutex& ThreadState::globalRootsMutex()
571 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
575 // Trigger garbage collection on a 50% increase in size, but not for
576 // less than 512kbytes.
577 static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
579 if (newSize < 1 << 19)
581 return newSize > oldSize + (oldSize >> 1);
584 // FIXME: The heuristics are local for a thread at this
585 // point. Consider using heuristics that take memory for all threads
587 bool ThreadState::shouldGC()
589 // Do not GC during sweeping. We allow allocation during
590 // finalization, but those allocations are not allowed
591 // to lead to nested garbage collections.
592 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
595 // Trigger conservative garbage collection on a 100% increase in size,
596 // but not for less than 4Mbytes.
597 static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
599 if (newSize < 1 << 22)
601 return newSize > 2 * oldSize;
604 // FIXME: The heuristics are local for a thread at this
605 // point. Consider using heuristics that take memory for all threads
607 bool ThreadState::shouldForceConservativeGC()
609 // Do not GC during sweeping. We allow allocation during
610 // finalization, but those allocations are not allowed
611 // to lead to nested garbage collections.
612 return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
615 bool ThreadState::sweepRequested()
617 ASSERT(isAnyThreadInGC() || checkThread());
618 return m_sweepRequested;
621 void ThreadState::setSweepRequested()
623 // Sweep requested is set from the thread that initiates garbage
624 // collection which could be different from the thread for this
625 // thread state. Therefore the setting of m_sweepRequested needs a
627 atomicTestAndSetToOne(&m_sweepRequested);
630 void ThreadState::clearSweepRequested()
633 m_sweepRequested = 0;
636 bool ThreadState::gcRequested()
639 return m_gcRequested;
642 void ThreadState::setGCRequested()
645 m_gcRequested = true;
648 void ThreadState::clearGCRequested()
651 m_gcRequested = false;
654 void ThreadState::performPendingGC(StackState stackState)
656 if (stackState == NoHeapPointersOnStack) {
657 if (forcePreciseGCForTesting()) {
658 setForcePreciseGCForTesting(false);
659 Heap::collectAllGarbage();
660 } else if (gcRequested()) {
661 Heap::collectGarbage(NoHeapPointersOnStack);
666 void ThreadState::setForcePreciseGCForTesting(bool value)
669 m_forcePreciseGCForTesting = value;
672 bool ThreadState::forcePreciseGCForTesting()
675 return m_forcePreciseGCForTesting;
678 bool ThreadState::isConsistentForGC()
680 for (int i = 0; i < NumberOfHeaps; i++) {
681 if (!m_heaps[i]->isConsistentForGC())
687 void ThreadState::makeConsistentForGC()
689 for (int i = 0; i < NumberOfHeaps; i++)
690 m_heaps[i]->makeConsistentForGC();
693 void ThreadState::prepareForGC()
695 for (int i = 0; i < NumberOfHeaps; i++) {
696 BaseHeap* heap = m_heaps[i];
697 heap->makeConsistentForGC();
698 // If there are parked threads with outstanding sweep requests, clear their mark bits.
699 // This happens if a thread did not have time to wake up and sweep,
700 // before the next GC arrived.
701 if (sweepRequested())
707 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
709 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
715 for (int i = 0; i < NumberOfHeaps; i++) {
716 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
718 // Asserts that make sure heapPageFromAddress takes addresses from
719 // the whole aligned blinkPageSize memory area. This is necessary
720 // for the negative cache to work.
721 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address)));
722 if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address))
723 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1));
724 ASSERT(!cachedPage || page == cachedPage);
726 heapContainsCache()->addEntry(address, page);
734 void ThreadState::getStats(HeapStats& stats)
738 if (isConsistentForGC()) {
739 HeapStats scannedStats;
740 scannedStats.clear();
741 for (int i = 0; i < NumberOfHeaps; i++)
742 m_heaps[i]->getScannedStats(scannedStats);
743 ASSERT(scannedStats == stats);
748 bool ThreadState::stopThreads()
750 return s_safePointBarrier->parkOthers();
753 void ThreadState::resumeThreads()
755 s_safePointBarrier->resumeOthers();
758 void ThreadState::safePoint(StackState stackState)
761 performPendingGC(stackState);
762 ASSERT(!m_atSafePoint);
763 m_stackState = stackState;
764 m_atSafePoint = true;
765 s_safePointBarrier->checkAndPark(this);
766 m_atSafePoint = false;
767 m_stackState = HeapPointersOnStack;
#ifdef ADDRESS_SANITIZER
// When we are running under AddressSanitizer with detect_stack_use_after_return=1
// then stack marker obtained from SafePointScope will point into a fake stack.
// Detect this case by checking if it falls in between current stack frame
// and stack start and use an arbitrary high enough value for it.
// Don't adjust stack marker in any other case to match behavior of code running
// without AddressSanitizer.
NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer(void* scopeMarker)
{
    Address start = reinterpret_cast<Address>(getStackStart());
    Address end = reinterpret_cast<Address>(&start);
    RELEASE_ASSERT(end < start);

    // Marker lies on the real stack: keep it as-is.
    if (end <= scopeMarker && scopeMarker < start)
        return scopeMarker;

    // The scope marker is on a fake stack; substitute a point a fixed
    // distance above the current frame.
    // 256 is as good an approximation as any else.
    const size_t bytesToCopy = sizeof(Address) * 256;
    if (static_cast<size_t>(start - end) < bytesToCopy)
        return start;

    return end + bytesToCopy;
}
#endif
795 void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
797 #ifdef ADDRESS_SANITIZER
798 if (stackState == HeapPointersOnStack)
799 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker);
801 ASSERT(stackState == NoHeapPointersOnStack || scopeMarker);
802 performPendingGC(stackState);
804 ASSERT(!m_atSafePoint);
805 m_atSafePoint = true;
806 m_stackState = stackState;
807 m_safePointScopeMarker = scopeMarker;
808 s_safePointBarrier->enterSafePoint(this);
811 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
814 ASSERT(m_atSafePoint);
815 s_safePointBarrier->leaveSafePoint(this, locker);
816 m_atSafePoint = false;
817 m_stackState = HeapPointersOnStack;
818 clearSafePointScopeMarker();
819 performPendingSweep();
822 void ThreadState::copyStackUntilSafePointScope()
824 if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack)
827 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
828 Address* from = reinterpret_cast<Address*>(m_endOfStack);
829 RELEASE_ASSERT(from < to);
830 RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack));
831 size_t slotCount = static_cast<size_t>(to - from);
832 ASSERT(slotCount < 1024); // Catch potential performance issues.
834 ASSERT(!m_safePointStackCopy.size());
835 m_safePointStackCopy.resize(slotCount);
836 for (size_t i = 0; i < slotCount; ++i) {
837 m_safePointStackCopy[i] = from[i];
841 void ThreadState::performPendingSweep()
843 if (!sweepRequested())
846 TRACE_EVENT0("Blink", "ThreadState::performPendingSweep");
847 double timeStamp = WTF::currentTimeMS();
848 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
850 TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCSweeping");
852 m_sweepInProgress = true;
853 // Disallow allocation during weak processing.
854 enterNoAllocationScope();
855 // Perform thread-specific weak processing.
856 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
857 leaveNoAllocationScope();
858 // Perform sweeping and finalization.
859 m_stats.clear(); // Sweeping will recalculate the stats
860 for (int i = 0; i < NumberOfHeaps; i++)
862 getStats(m_statsAfterLastGC);
863 m_sweepInProgress = false;
865 clearSweepRequested();
867 if (blink::Platform::current()) {
868 blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
872 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
875 void ThreadState::addInterruptor(Interruptor* interruptor)
877 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
880 MutexLocker locker(threadAttachMutex());
881 m_interruptors.append(interruptor);
885 void ThreadState::removeInterruptor(Interruptor* interruptor)
887 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
890 MutexLocker locker(threadAttachMutex());
891 size_t index = m_interruptors.find(interruptor);
892 RELEASE_ASSERT(index >= 0);
893 m_interruptors.remove(index);
897 void ThreadState::Interruptor::onInterrupted()
899 ThreadState* state = ThreadState::current();
901 ASSERT(!state->isAtSafePoint());
902 state->safePoint(HeapPointersOnStack);
905 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
907 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
911 #if ENABLE(GC_TRACING)
// Debug-tracing helper: probes every attached thread's heap for the GCInfo
// of the object containing |address|. The function's tail (its return
// statements and closing brace) lies beyond this chunk.
912 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address)
// During a GC all threads are parked, so iterating the attached-thread set
// is safe without the lock; otherwise take threadAttachMutex().
914 bool needLockForIteration = !isAnyThreadInGC();
915 if (needLockForIteration)
916 threadAttachMutex().lock();
918 ThreadState::AttachedThreadStateSet& threads = attachedThreads();
919 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
920 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
// Found a match: release the lock (if taken) before returning.
921 if (needLockForIteration)
922 threadAttachMutex().unlock();
// No thread owns the address: release the lock (if taken) before the
// fall-through return (outside this chunk).
926 if (needLockForIteration)
927 threadAttachMutex().unlock();