/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef ThreadState_h
#define ThreadState_h

#include "platform/PlatformExport.h"
#include "platform/heap/AddressSanitizer.h"
#include "public/platform/WebThread.h"
#include "wtf/HashSet.h"
#include "wtf/OwnPtr.h"
#include "wtf/PassOwnPtr.h"
#include "wtf/ThreadSpecific.h"
#include "wtf/Threading.h"
#include "wtf/ThreadingPrimitives.h"
#include "wtf/Vector.h"

#if ENABLE(GC_PROFILE_HEAP)
#include "wtf/HashMap.h"
#endif

namespace WebCore {

class BaseHeap;
class BaseHeapPage;
class CallbackStack;
class FinalizedHeapObjectHeader;
struct GCInfo;
class HeapContainsCache;
class HeapObjectHeader;
class Node;
class PageMemory;
class PersistentNode;
class SafePointAwareMutexLocker;
class SafePointBarrier;
class Visitor;
class WrapperPersistentRegion;
template<typename Header> class ThreadHeap;

typedef uint8_t* Address;

typedef void (*FinalizationCallback)(void*);
typedef void (*VisitorCallback)(Visitor*, void* self);
typedef VisitorCallback TraceCallback;
typedef VisitorCallback WeakPointerCallback;
typedef VisitorCallback EphemeronCallback;

// ThreadAffinity indicates which threads objects can be used on. We
// distinguish between objects that can be used on the main thread
// only and objects that can be used on any thread.
//
// For objects that can only be used on the main thread we avoid going
// through thread-local storage to get to the thread state.
//
// FIXME: We should evaluate the performance gain. Having
// ThreadAffinity is complicating the implementation and we should get
// rid of it if it is fast enough to always go through thread-local
// storage.
enum ThreadAffinity {
    AnyThread,
    MainThreadOnly,
};

template<typename T, bool derivesNode = WTF::IsSubclass<typename WTF::RemoveConst<T>::Type, Node>::value> struct DefaultThreadingTrait;

template<typename T>
struct DefaultThreadingTrait<T, false> {
    static const ThreadAffinity Affinity = AnyThread;
};

template<typename T>
struct DefaultThreadingTrait<T, true> {
    static const ThreadAffinity Affinity = MainThreadOnly;
};

template<typename T>
struct ThreadingTrait {
    static const ThreadAffinity Affinity = DefaultThreadingTrait<T>::Affinity;
};

// Marks the specified class as being used from multiple threads. When
// a class is used from multiple threads we go through thread-local
// storage to get the heap in which to allocate an object of that type
// and when allocating a Persistent handle for an object of that
// type. Notice that marking the base class does not automatically
// mark its descendants; they have to be explicitly marked.
#define USED_FROM_MULTIPLE_THREADS(Class)                 \
    class Class;                                          \
    template<> struct ThreadingTrait<Class> {             \
        static const ThreadAffinity Affinity = AnyThread; \
    }

#define USED_FROM_MULTIPLE_THREADS_NAMESPACE(Namespace, Class) \
    namespace Namespace {                                      \
        class Class;                                           \
    }                                                          \
    template<> struct ThreadingTrait<Namespace::Class> {       \
        static const ThreadAffinity Affinity = AnyThread;      \
    }

template<typename U> class ThreadingTrait<const U> : public ThreadingTrait<U> { };

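// Illustrative usage (not from the original header; the class name is
// hypothetical):
//
//   USED_FROM_MULTIPLE_THREADS(MyCrossThreadType);
//
// After this, heap allocation and Persistent handle creation for
// MyCrossThreadType resolve the ThreadState through thread-local storage
// instead of assuming the main thread.
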
// List of typed heaps. The list is used to generate the implementation
// of typed heap related methods.
//
// To create a new typed heap add a H(<ClassName>) to the
// FOR_EACH_TYPED_HEAP macro below.
#define FOR_EACH_TYPED_HEAP(H) \
    H(Node)

#define TypedHeapEnumName(Type) Type##Heap,
#define TypedHeapEnumNameNonFinalized(Type) Type##HeapNonFinalized,

enum TypedHeaps {
    GeneralHeap,
    CollectionBackingHeap,
    FOR_EACH_TYPED_HEAP(TypedHeapEnumName)
    GeneralHeapNonFinalized,
    CollectionBackingHeapNonFinalized,
    FOR_EACH_TYPED_HEAP(TypedHeapEnumNameNonFinalized)
    // Values used for iteration of heap segments.
    NumberOfHeaps,
    FirstFinalizedHeap = GeneralHeap,
    FirstNonFinalizedHeap = GeneralHeapNonFinalized,
    NumberOfFinalizedHeaps = GeneralHeapNonFinalized,
    NumberOfNonFinalizedHeaps = NumberOfHeaps - NumberOfFinalizedHeaps,
    NonFinalizedHeapOffset = FirstNonFinalizedHeap
};

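// For reference, with FOR_EACH_TYPED_HEAP(H) expanding to H(Node), the enum
// above evaluates to:
//
//   GeneralHeap, CollectionBackingHeap, NodeHeap,
//   GeneralHeapNonFinalized, CollectionBackingHeapNonFinalized,
//   NodeHeapNonFinalized, NumberOfHeaps, ...
//
// so each heap has a finalized and a non-finalized variant, and the two
// halves of the enum are offset by NonFinalizedHeapOffset.
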
// Base implementation for HeapIndexTrait found below.
template<int heapIndex>
struct HeapIndexTraitBase {
    typedef FinalizedHeapObjectHeader HeaderType;
    typedef ThreadHeap<HeaderType> HeapType;
    static const int finalizedIndex = heapIndex;
    static const int nonFinalizedIndex = heapIndex + static_cast<int>(NonFinalizedHeapOffset);
    static int index(bool isFinalized)
    {
        return isFinalized ? finalizedIndex : nonFinalizedIndex;
    }
};

// HeapIndexTrait defines properties for each heap in the TypedHeaps enum.
template<int index>
struct HeapIndexTrait;

template<>
struct HeapIndexTrait<GeneralHeap> : public HeapIndexTraitBase<GeneralHeap> { };
template<>
struct HeapIndexTrait<GeneralHeapNonFinalized> : public HeapIndexTrait<GeneralHeap> { };

template<>
struct HeapIndexTrait<CollectionBackingHeap> : public HeapIndexTraitBase<CollectionBackingHeap> { };
template<>
struct HeapIndexTrait<CollectionBackingHeapNonFinalized> : public HeapIndexTrait<CollectionBackingHeap> { };

#define DEFINE_TYPED_HEAP_INDEX_TRAIT(Type)                                      \
    template<>                                                                   \
    struct HeapIndexTrait<Type##Heap> : public HeapIndexTraitBase<Type##Heap> {  \
        typedef HeapObjectHeader HeaderType;                                     \
        typedef ThreadHeap<HeaderType> HeapType;                                 \
    };                                                                           \
    template<>                                                                   \
    struct HeapIndexTrait<Type##HeapNonFinalized> : public HeapIndexTrait<Type##Heap> { };
FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_INDEX_TRAIT)
#undef DEFINE_TYPED_HEAP_INDEX_TRAIT

// HeapTypeTrait defines which heap to use for particular types.
// By default objects are allocated in the GeneralHeap.
template<typename T>
struct HeapTypeTrait : public HeapIndexTrait<GeneralHeap> { };

// We don't have any type-based mappings to the CollectionBackingHeap.

// Each typed heap maps the respective type to its heap.
#define DEFINE_TYPED_HEAP_TRAIT(Type) \
    template<>                        \
    struct HeapTypeTrait<class Type> : public HeapIndexTrait<Type##Heap> { };
FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT)
#undef DEFINE_TYPED_HEAP_TRAIT

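// Illustrative effect (not from the original header): HeapTypeTrait<Node>
// now derives from HeapIndexTrait<NodeHeap>, so Nodes go into the
// Node-specific heap, while an unlisted type falls back to the primary
// template above and is allocated in the GeneralHeap.
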
// A HeapStats structure keeps track of the amount of memory allocated
// for a Blink heap and how much of that memory is used for actual
// Blink objects. These stats are used in the heuristics to determine
// when to perform garbage collections.
class HeapStats {
public:
    HeapStats() : m_totalObjectSpace(0), m_totalAllocatedSpace(0) { }

    size_t totalObjectSpace() const { return m_totalObjectSpace; }
    size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; }

    void add(HeapStats* other)
    {
        m_totalObjectSpace += other->m_totalObjectSpace;
        m_totalAllocatedSpace += other->m_totalAllocatedSpace;
    }

    inline void increaseObjectSpace(size_t newObjectSpace)
    {
        m_totalObjectSpace += newObjectSpace;
    }

    inline void decreaseObjectSpace(size_t deadObjectSpace)
    {
        m_totalObjectSpace -= deadObjectSpace;
    }

    inline void increaseAllocatedSpace(size_t newAllocatedSpace)
    {
        m_totalAllocatedSpace += newAllocatedSpace;
    }

    inline void decreaseAllocatedSpace(size_t deadAllocatedSpace)
    {
        m_totalAllocatedSpace -= deadAllocatedSpace;
    }

    void clear()
    {
        m_totalObjectSpace = 0;
        m_totalAllocatedSpace = 0;
    }

    bool operator==(const HeapStats& other)
    {
        return m_totalAllocatedSpace == other.m_totalAllocatedSpace
            && m_totalObjectSpace == other.m_totalObjectSpace;
    }

private:
    size_t m_totalObjectSpace; // Bytes of objects that may be live, not including headers.
    size_t m_totalAllocatedSpace; // Bytes allocated from the OS.

    friend class HeapTester;
};

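// Minimal usage sketch (assumed, not from the original header; pageSize,
// objectSize and total are hypothetical):
//
//   HeapStats stats;
//   stats.increaseAllocatedSpace(pageSize);   // memory acquired from the OS
//   stats.increaseObjectSpace(objectSize);    // portion handed out to objects
//   total.add(&stats);                        // fold into an aggregate total
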
class PLATFORM_EXPORT ThreadState {
    WTF_MAKE_NONCOPYABLE(ThreadState);
public:
    // When garbage collecting we need to know whether or not there
    // can be pointers to Blink GC managed objects on the stack for
    // each thread. When threads reach a safe point they record
    // whether or not they have pointers on the stack.
    enum StackState {
        NoHeapPointersOnStack,
        HeapPointersOnStack
    };

    // When profiling we would like to identify forced GC requests.
    enum CauseOfGC {
        NormalGC,
        ForcedGC
    };

    // Scope that marks the thread as sweeping for its duration, so that
    // nothing else starts a sweep while it is active.
    class NoSweepScope {
    public:
        explicit NoSweepScope(ThreadState* state) : m_state(state)
        {
            ASSERT(!m_state->m_sweepInProgress);
            m_state->m_sweepInProgress = true;
        }
        ~NoSweepScope()
        {
            ASSERT(m_state->m_sweepInProgress);
            m_state->m_sweepInProgress = false;
        }
    private:
        ThreadState* m_state;
    };

    // The set of ThreadStates for all threads attached to the Blink
    // garbage collector.
    typedef HashSet<ThreadState*> AttachedThreadStateSet;
    static AttachedThreadStateSet& attachedThreads();

    // Initialize threading infrastructure. Should be called from the main
    // thread.
    static void init();
    static void shutdown();
    static void shutdownHeapIfNecessary();
    bool isTerminating() { return m_isTerminating; }

    static void attachMainThread();
    static void detachMainThread();

    // Trace all persistent roots, called when marking the managed heap objects.
    static void visitPersistentRoots(Visitor*);

    // Trace all objects found on the stack, used when doing conservative GCs.
    static void visitStackRoots(Visitor*);

    // Associate a ThreadState object with the current thread. After this
    // call the thread can start using the garbage collected heap
    // infrastructure. It also has to periodically check for safepoints.
    static void attach();

    // Disassociate the attached ThreadState from the current thread. The
    // thread can no longer use the garbage collected heap after this call.
    static void detach();

    static ThreadState* current() { return **s_threadSpecific; }
    static ThreadState* mainThreadState()
    {
        return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
    }

    bool isMainThread() const { return this == mainThreadState(); }
    inline bool checkThread() const
    {
        ASSERT(m_thread == currentThread());
        return true;
    }

    // shouldGC and shouldForceConservativeGC implement the heuristics
    // that are used to determine when to collect garbage. If
    // shouldForceConservativeGC returns true, we force the garbage
    // collection immediately. Otherwise, if shouldGC returns true, we
    // record that we should garbage collect the next time we return
    // to the event loop. If both return false, we don't need to
    // collect garbage at this point.
    bool shouldGC();
    bool shouldForceConservativeGC();
    bool increasedEnoughToGC(size_t, size_t);
    bool increasedEnoughToForceConservativeGC(size_t, size_t);

    // If gcRequested returns true when a thread returns to its event
    // loop the thread will initiate a garbage collection.
    bool gcRequested();
    void setGCRequested();
    void clearGCRequested();

    // Was the last GC forced for testing? This is set when garbage collection
    // is forced for testing and there are pointers on the stack. It remains
    // set until a garbage collection is triggered with no pointers on the stack.
    // This is used for layout tests that trigger GCs and check if objects are
    // dead at a given point in time. That only reliably works when we get
    // precise GCs with no conservative stack scanning.
    void setForcePreciseGCForTesting(bool);
    bool forcePreciseGCForTesting();

    bool sweepRequested();
    void setSweepRequested();
    void clearSweepRequested();
    void performPendingSweep();

    // Support for disallowing allocation. Mainly used for sanity
    // checks via asserts.
    bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; }
    void enterNoAllocationScope() { m_noAllocationCount++; }
    void leaveNoAllocationScope() { m_noAllocationCount--; }

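    // Illustrative pairing (assumed, not from the original header):
    //
    //   state->enterNoAllocationScope();
    //   ... marking or sweeping work that must not allocate ...
    //   state->leaveNoAllocationScope();
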
    // Before performing GC the thread-specific heap state should be
    // made consistent for sweeping.
    void makeConsistentForSweeping();
    bool isConsistentForSweeping();

    // Is the thread corresponding to this thread state currently
    // performing GC?
    bool isInGC() const { return m_inGC; }

    // Is any of the threads registered with the Blink garbage collection
    // infrastructure currently performing GC?
    static bool isAnyThreadInGC() { return s_inGC; }

    // Is the thread corresponding to this thread state currently
    // sweeping?
    bool isSweepInProgress() const { return m_sweepInProgress; }

    // Safepoint related functionality.
    //
    // When a thread attempts to perform GC it needs to stop all other threads
    // that use the heap, or at least guarantee that they will not touch any
    // heap allocated object until GC is complete.
    //
    // We say that a thread is at a safepoint if this thread is guaranteed to
    // not touch any heap allocated object or any heap related functionality until
    // it leaves the safepoint.
    //
    // Notice that a thread does not have to be paused while it is at a
    // safepoint; it can continue to run and perform tasks that do not require
    // interaction with the heap. It will only be paused if it attempts to
    // leave the safepoint while a GC is in progress.
    //
    // Each thread that has a ThreadState attached must:
    // - periodically check if GC is requested from another thread by calling a safePoint() method;
    // - use SafePointScope around long running loops that have no safePoint() invocation inside;
    //   such loops must not touch any heap object (see the sketch below);
    // - register an Interruptor that can interrupt long running loops that have no calls to safePoint
    //   and are not wrapped in a SafePointScope (e.g. an Interruptor for JavaScript code).

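    // Illustrative sketch of the SafePointScope case (assumed usage; the
    // function and members are hypothetical):
    //
    //   void WorkerThread::waitForTask()
    //   {
    //       // This loop blocks without touching heap objects, so it is
    //       // wrapped in a SafePointScope to avoid stalling GCs.
    //       ThreadState::SafePointScope scope(ThreadState::NoHeapPointersOnStack);
    //       while (!m_hasTask)
    //           m_condition.wait(m_mutex);
    //   }
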
    // Request all other threads to stop. Must only be called if the current
    // thread is at a safepoint.
    static bool stopThreads();
    static void resumeThreads();

    // Check if GC is requested by another thread and pause this thread if this is the case.
    // Can only be called when the current thread is in a consistent state.
    void safePoint(StackState);

    // Mark the current thread as running inside a safepoint.
    void enterSafePointWithoutPointers() { enterSafePoint(NoHeapPointersOnStack, 0); }
    void enterSafePointWithPointers(void* scopeMarker) { enterSafePoint(HeapPointersOnStack, scopeMarker); }
    void leaveSafePoint(SafePointAwareMutexLocker* = 0);
    bool isAtSafePoint() const { return m_atSafePoint; }

    class SafePointScope {
    public:
        enum ScopeNesting {
            NoNesting,
            AllowNesting
        };

        explicit SafePointScope(StackState stackState, ScopeNesting nesting = NoNesting)
            : m_state(ThreadState::current())
        {
            if (m_state->isAtSafePoint()) {
                RELEASE_ASSERT(nesting == AllowNesting);
                // We can ignore stackState because there should be no
                // manipulation of heap object pointers after the outermost
                // safepoint was entered.
                m_state = 0;
            } else {
                m_state->enterSafePoint(stackState, this);
            }
        }

        ~SafePointScope()
        {
            if (m_state)
                m_state->leaveSafePoint();
        }

    private:
        ThreadState* m_state;
    };

    // If an attached thread enters a long running loop that can call back
    // into Blink, and leaving and reentering a safepoint at every
    // transition between this loop and Blink is deemed too expensive,
    // then instead of marking this loop as a GC safepoint the thread
    // can provide an interruptor object which allows the GC
    // to temporarily interrupt and pause this long running loop at
    // an arbitrary moment, creating a safepoint for a GC.
    class PLATFORM_EXPORT Interruptor {
    public:
        virtual ~Interruptor() { }

        // Request the interruptor to interrupt the thread and
        // call onInterrupted on that thread once interruption
        // is complete.
        virtual void requestInterrupt() = 0;

        // Clear previous interrupt request.
        virtual void clearInterrupt() = 0;

    protected:
        // This method is called on the interrupted thread to
        // create a safepoint for a GC.
        void onInterrupted();
    };

    void addInterruptor(Interruptor*);
    void removeInterruptor(Interruptor*);

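    // Illustrative sketch (assumed, not from the original header): an
    // interruptor for a scripting engine might look like
    //
    //   class ScriptInterruptor : public ThreadState::Interruptor {
    //   public:
    //       // Schedules a callback on the script thread; the callback calls
    //       // onInterrupted(), creating a safepoint for the GC.
    //       virtual void requestInterrupt() OVERRIDE;
    //       virtual void clearInterrupt() OVERRIDE;
    //   };
    //
    // registered with ThreadState::current()->addInterruptor(...).
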
    // CleanupTasks are executed when ThreadState performs
    // cleanup before detaching.
    class CleanupTask {
    public:
        virtual ~CleanupTask() { }

        // Executed before the final GC.
        virtual void preCleanup() { }

        // Executed after the final GC. The thread heap is empty at this point.
        virtual void postCleanup() { }
    };

    void addCleanupTask(PassOwnPtr<CleanupTask> cleanupTask)
    {
        m_cleanupTasks.append(cleanupTask);
    }

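    // Minimal sketch (assumed; the task class is hypothetical):
    //
    //   class DropCachesTask : public ThreadState::CleanupTask {
    //   public:
    //       virtual void preCleanup() OVERRIDE { /* release refs keeping objects alive */ }
    //       virtual void postCleanup() OVERRIDE { /* verify the heap is really empty */ }
    //   };
    //
    //   ThreadState::current()->addCleanupTask(adoptPtr(new DropCachesTask));
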
    // Should only be called under protection of threadAttachMutex().
    const Vector<Interruptor*>& interruptors() const { return m_interruptors; }

    void recordStackEnd(intptr_t* endOfStack)
    {
        m_endOfStack = endOfStack;
    }

    // Get one of the heap structures for this thread.
    //
    // The heap is split into multiple heap parts based on object
    // types. To get the index for a given type, use
    // HeapTypeTrait<Type>::index.
    BaseHeap* heap(int index) const { return m_heaps[index]; }

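    // For example (illustrative, not from the original header), the heap
    // used for finalized objects of type T is:
    //
    //   BaseHeap* heap = state->heap(HeapTypeTrait<T>::index(true));
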
    // Infrastructure to determine if an address is within one of the
    // address ranges for the Blink heap. If the address is in the Blink
    // heap the containing heap page is returned.
    HeapContainsCache* heapContainsCache() { return m_heapContainsCache.get(); }
    BaseHeapPage* contains(Address address) { return heapPageFromAddress(address); }
    BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
    BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }

    WrapperPersistentRegion* wrapperRoots() const
    {
        ASSERT(m_liveWrapperPersistents);
        return m_liveWrapperPersistents;
    }
    WrapperPersistentRegion* takeWrapperPersistentRegion();
    void freeWrapperPersistentRegion(WrapperPersistentRegion*);

    // List of persistent roots allocated on the given thread.
    PersistentNode* roots() const { return m_persistents.get(); }

    // List of global persistent roots not owned by any particular thread.
    // globalRootsMutex must be acquired before any modifications.
    static PersistentNode* globalRoots();
    static Mutex& globalRootsMutex();

    // Visit the local thread stack and trace all pointers conservatively.
    void visitStack(Visitor*);

    // Visit the ASan fake stack frame corresponding to a slot on the
    // real machine stack if there is one.
    void visitAsanFakeStackForPointer(Visitor*, Address);

    // Visit all persistents allocated on this thread.
    void visitPersistents(Visitor*);

    // Checks a given address and, if it is a pointer into the Oilpan heap,
    // marks the object to which it points.
    bool checkAndMarkPointer(Visitor*, Address);

#if ENABLE(GC_PROFILE_MARKING)
    const GCInfo* findGCInfo(Address);
    static const GCInfo* findGCInfoFromAllThreads(Address);
#endif

#if ENABLE(GC_PROFILE_HEAP)
    struct SnapshotInfo {
        ThreadState* state;

        size_t freeSize;
        size_t pageCount;

        // Map from base-classes to snapshot class-ids (used as index below).
        HashMap<const GCInfo*, size_t> classTags;

        // Map from class-id (index) to count/size.
        Vector<int> liveCount;
        Vector<int> deadCount;
        Vector<size_t> liveSize;
        Vector<size_t> deadSize;

        // Map from class-id (index) to a vector of generation counts.
        // For i < 7, the count is the number of objects that died after surviving |i| GCs.
        // For i == 7, the count is the number of objects that survived at least 7 GCs.
        Vector<Vector<int, 8> > generations;

        explicit SnapshotInfo(ThreadState* state) : state(state), freeSize(0), pageCount(0) { }

        size_t getClassTag(const GCInfo*);
    };

    void snapshot();
#endif

    void pushWeakObjectPointerCallback(void*, WeakPointerCallback);
    bool popAndInvokeWeakPointerCallback(Visitor*);

    void getStats(HeapStats&);
    HeapStats& stats() { return m_stats; }
    HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; }

    void setupHeapsForTermination();

    void registerSweepingTask();
    void unregisterSweepingTask();

    Mutex& sweepMutex() { return m_sweepMutex; }

private:
    explicit ThreadState();
    ~ThreadState();

    friend class SafePointBarrier;
    friend class SafePointAwareMutexLocker;

    void enterSafePoint(StackState, void*);
    NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
    void clearSafePointScopeMarker()
    {
        m_safePointStackCopy.clear();
        m_safePointScopeMarker = 0;
    }

    void performPendingGC(StackState);

    // Finds the Blink HeapPage in this thread-specific heap
    // corresponding to a given address. Returns 0 if the address is
    // not contained in any of the pages. This does not consider
    // large objects.
    BaseHeapPage* heapPageFromAddress(Address);

    // When a ThreadState is detaching from a non-main thread its
    // heap is expected to be empty (because it is going away).
    // Perform registered cleanup tasks and a garbage collection
    // to sweep away any objects that are left on this heap.
    // We assert that nothing must remain after this cleanup.
    // If the assertion does not hold we crash, as we are potentially
    // in a dangling pointer situation.
    void cleanup();

    void setLowCollectionRate(bool value) { m_lowCollectionRate = value; }

    void waitUntilSweepersDone();

    static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
    static SafePointBarrier* s_safePointBarrier;

    // This variable is flipped to true after all threads are stopped
    // and the outermost GC has started.
    static bool s_inGC;

    // We can't create a static member of type ThreadState here
    // because it will introduce a global constructor and destructor.
    // We would like to manage the lifetime of the ThreadState attached
    // to the main thread explicitly instead, and still use the normal
    // constructor and destructor for the ThreadState class.
    // For this we reserve static storage for the main ThreadState
    // and lazily construct the ThreadState in it using placement new.
    static uint8_t s_mainThreadStateStorage[];

    ThreadIdentifier m_thread;
    WrapperPersistentRegion* m_liveWrapperPersistents;
    WrapperPersistentRegion* m_pooledWrapperPersistents;
    size_t m_pooledWrapperPersistentRegionCount;
    OwnPtr<PersistentNode> m_persistents;
    StackState m_stackState;
    intptr_t* m_startOfStack;
    intptr_t* m_endOfStack;
    void* m_safePointScopeMarker;
    Vector<Address> m_safePointStackCopy;
    bool m_atSafePoint;
    Vector<Interruptor*> m_interruptors;
    bool m_gcRequested;
    bool m_forcePreciseGCForTesting;
    volatile int m_sweepRequested;
    bool m_sweepInProgress;
    size_t m_noAllocationCount;
    bool m_inGC;
    BaseHeap* m_heaps[NumberOfHeaps];
    OwnPtr<HeapContainsCache> m_heapContainsCache;
    HeapStats m_stats;
    HeapStats m_statsAfterLastGC;

    Vector<OwnPtr<CleanupTask> > m_cleanupTasks;
    bool m_isTerminating;

    bool m_lowCollectionRate;

    OwnPtr<blink::WebThread> m_sweeperThread;
    int m_numberOfSweeperTasks;
    Mutex m_sweepMutex;
    ThreadCondition m_sweepThreadCondition;

    CallbackStack* m_weakCallbackStack;

#if defined(ADDRESS_SANITIZER)
    void* m_asanFakeStack;
#endif
};

template<ThreadAffinity affinity> class ThreadStateFor;

template<> class ThreadStateFor<MainThreadOnly> {
public:
    static ThreadState* state()
    {
        // This specialization must only be used from the main thread.
        ASSERT(ThreadState::current()->isMainThread());
        return ThreadState::mainThreadState();
    }
};

template<> class ThreadStateFor<AnyThread> {
public:
    static ThreadState* state() { return ThreadState::current(); }
};

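// Typical lookup (illustrative, not from the original header): allocation
// code can combine the traits above so that main-thread-only types skip
// thread-local storage entirely:
//
//   ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
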
// The SafePointAwareMutexLocker is used to enter a safepoint while waiting for
// a mutex lock. It also ensures that the lock is not held while waiting for a GC
// to complete in the leaveSafePoint method, by releasing the lock if the
// leaveSafePoint method cannot complete without blocking, see
// SafePointBarrier::checkAndPark.
class SafePointAwareMutexLocker {
    WTF_MAKE_NONCOPYABLE(SafePointAwareMutexLocker);
public:
    explicit SafePointAwareMutexLocker(MutexBase& mutex, ThreadState::StackState stackState = ThreadState::HeapPointersOnStack)
        : m_mutex(mutex)
        , m_locked(false)
    {
        ThreadState* state = ThreadState::current();
        do {
            bool leaveSafePoint = false;
            // We cannot enter a safepoint if we are currently sweeping. In that
            // case we just try to acquire the lock without being at a safepoint.
            // If another thread tries to do a GC at that time it might time out
            // due to this thread not being at a safepoint and waiting on the lock.
            if (!state->isSweepInProgress() && !state->isAtSafePoint()) {
                state->enterSafePoint(stackState, this);
                leaveSafePoint = true;
            }
            m_mutex.lock();
            m_locked = true;
            if (leaveSafePoint) {
                // When leaving the safepoint we might end up releasing the mutex
                // if another thread is requesting a GC, see
                // SafePointBarrier::checkAndPark. This is the case where we
                // loop around to reacquire the lock.
                state->leaveSafePoint(this);
            }
        } while (!m_locked);
    }

    ~SafePointAwareMutexLocker()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
    }

private:
    friend class SafePointBarrier;

    // Called by the SafePointBarrier if the mutex must be released while
    // parked in leaveSafePoint; the constructor then loops to reacquire it.
    void reset()
    {
        ASSERT(m_locked);
        m_mutex.unlock();
        m_locked = false;
    }

    MutexBase& m_mutex;
    bool m_locked;
};

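// Minimal usage sketch (assumed; the surrounding class and members are
// hypothetical):
//
//   void CrossThreadCache::add(Entry* entry)
//   {
//       // Waits on m_mutex at a safepoint so that a GC requested by another
//       // thread is not blocked by this wait.
//       SafePointAwareMutexLocker locker(m_mutex);
//       m_entries.append(entry);
//   }
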
// Common header for heap pages. Needs to be defined before class Visitor.
class BaseHeapPage {
public:
    BaseHeapPage(PageMemory*, const GCInfo*, ThreadState*);
    virtual ~BaseHeapPage() { }

    // Check if the given address points to an object in this
    // heap page. If so, find the start of that object and mark it
    // using the given Visitor. Otherwise do nothing. The pointer must
    // be within the same aligned blinkPageSize as the this-pointer.
    //
    // This is used during conservative stack scanning to
    // conservatively mark all objects that could be referenced from
    // the stack.
    virtual void checkAndMarkPointer(Visitor*, Address) = 0;
    virtual bool contains(Address) = 0;

#if ENABLE(GC_PROFILE_MARKING)
    virtual const GCInfo* findGCInfo(Address) = 0;
#endif

    Address address() { return reinterpret_cast<Address>(this); }
    PageMemory* storage() const { return m_storage; }
    ThreadState* threadState() const { return m_threadState; }
    const GCInfo* gcInfo() { return m_gcInfo; }
    virtual bool isLargeObject() { return false; }
    virtual void markOrphaned()
    {
        m_threadState = 0;
        m_terminating = false;
        m_tracedAfterOrphaned = false;
    }
    bool orphaned() { return !m_threadState; }
    bool terminating() { return m_terminating; }
    void setTerminating() { m_terminating = true; }
    bool tracedAfterOrphaned() { return m_tracedAfterOrphaned; }
    void setTracedAfterOrphaned() { m_tracedAfterOrphaned = true; }
    size_t promptlyFreedSize() { return m_promptlyFreedSize; }
    void resetPromptlyFreedSize() { m_promptlyFreedSize = 0; }
    void addToPromptlyFreedSize(size_t size) { m_promptlyFreedSize += size; }

private:
    PageMemory* m_storage;
    const GCInfo* m_gcInfo;
    ThreadState* m_threadState;
    // Pointer-sized integers to ensure proper alignment of the
    // HeapPage header. We use some of the bits to determine
    // whether the page is part of a terminating thread or
    // if the page is traced after being terminated (orphaned).
    uintptr_t m_terminating : 1;
    uintptr_t m_tracedAfterOrphaned : 1;
    uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2
};

} // namespace WebCore

#endif // ThreadState_h