2 * Copyright (C) 2013 Google Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include "heap/AddressSanitizer.h"
35 #include "heap/HeapExport.h"
36 #include "wtf/HashSet.h"
37 #include "wtf/OwnPtr.h"
38 #include "wtf/PassOwnPtr.h"
39 #include "wtf/ThreadSpecific.h"
40 #include "wtf/Threading.h"
41 #include "wtf/ThreadingPrimitives.h"
42 #include "wtf/Vector.h"
// Forward declarations for heap-internal types used throughout this header.
// NOTE(review): the embedded numbering has gaps here (50 -> 53, 54 -> 56), so
// some declarations appear to have been dropped from this listing; names used
// later (BaseHeap, BaseHeapPage, PersistentNode, Visitor) are not declared in
// what is visible -- confirm against the original header.
48 class FinalizedHeapObjectHeader;
49 class HeapContainsCache;
50 class HeapObjectHeader;
53 class SafePointBarrier;
54 template<typename Header> class ThreadHeap;
// Raw byte address into the garbage collected heap.
56 typedef uint8_t* Address;
58 // ThreadAffinity indicates which threads objects can be used on. We
59 // distinguish between objects that can be used on the main thread
60 // only and objects that can be used on any thread.
62 // For objects that can only be used on the main thread we avoid going
63 // through thread-local storage to get to the thread state.
65 // FIXME: We should evaluate the performance gain. Having
66 // ThreadAffinity is complicating the implementation and we should get
67 // rid of it if it is fast enough to go through thread-local storage
74 // By default all types are considered to be used on the main thread only.
// Default threading trait: a type is assumed main-thread-only unless marked
// otherwise with the USED_FROM_MULTIPLE_THREADS macros below.
// NOTE(review): the primary template header (template<typename T>) and the
// closing "};" appear to have been dropped from this listing, as has the
// ThreadAffinity enum (MainThreadOnly/AnyThread) that the trait refers to.
76 struct ThreadingTrait {
77 static const ThreadAffinity Affinity = MainThreadOnly;
80 // Marks the specified class as being used from multiple threads. When
81 // a class is used from multiple threads we go through thread local
82 // storage to get the heap in which to allocate an object of that type
83 // and when allocating a Persistent handle for an object with that
84 // type. Notice that marking the base class does not automatically
85 // mark its descendants and they have to be explicitly marked.
86 #define USED_FROM_MULTIPLE_THREADS(Class) \
88 template<> struct ThreadingTrait<Class> { \
89 static const ThreadAffinity Affinity = AnyThread; \
92 #define USED_FROM_MULTIPLE_THREADS_NAMESPACE(Namespace, Class) \
93 namespace Namespace { \
97 template<> struct ThreadingTrait<Namespace::Class> { \
98 static const ThreadAffinity Affinity = AnyThread; \
// A const-qualified type shares the thread affinity of its unqualified type.
102 template<typename U> class ThreadingTrait<const U> : public ThreadingTrait<U> { };
104 // List of typed heaps. The list is used to generate the implementation
105 // of typed heap related methods.
107 // To create a new typed heap add a H(<ClassName>) to the
108 // FOR_EACH_TYPED_HEAP macro below.
109 // FIXME: When the Node hierarchy has been moved use Node in our
110 // tests instead of TestTypedHeapClass.
111 #define FOR_EACH_TYPED_HEAP(H) \
112 H(TestTypedHeapClass)
// Expands a typed-heap class name into its enumerator name (e.g. FooHeap).
115 #define TypedHeapEnumName(Type) Type##Heap,
// NOTE(review): the enum declaration these enumerators belong to (presumably
// also declaring GeneralHeap and NumberOfHeaps, both referenced later in this
// header) appears to have been dropped from this listing (gap 115 -> 119).
119 FOR_EACH_TYPED_HEAP(TypedHeapEnumName)
123 // Trait to give an index in the thread state to all the
124 // type-specialized heaps. The general heap is at index 0 in the
125 // thread state. The index for other type-specialized heaps are given
126 // by the TypedHeaps enum above.
// NOTE(review): the primary HeapTrait template header appears to have been
// dropped (gap 126 -> 129); the two lines below read as its body. Per the
// visible code, the general heap stores finalized headers while typed heaps
// use plain HeapObjectHeader.
129 static const int index = GeneralHeap;
130 typedef ThreadHeap<FinalizedHeapObjectHeader> HeapType;
133 #define DEFINE_HEAP_INDEX_TRAIT(Type) \
136 struct HeapTrait<class Type> { \
137 static const int index = Type##Heap; \
138 typedef ThreadHeap<HeapObjectHeader> HeapType; \
141 FOR_EACH_TYPED_HEAP(DEFINE_HEAP_INDEX_TRAIT)
143 // A HeapStats structure keeps track of the amount of memory allocated
144 // for a Blink heap and how much of that memory is used for actual
145 // Blink objects. These stats are used in the heuristics to determine
146 // when to perform garbage collections.
// NOTE(review): the "class HeapStats {" header, access specifiers and most
// brace lines appear to have been dropped from this listing (gaps in the
// embedded numbering); only comments have been added below.
149 size_t totalObjectSpace() const { return m_totalObjectSpace; }
150 size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; }
// Accumulates another HeapStats (e.g. another thread's) into this one.
152 void add(HeapStats* other)
154 m_totalObjectSpace += other->m_totalObjectSpace;
155 m_totalAllocatedSpace += other->m_totalAllocatedSpace;
158 void inline increaseObjectSpace(size_t newObjectSpace)
160 m_totalObjectSpace += newObjectSpace;
163 void inline decreaseObjectSpace(size_t deadObjectSpace)
165 m_totalObjectSpace -= deadObjectSpace;
168 void inline increaseAllocatedSpace(size_t newAllocatedSpace)
170 m_totalAllocatedSpace += newAllocatedSpace;
173 void inline decreaseAllocatedSpace(size_t deadAllocatedSpace)
175 m_totalAllocatedSpace -= deadAllocatedSpace;
// NOTE(review): the two assignments below look like the body of a reset
// method whose signature line was dropped (numbering jumps 175 -> 180).
180 m_totalObjectSpace = 0;
181 m_totalAllocatedSpace = 0;
// Equality compares both counters.
184 bool operator==(const HeapStats& other)
186 return m_totalAllocatedSpace == other.m_totalAllocatedSpace
187 && m_totalObjectSpace == other.m_totalObjectSpace;
191 size_t m_totalObjectSpace; // Actually contains objects that may be live, not including headers.
192 size_t m_totalAllocatedSpace; // Allocated from the OS.
194 friend class HeapTester;
// Per-thread state for the Blink garbage collector: one ThreadState per
// attached thread, reachable via thread-local storage (see current()).
// NOTE(review): many structural lines of this class (access specifiers,
// enum/brace lines, several declarations) have been dropped from this
// listing -- see the gaps in the embedded numbering.
197 class HEAP_EXPORT ThreadState {
198 WTF_MAKE_NONCOPYABLE(ThreadState);
200 // When garbage collecting we need to know whether or not there
201 // can be pointers to Blink GC managed objects on the stack for
202 // each thread. When threads reach a safe point they record
203 // whether or not they have pointers on the stack.
// NOTE(review): the enum header (StackState) and its other enumerator
// (HeapPointersOnStack, used by enterSafePointWithPointers below) appear
// to have been dropped here (gap 203 -> 205 -> 209).
205 NoHeapPointersOnStack,
209 // The set of ThreadStates for all threads attached to the Blink
210 // garbage collector.
211 typedef HashSet<ThreadState*> AttachedThreadStateSet;
212 static AttachedThreadStateSet& attachedThreads();
214 // Initialize threading infrastructure. Should be called from the main
// thread (comment truncated in this listing). NOTE(review): the init()
// declaration itself appears to have been dropped (numbering 214 -> 217).
217 static void shutdown();
219 // Trace all GC roots, called when marking the managed heap objects.
220 static void visitRoots(Visitor*);
222 // Associate ThreadState object with the current thread. After this
223 // call thread can start using the garbage collected heap infrastructure.
224 // It also has to periodically check for safepoints.
225 static void attach();
227 // When ThreadState is detaching from non-main thread its
228 // heap is expected to be empty (because it is going away).
229 // Perform registered cleanup tasks and garbage collection
230 // to sweep away any objects that are left on this heap.
231 // We assert that nothing must remain after this cleanup.
232 // If assertion does not hold we crash as we are potentially
233 // in the dangling pointer situation.
// NOTE(review): the cleanup method declaration this comment documents
// appears to have been dropped (numbering jumps 233 -> 236).
236 // Disassociate attached ThreadState from the current thread. The thread
237 // can no longer use the garbage collected heap after this call.
238 static void detach();
// Returns the calling thread's ThreadState via thread-local storage.
240 static ThreadState* current() { return **s_threadSpecific; }
// The main thread's state lives in statically reserved storage
// (s_mainThreadStateStorage) rather than being heap allocated.
241 static ThreadState* mainThreadState()
243 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
246 static bool isMainThread() { return current() == mainThreadState(); }
// Debug check that this ThreadState is only used from its owning thread.
248 inline bool checkThread() const
250 ASSERT(m_thread == currentThread());
254 // shouldGC and shouldForceConservativeGC implement the heuristics
255 // that are used to determine when to collect garbage. If
256 // shouldForceConservativeGC returns true, we force the garbage
257 // collection immediately. Otherwise, if shouldGC returns true, we
258 // record that we should garbage collect the next time we return
259 // to the event loop. If both return false, we don't need to
260 // collect garbage at this point.
// NOTE(review): the shouldGC() declaration described above appears to have
// been dropped from this listing (numbering jumps 260 -> 262).
262 bool shouldForceConservativeGC();
264 // If gcRequested returns true when a thread returns to its event
265 // loop the thread will initiate a garbage collection.
// NOTE(review): the gcRequested() declaration appears to have been dropped
// (numbering jumps 265 -> 267).
267 void setGCRequested();
268 void clearGCRequested();
// Sweep-request flag management; presumably backed by the volatile
// m_sweepRequested member declared below -- confirm in the implementation.
270 bool sweepRequested();
271 void setSweepRequested();
272 void clearSweepRequested();
// Presumably performs a sweep that was requested earlier but deferred --
// confirm against the implementation.
273 void performPendingSweep();
275 // Support for disallowing allocation. Mainly used for sanity checks.
// Allocation is forbidden while at a safepoint or while inside a
// no-allocation scope; the counter supports nested scopes.
277 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; }
278 void enterNoAllocationScope() { m_noAllocationCount++; }
279 void leaveNoAllocationScope() { m_noAllocationCount--; }
281 // Before performing GC the thread-specific heap state should be
282 // made consistent for garbage collection.
283 bool isConsistentForGC();
284 void makeConsistentForGC();
286 // Is the thread corresponding to this thread state currently in GC?
288 bool isInGC() const { return m_inGC; }
290 // Is any of the threads registered with the blink garbage collection
291 // infrastructure currently performing GC?
292 static bool isAnyThreadInGC() { return s_inGC; }
// NOTE(review): declarations between original lines 293 and 308 (including
// whatever backs m_inGC's setters) are not visible in this listing.
308 // Is the thread corresponding to this thread state currently sweeping?
310 bool isSweepInProgress() const { return m_sweepInProgress; }
314 // Safepoint related functionality.
316 // When a thread attempts to perform GC it needs to stop all other threads
317 // that use the heap or at least guarantee that they will not touch any
318 // heap allocated object until GC is complete.
320 // We say that a thread is at a safepoint if this thread is guaranteed to
321 // not touch any heap allocated object or any heap related functionality until
322 // it leaves the safepoint.
324 // Notice that a thread does not have to be paused if it is at safepoint it
325 // can continue to run and perform tasks that do not require interaction
326 // with the heap. It will be paused if it attempts to leave the safepoint and
327 // there is a GC in progress.
329 // Each thread that has ThreadState attached must:
330 // - periodically check if GC is requested from another thread by calling a safePoint() method;
331 // - use SafePointScope around long running loops that have no safePoint() invocation inside,
332 // such loops must not touch any heap object;
333 // - register an Interruptor that can interrupt long running loops that have no calls to safePoint and
334 // are not wrapped in a SafePointScope (e.g. Interruptor for JavaScript code)
337 // Request all other threads to stop. Must only be called if the current thread is at safepoint.
338 static void stopThreads();
339 static void resumeThreads();
341 // Check if GC is requested by another thread and pause this thread if this is the case.
342 // Can only be called when current thread is in a consistent state.
343 void safePoint(StackState);
345 // Mark current thread as running inside safepoint.
346 void enterSafePointWithoutPointers() { enterSafePoint(NoHeapPointersOnStack, 0); }
347 void enterSafePointWithPointers(void* scopeMarker) { enterSafePoint(HeapPointersOnStack, scopeMarker); }
348 void leaveSafePoint();
349 bool isAtSafePoint() const { return m_atSafePoint; }
// RAII helper keeping the thread inside a safepoint for the scope's
// lifetime; nested use must be explicitly allowed via the nesting argument.
// NOTE(review): the ScopeNesting enum (NoNesting/AllowNesting) and the
// public/brace lines appear to have been dropped (numbering 351 -> 358).
351 class SafePointScope {
358 explicit SafePointScope(StackState stackState, ScopeNesting nesting = NoNesting)
359 : m_state(ThreadState::current())
361 if (m_state->isAtSafePoint()) {
362 RELEASE_ASSERT(nesting == AllowNesting);
363 // We can ignore stackState because there should be no heap object
364 // pointers manipulation after outermost safepoint was entered.
367 m_state->enterSafePoint(stackState, this);
// NOTE(review): the call below looks like the destructor body leaving the
// safepoint; the destructor signature and the constructor's else-branch
// bracing were dropped (numbering jumps 367 -> 374).
374 m_state->leaveSafePoint();
378 ThreadState* m_state;
381 // If attached thread enters long running loop that can call back
382 // into Blink and leaving and reentering safepoint at every
383 // transition between this loop and Blink is deemed too expensive
384 // then instead of marking this loop as a GC safepoint thread
385 // can provide an interruptor object which would allow GC
386 // to temporarily interrupt and pause this long running loop at
387 // an arbitrary moment creating a safepoint for a GC.
// NOTE(review): access specifier lines of this class appear to have been
// dropped from this listing (gaps 388 -> 390, 398 -> 401).
388 class HEAP_EXPORT Interruptor {
390 virtual ~Interruptor() { }
392 // Request the interruptor to interrupt the thread and
393 // call onInterrupted on that thread once interruption succeeds.
395 virtual void requestInterrupt() = 0;
397 // Clear previous interrupt request.
398 virtual void clearInterrupt() = 0;
401 // This method is called on the interrupted thread to
402 // create a safepoint for a GC.
403 void onInterrupted();
// Register/unregister an Interruptor for this thread.
406 void addInterruptor(Interruptor*);
407 void removeInterruptor(Interruptor*);
409 // CleanupTasks are executed when ThreadState performs
410 // cleanup before detaching.
// NOTE(review): the "class CleanupTask {" header appears to have been
// dropped from this listing (numbering jumps 410 -> 413).
413 virtual ~CleanupTask() { }
415 // Executed before the final GC.
416 virtual void preCleanup() { }
418 // Executed after the final GC. Thread heap is empty at this point.
419 virtual void postCleanup() { }
// Takes ownership of the task; it runs during detach-time cleanup.
422 void addCleanupTask(PassOwnPtr<CleanupTask> cleanupTask)
424 m_cleanupTasks.append(cleanupTask);
427 // Should only be called under protection of threadAttachMutex().
428 const Vector<Interruptor*>& interruptors() const { return m_interruptors; }
// Records the current end of stack in m_endOfStack -- presumably consulted
// when conservatively scanning the stack (see visitStack()); confirm.
430 void recordStackEnd(intptr_t* endOfStack)
432 m_endOfStack = endOfStack;
435 // Get one of the heap structures for this thread.
437 // The heap is split into multiple heap parts based on object
438 // types. To get the index for a given type, use
439 // HeapTrait<Type>::index.
440 BaseHeap* heap(int index) const { return m_heaps[index]; }
442 // Infrastructure to determine if an address is within one of the
443 // address ranges for the Blink heap.
444 HeapContainsCache* heapContainsCache() { return m_heapContainsCache; }
445 bool contains(Address);
446 bool contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
447 bool contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
449 // Finds the Blink HeapPage in this thread-specific heap
450 // corresponding to a given address. Return 0 if the address is
451 // not contained in any of the pages.
452 BaseHeapPage* heapPageFromAddress(Address);
454 // List of persistent roots allocated on the given thread.
455 PersistentNode* roots() const { return m_persistents; }
457 // List of global persistent roots not owned by any particular thread.
458 // globalRootsMutex must be acquired before any modifications.
459 static PersistentNode* globalRoots();
460 static Mutex& globalRootsMutex();
462 // Visit local thread stack and trace all pointers conservatively.
463 void visitStack(Visitor*);
465 // Visit all persistents allocated on this thread.
466 void visitPersistents(Visitor*);
468 // Checks a given address and, if it is a pointer into the oilpan heap,
469 // marks the object to which it points.
470 bool checkAndMarkPointer(Visitor*, Address);
// Fills the out-parameter with this thread's heap statistics.
472 void getStats(HeapStats&);
// NOTE(review): the m_stats member referenced here is not visible in this
// listing (likely dropped from the member list below).
473 HeapStats& stats() { return m_stats; }
474 HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; }
// Constructed via attach()/placement new (see s_mainThreadStateStorage
// comment below); not publicly constructible.
477 explicit ThreadState();
// NOTE(review): the destructor declaration appears to have been dropped
// from this listing (numbering jumps 477 -> 480).
480 friend class SafePointBarrier;
482 void enterSafePoint(StackState, void*);
483 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope();
// Drops the saved stack copy and scope marker recorded at safepoint entry.
484 void clearSafePointScopeMarker()
486 m_safePointStackCopy.clear();
487 m_safePointScopeMarker = 0;
490 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific;
491 static SafePointBarrier* s_safePointBarrier;
493 // This variable is flipped to true after all threads are stopped
494 // and outermost GC has started.
// NOTE(review): the s_inGC declaration this comment describes (read by
// isAnyThreadInGC()) appears to have been dropped (numbering 494 -> 497).
497 // We can't create a static member of type ThreadState here
498 // because it will introduce global constructor and destructor.
499 // We would like to manage lifetime of the ThreadState attached
500 // to the main thread explicitly instead and still use normal
501 // constructor and destructor for the ThreadState class.
502 // For this we reserve static storage for the main ThreadState
503 // and lazily construct ThreadState in it using placement new.
504 static uint8_t s_mainThreadStateStorage[];
506 void trace(Visitor*);
// NOTE(review): numbering gaps in the member list below indicate several
// members were dropped from this listing (e.g. those backing
// isAtSafePoint(), isInGC() and stats()).
508 ThreadIdentifier m_thread; // Owning thread; checked by checkThread().
509 PersistentNode* m_persistents; // Head of this thread's persistent roots (see roots()).
510 StackState m_stackState;
511 intptr_t* m_startOfStack;
512 intptr_t* m_endOfStack; // Set by recordStackEnd().
513 void* m_safePointScopeMarker; // Cleared by clearSafePointScopeMarker().
514 Vector<Address> m_safePointStackCopy;
516 Vector<Interruptor*> m_interruptors; // Exposed via interruptors().
518 volatile int m_sweepRequested;
519 bool m_sweepInProgress; // See isSweepInProgress().
520 size_t m_noAllocationCount; // Nesting depth of no-allocation scopes.
522 BaseHeap* m_heaps[NumberOfHeaps]; // Indexed by HeapTrait<Type>::index; see heap().
523 HeapContainsCache* m_heapContainsCache; // See heapContainsCache().
525 HeapStats m_statsAfterLastGC; // See statsAfterLastGC().
527 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; // Run during detach-time cleanup.
// Maps a ThreadAffinity onto the way of obtaining the current ThreadState:
// main-thread-only types use the reserved main-thread state directly,
// any-thread types go through thread-local storage (ThreadState::current()).
531 template<ThreadAffinity affinity> class ThreadStateFor;
// NOTE(review): the "public:" and closing "};" lines of both specializations
// appear to have been dropped from this listing.
533 template<> class ThreadStateFor<MainThreadOnly> {
535 static ThreadState* state()
537 // This specialization must only be used from the main thread.
538 ASSERT(ThreadState::isMainThread());
539 return ThreadState::mainThreadState();
543 template<> class ThreadStateFor<AnyThread> {
545 static ThreadState* state() { return ThreadState::current(); }
548 // FIXME: Experiment if the threading affinity really matters for performance.
549 // FIXME: Move these macros and other related structures to a separate file.
// The classes listed below are marked as usable from any thread, giving
// their ThreadingTrait specialization an AnyThread affinity (see macro above).
550 USED_FROM_MULTIPLE_THREADS(Algorithm);
551 USED_FROM_MULTIPLE_THREADS(Crypto);
552 USED_FROM_MULTIPLE_THREADS(DeprecatedStorageQuota);
553 USED_FROM_MULTIPLE_THREADS(Key);
554 USED_FROM_MULTIPLE_THREADS(KeyPair);
555 USED_FROM_MULTIPLE_THREADS(Notification);
556 USED_FROM_MULTIPLE_THREADS(NotificationCenter);
557 USED_FROM_MULTIPLE_THREADS(SubtleCrypto);
558 USED_FROM_MULTIPLE_THREADS(TextDecoder);
559 USED_FROM_MULTIPLE_THREADS(TextEncoder);
560 USED_FROM_MULTIPLE_THREADS(WebKitNotification);
561 USED_FROM_MULTIPLE_THREADS(WorkerCrypto);
562 USED_FROM_MULTIPLE_THREADS(WorkerPerformance);
566 #endif // ThreadState_h