/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "heap/ThreadState.h"

#include "heap/Handle.h"
#include "heap/Heap.h"
#include "wtf/ThreadingPrimitives.h"

#if OS(WIN)
#include <stddef.h>
#include <windows.h>
#include <winnt.h>
#elif defined(__GLIBC__)
extern "C" void* __libc_stack_end;  // NOLINT
#endif

namespace WebCore {

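// Returns the start (highest address) of the current thread's stack, using
// the platform-specific mechanism available on each supported platform.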
static void* getStackStart()
{
#if defined(__GLIBC__) || OS(ANDROID)
    pthread_attr_t attr;
    if (!pthread_getattr_np(pthread_self(), &attr)) {
        void* base;
        size_t size;
        int error = pthread_attr_getstack(&attr, &base, &size);
        RELEASE_ASSERT(!error);
        pthread_attr_destroy(&attr);
        return reinterpret_cast<Address>(base) + size;
    }
#if defined(__GLIBC__)
    // pthread_getattr_np can fail for the main thread. In this case,
    // just like NaCl, we rely on __libc_stack_end to give us
    // the start of the stack.
    // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
    return __libc_stack_end;
#else
    ASSERT_NOT_REACHED();
    return 0;
#endif
#elif OS(MACOSX)
    return pthread_get_stackaddr_np(pthread_self());
#elif OS(WIN) && COMPILER(MSVC)
    // On Windows, stack limits for the current thread are available in
    // the thread information block (TIB). Its fields can be accessed through
    // the FS segment register on x86 and the GS segment register on x86_64.
#ifdef _WIN64
    return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
#else
    return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
#endif
#else
#error Unsupported getStackStart on this platform.
#endif
}


WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
SafePointBarrier* ThreadState::s_safePointBarrier = 0;
bool ThreadState::s_inGC = false;

static Mutex& threadAttachMutex()
{
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    return mutex;
}

typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);

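// SafePointBarrier coordinates parking and resuming the attached threads
// around a garbage collection: the thread initiating the GC parks all other
// attached threads at safe points, and resumes them when the GC is done.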
class SafePointBarrier {
public:
    SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
    ~SafePointBarrier() { }

    // Request other attached threads that are not at safe points to park themselves at safe points.
    void parkOthers()
    {
        ASSERT(ThreadState::current()->isAtSafePoint());

        // Lock threadAttachMutex() to prevent threads from attaching.
        threadAttachMutex().lock();

        ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();

        MutexLocker locker(m_mutex);
        atomicAdd(&m_unparkedThreadCount, threads.size());
        atomicSetOneToZero(&m_canResume);

        ThreadState* current = ThreadState::current();
        for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
            if (*it == current)
                continue;

            const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
            for (size_t i = 0; i < interruptors.size(); i++)
                interruptors[i]->requestInterrupt();
        }

        while (m_unparkedThreadCount > 0)
            m_parked.wait(m_mutex);
    }

    void resumeOthers()
    {
        ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
        atomicSubtract(&m_unparkedThreadCount, threads.size());
        atomicTestAndSetToOne(&m_canResume);
        {
            // FIXME: Resumed threads will all contend for
            // m_mutex just to unlock it later which is a waste of
            // resources.
            MutexLocker locker(m_mutex);
            m_resume.broadcast();
        }

        ThreadState* current = ThreadState::current();
        for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
            if (*it == current)
                continue;

            const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
            for (size_t i = 0; i < interruptors.size(); i++)
                interruptors[i]->clearInterrupt();
        }

        threadAttachMutex().unlock();
        ASSERT(ThreadState::current()->isAtSafePoint());
    }

    void doPark(ThreadState* state, intptr_t* stackEnd)
    {
        state->recordStackEnd(stackEnd);
        MutexLocker locker(m_mutex);
        if (!atomicDecrement(&m_unparkedThreadCount))
            m_parked.signal();
        while (!m_canResume)
            m_resume.wait(m_mutex);
        atomicIncrement(&m_unparkedThreadCount);
    }

    void checkAndPark(ThreadState* state)
    {
        ASSERT(!state->isSweepInProgress());
        if (!m_canResume) {
            pushAllRegisters(this, state, parkAfterPushRegisters);
            state->performPendingSweep();
        }
    }

    void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
    {
        state->recordStackEnd(stackEnd);
        // m_unparkedThreadCount tracks the number of unparked threads. It is
        // positive if and only if we have requested other threads to park
        // at safe points in preparation for GC. The last thread to park
        // itself will make the counter hit zero and should notify the GC
        // thread that it is safe to proceed.
        // If no thread is waiting for other threads to park, then this
        // counter can be negative: if N threads are at safe points the
        // counter will be -N.
        if (!atomicDecrement(&m_unparkedThreadCount)) {
            MutexLocker locker(m_mutex);
            m_parked.signal(); // Safe point reached.
        }
        state->copyStackUntilSafePointScope();
    }

    void enterSafePoint(ThreadState* state)
    {
        ASSERT(!state->isSweepInProgress());
        pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
    }

    void leaveSafePoint(ThreadState* state)
    {
        if (atomicIncrement(&m_unparkedThreadCount) > 0)
            checkAndPark(state);
        state->performPendingSweep();
    }

private:
    static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    {
        barrier->doPark(state, stackEnd);
    }

    static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    {
        barrier->doEnterSafePoint(state, stackEnd);
    }

    volatile int m_canResume;
    volatile int m_unparkedThreadCount;
    Mutex m_mutex;
    ThreadCondition m_parked;
    ThreadCondition m_resume;
};

ThreadState::ThreadState()
    : m_thread(currentThread())
    , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    , m_safePointScopeMarker(0)
    , m_atSafePoint(false)
    , m_interruptors()
    , m_gcRequested(false)
    , m_sweepRequested(0)
    , m_sweepInProgress(false)
    , m_noAllocationCount(0)
    , m_inGC(false)
    , m_heapContainsCache(new HeapContainsCache())
{
    ASSERT(!**s_threadSpecific);
    **s_threadSpecific = this;

    m_persistents = new PersistentAnchor();
    m_stats.clear();
    m_statsAfterLastGC.clear();
    // First allocate the general heap, then iterate through to
    // allocate the type-specific heaps.
    m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this);
    for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
        m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this);
}

ThreadState::~ThreadState()
{
    checkThread();
    for (int i = GeneralHeap; i < NumberOfHeaps; i++)
        delete m_heaps[i];
    delete m_persistents;
    m_persistents = 0;
    deleteAllValues(m_interruptors);
    **s_threadSpecific = 0;
}

void ThreadState::init()
{
    s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
    s_safePointBarrier = new SafePointBarrier;
    new(s_mainThreadStateStorage) ThreadState();
    attachedThreads().add(mainThreadState());
}

void ThreadState::shutdown()
{
    mainThreadState()->~ThreadState();
}

void ThreadState::attach()
{
    MutexLocker locker(threadAttachMutex());
    ThreadState* state = new ThreadState();
    attachedThreads().add(state);
}

void ThreadState::detach()
{
    ThreadState* state = current();
    MutexLocker locker(threadAttachMutex());
    attachedThreads().remove(state);
    delete state;
}

void ThreadState::visitRoots(Visitor* visitor)
{
    AttachedThreadStateSet& threads = attachedThreads();
    for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
        (*it)->trace(visitor);
}

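// Conservatively scan this thread's stack: every word between the recorded
// stack end and the stack start, plus any copy taken at a safe point scope,
// is checked against the heap and marked if it points into it.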
NO_SANITIZE_ADDRESS
void ThreadState::visitStack(Visitor* visitor)
{
    Address* end = reinterpret_cast<Address*>(m_startOfStack);
    for (Address* current = reinterpret_cast<Address*>(m_endOfStack); current < end; ++current) {
        Heap::checkAndMarkPointer(visitor, *current);
    }

    for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it)
        Heap::checkAndMarkPointer(visitor, *it);
}

void ThreadState::visitPersistents(Visitor* visitor)
{
    for (PersistentNode* current = m_persistents->m_next; current != m_persistents; current = current->m_next) {
        current->trace(visitor);
    }
}

void ThreadState::trace(Visitor* visitor)
{
    if (m_stackState == HeapPointersOnStack)
        visitStack(visitor);
    visitPersistents(visitor);
}

bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
{
    BaseHeapPage* page = heapPageFromAddress(address);
    if (page)
        return page->checkAndMarkPointer(visitor, address);
    // Not in heap pages, check large objects.
    for (int i = 0; i < NumberOfHeaps; i++) {
        if (m_heaps[i]->checkAndMarkLargeHeapObject(visitor, address))
            return true;
    }
    return false;
}

// Trigger garbage collection on a 50% increase in size, but not for
// less than 2 pages.
static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
{
    if (newSize < 2 * blinkPagePayloadSize())
        return false;
    return newSize > oldSize + (oldSize >> 1);
}

// FIXME: The heuristics are local for a thread at this
// point. Consider using heuristics that take memory for all threads
// into account.
bool ThreadState::shouldGC()
{
    return increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
}

// Trigger conservative garbage collection on a 100% increase in size,
// but not for less than 2 pages.
static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
{
    if (newSize < 2 * blinkPagePayloadSize())
        return false;
    return newSize > 2 * oldSize;
}

// FIXME: The heuristics are local for a thread at this
// point. Consider using heuristics that take memory for all threads
// into account.
bool ThreadState::shouldForceConservativeGC()
{
    return increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
}

bool ThreadState::sweepRequested()
{
    ASSERT(isAnyThreadInGC() || checkThread());
    return m_sweepRequested;
}

void ThreadState::setSweepRequested()
{
    // Sweep requested is set from the thread that initiates garbage
    // collection, which could be different from the thread for this
    // thread state. Therefore the setting of m_sweepRequested needs a
    // barrier.
    atomicTestAndSetToOne(&m_sweepRequested);
}

void ThreadState::clearSweepRequested()
{
    checkThread();
    m_sweepRequested = 0;
}

bool ThreadState::gcRequested()
{
    checkThread();
    return m_gcRequested;
}

void ThreadState::setGCRequested()
{
    checkThread();
    m_gcRequested = true;
}

void ThreadState::clearGCRequested()
{
    checkThread();
    m_gcRequested = false;
}

bool ThreadState::isConsistentForGC()
{
    for (int i = 0; i < NumberOfHeaps; i++) {
        if (!m_heaps[i]->isConsistentForGC())
            return false;
    }
    return true;
}

void ThreadState::makeConsistentForGC()
{
    for (int i = 0; i < NumberOfHeaps; i++)
        m_heaps[i]->makeConsistentForGC();
}

void ThreadState::prepareForGC()
{
    for (int i = 0; i < NumberOfHeaps; i++) {
        BaseHeap* heap = m_heaps[i];
        heap->makeConsistentForGC();
        // If there are parked threads with outstanding sweep requests, clear their mark bits.
        // This happens if a thread did not have time to wake up and sweep
        // before the next GC arrived.
        if (sweepRequested())
            heap->clearMarks();
    }
    setSweepRequested();
}

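// Find the heap page containing |address|, consulting the heap-contains
// cache first. Negative results (an address not in any heap page) are
// cached as well, as a null entry.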
BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
{
    BaseHeapPage* page;
    bool found = heapContainsCache()->lookup(address, &page);
    if (found)
        return page;

    for (int i = 0; i < NumberOfHeaps; i++) {
        page = m_heaps[i]->heapPageFromAddress(address);
#ifndef NDEBUG
        Address blinkPageAddr = roundToBlinkPageStart(address);
#endif
        ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr));
        ASSERT(page == m_heaps[i]->heapPageFromAddress(blinkPageAddr + blinkPageSize - 1));
        if (page)
            break;
    }
    heapContainsCache()->addEntry(address, page);
    return page; // 0 if not found.
}

bool ThreadState::contains(Address address)
{
    // Check heap contains cache first.
    BaseHeapPage* page = heapPageFromAddress(address);
    if (page)
        return true;
    // If no heap page was found check large objects.
    for (int i = 0; i < NumberOfHeaps; i++) {
        if (m_heaps[i]->largeHeapObjectFromAddress(address))
            return true;
    }
    return false;
}

void ThreadState::getStats(HeapStats& stats)
{
    stats = m_stats;
#ifndef NDEBUG
    if (isConsistentForGC()) {
        HeapStats scannedStats;
        scannedStats.clear();
        for (int i = 0; i < NumberOfHeaps; i++)
            m_heaps[i]->getScannedStats(scannedStats);
        ASSERT(scannedStats == stats);
    }
#endif
}

void ThreadState::stopThreads()
{
    s_safePointBarrier->parkOthers();
}

void ThreadState::resumeThreads()
{
    s_safePointBarrier->resumeOthers();
}

void ThreadState::safePoint(StackState stackState)
{
    checkThread();
    if (stackState == NoHeapPointersOnStack && gcRequested())
        Heap::collectGarbage(NoHeapPointersOnStack);
    m_stackState = stackState;
    s_safePointBarrier->checkAndPark(this);
    m_stackState = HeapPointersOnStack;
}

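// Mark this thread as being at a safe point for the duration of the scope
// that starts here. |scopeMarker| bounds the portion of the stack that
// copyStackUntilSafePointScope() snapshots on entry so it can still be
// scanned conservatively while this thread stays inside the scope.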
void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
{
    ASSERT(stackState == NoHeapPointersOnStack || scopeMarker);
    if (stackState == NoHeapPointersOnStack && gcRequested())
        Heap::collectGarbage(NoHeapPointersOnStack);
    checkThread();
    ASSERT(!m_atSafePoint);
    m_atSafePoint = true;
    m_stackState = stackState;
    m_safePointScopeMarker = scopeMarker;
    s_safePointBarrier->enterSafePoint(this);
}

void ThreadState::leaveSafePoint()
{
    checkThread();
    ASSERT(m_atSafePoint);
    m_atSafePoint = false;
    m_stackState = HeapPointersOnStack;
    clearSafePointScopeMarker();
    s_safePointBarrier->leaveSafePoint(this);
}

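// Copy the part of the stack between the current stack end and the safe
// point scope marker. The live stack below the marker can change while this
// thread is inside the scope, so visitStack() scans the copy instead.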
void ThreadState::copyStackUntilSafePointScope()
{
    if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack)
        return;

    Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
    Address* from = reinterpret_cast<Address*>(m_endOfStack);
    RELEASE_ASSERT(from < to);
    RELEASE_ASSERT(to < reinterpret_cast<Address*>(m_startOfStack));
    size_t slotCount = static_cast<size_t>(to - from);
    ASSERT(slotCount < 1024); // Catch potential performance issues.

    ASSERT(!m_safePointStackCopy.size());
    m_safePointStackCopy.append(reinterpret_cast<Address*>(from), slotCount);
}

void ThreadState::performPendingSweep()
{
    if (sweepRequested()) {
        m_sweepInProgress = true;
        {
            // Disallow allocation during sweeping and finalization.
            enterNoAllocationScope();
            m_stats.clear(); // Sweeping will recalculate the stats.
            for (int i = 0; i < NumberOfHeaps; i++)
                m_heaps[i]->sweep();
            leaveNoAllocationScope();
        }
        getStats(m_statsAfterLastGC);
        m_sweepInProgress = false;
        clearGCRequested();
        clearSweepRequested();
    }
}

void ThreadState::addInterruptor(Interruptor* interruptor)
{
    SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);

    {
        MutexLocker locker(threadAttachMutex());
        m_interruptors.append(interruptor);
    }
}

void ThreadState::removeInterruptor(Interruptor* interruptor)
{
    SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);

    {
        MutexLocker locker(threadAttachMutex());
        size_t index = m_interruptors.find(interruptor);
        RELEASE_ASSERT(index != kNotFound);
        m_interruptors.remove(index);
    }
}

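// Called on the interrupted thread by an Interruptor implementation; brings
// the thread to a safe point so that a requested GC can proceed.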
void ThreadState::Interruptor::onInterrupted()
{
    ThreadState* state = ThreadState::current();
    ASSERT(state);
    ASSERT(!state->isAtSafePoint());
    state->safePoint(HeapPointersOnStack);
}

ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
{
    DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
    return threads;
}

}