Upstream version 9.38.198.0
platform/framework/web/crosswalk.git: src/third_party/WebKit/Source/platform/heap/ThreadState.cpp
index 6ddefb0..8d48c23 100644
 #include "config.h"
 #include "platform/heap/ThreadState.h"
 
+#include "platform/ScriptForbiddenScope.h"
 #include "platform/TraceEvent.h"
 #include "platform/heap/AddressSanitizer.h"
 #include "platform/heap/Handle.h"
 #include "platform/heap/Heap.h"
+#include "public/platform/Platform.h"
 #include "wtf/ThreadingPrimitives.h"
+#if ENABLE(GC_PROFILE_HEAP)
+#include "platform/TracedValue.h"
+#endif
 
 #if OS(WIN)
 #include <stddef.h>
 #elif defined(__GLIBC__)
 extern "C" void* __libc_stack_end;  // NOLINT
 #endif
 
-namespace WebCore {
+#if defined(MEMORY_SANITIZER)
+#include <sanitizer/msan_interface.h>
+#endif
+
+namespace blink {
 
 static void* getStackStart()
 {
@@ -99,7 +108,7 @@ static Mutex& threadAttachMutex()
 
 static double lockingTimeout()
 {
-    // Wait time for parking all threads is at most 500 MS.
+    // Wait time for parking all threads is at most 100 ms.
     return 0.100;
 }
 
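The timeout guards the case where some thread cannot be parked in time, for example because it is blocked in a syscall and never reaches a safepoint. A minimal sketch, in standard C++ rather than the WTF primitives this file actually uses, of how a barrier can bound its wait on such a value (all names here are illustrative):

```cpp
#include <chrono>
#include <condition_variable>
#include <mutex>

// Illustrative sketch only: bound the wait for all threads to park,
// mirroring the 100 ms lockingTimeout() above.
bool waitForAllThreadsToPark(std::mutex& m, std::condition_variable& cv,
                             int& unparkedCount /* guarded by m */)
{
    std::unique_lock<std::mutex> lock(m);
    // wait_for returns false if the predicate is still false at timeout,
    // in which case the caller must abandon (or retry) the GC.
    return cv.wait_for(lock, std::chrono::milliseconds(100),
                       [&] { return unparkedCount == 0; });
}
```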
@@ -181,12 +190,18 @@ public:
         ASSERT(ThreadState::current()->isAtSafePoint());
     }
 
-    void checkAndPark(ThreadState* state)
+    void checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
     {
         ASSERT(!state->isSweepInProgress());
         if (!acquireLoad(&m_canResume)) {
+            // If we are leaving the safepoint from a SafePointAwareMutexLocker,
+            // call out to release the lock before going to sleep. This enables the
+            // lock to be acquired in the sweep phase, e.g. during weak processing
+            // or finalization. The SafePointAwareMutexLocker will reenter the
+            // safepoint and reacquire the lock after leaving this safepoint.
+            if (locker)
+                locker->reset();
             pushAllRegisters(this, state, parkAfterPushRegisters);
-            state->performPendingSweep();
         }
     }
 
@@ -196,10 +211,10 @@ public:
         pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
     }
 
-    void leaveSafePoint(ThreadState* state)
+    void leaveSafePoint(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
     {
         if (atomicIncrement(&m_unparkedThreadCount) > 0)
-            checkAndPark(state);
+            checkAndPark(state, locker);
     }
 
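The locker referenced above lives in ThreadState.h. As a rough sketch of the protocol it must follow (structure and names assumed here, not copied from the header): acquire the mutex only while at a safepoint, and retry if the barrier's reset() released the lock while the thread was parked:

```cpp
// A minimal sketch (not the actual ThreadState.h code) of a locker obeying
// the release/reacquire protocol described in checkAndPark() above.
class SafePointAwareMutexLocker {
public:
    explicit SafePointAwareMutexLocker(Mutex& mutex)
        : m_mutex(mutex)
        , m_locked(false)
    {
        ThreadState* state = ThreadState::current();
        do {
            // Enter a safepoint so a GCing thread never waits on us while
            // we are waiting on the mutex.
            state->enterSafePointWithoutPointers();
            m_mutex.lock();
            m_locked = true;
            // If a GC is pending, leaveSafePoint() parks us and invokes
            // reset(), dropping the lock; in that case, retry.
            state->leaveSafePoint(this);
        } while (!m_locked);
    }

    ~SafePointAwareMutexLocker()
    {
        if (m_locked)
            m_mutex.unlock();
    }

private:
    friend class SafePointBarrier;

    // Called by SafePointBarrier::checkAndPark() before the thread sleeps.
    void reset()
    {
        m_mutex.unlock();
        m_locked = false;
    }

    Mutex& m_mutex;
    bool m_locked;
};
```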
 private:
@@ -249,6 +264,17 @@ private:
     ThreadCondition m_resume;
 };
 
+
+BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
+    : m_storage(storage)
+    , m_gcInfo(gcInfo)
+    , m_threadState(state)
+    , m_terminating(false)
+    , m_tracedAfterOrphaned(false)
+{
+    ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
+}
+
 ThreadState::ThreadState()
     : m_thread(currentThread())
     , m_persistents(adoptPtr(new PersistentAnchor()))
@@ -264,7 +290,8 @@ ThreadState::ThreadState()
     , m_noAllocationCount(0)
     , m_inGC(false)
     , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
-    , m_isCleaningUp(false)
+    , m_isTerminating(false)
+    , m_lowCollectionRate(false)
 #if defined(ADDRESS_SANITIZER)
     , m_asanFakeStack(__asan_get_current_fake_stack())
 #endif
@@ -276,9 +303,9 @@ ThreadState::ThreadState()
     m_statsAfterLastGC.clear();
     // First allocate the general heap, then iterate through to allocate
     // the type-specific heaps.
-    m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this);
+    m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this, GeneralHeap);
     for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
-        m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this);
+        m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this, i);
 
     CallbackStack::init(&m_weakCallbackStack);
 }
@@ -322,12 +349,14 @@ void ThreadState::detachMainThread()
     // threadAttachMutex and waiting for other threads to pause or reach a
     // safepoint.
     ThreadState* state = mainThreadState();
-    if (!state->isAtSafePoint())
-        state->enterSafePointWithoutPointers();
 
     {
-        MutexLocker locker(threadAttachMutex());
-        state->leaveSafePoint();
+        SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
+
+        // First add the main thread's heap pages to the orphaned pool.
+        state->cleanupPages();
+
+        // Second detach thread.
         ASSERT(attachedThreads().contains(state));
         attachedThreads().remove(state);
         state->~ThreadState();
@@ -355,53 +384,73 @@ void ThreadState::attach()
     attachedThreads().add(state);
 }
 
-void ThreadState::cleanup()
+void ThreadState::cleanupPages()
 {
-    // From here on ignore all conservatively discovered
-    // pointers into the heap owned by this thread.
-    m_isCleaningUp = true;
+    for (int i = GeneralHeap; i < NumberOfHeaps; ++i)
+        m_heaps[i]->cleanupPages();
+}
 
+void ThreadState::cleanup()
+{
     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
         m_cleanupTasks[i]->preCleanup();
 
-    // After this GC we expect heap to be empty because
-    // preCleanup tasks should have cleared all persistent
-    // handles that were externally owned.
-    Heap::collectAllGarbage();
+    {
+        // Grab the threadAttachMutex to ensure that only one thread can shut
+        // down at a time and that no other thread can do a global GC. It also
+        // allows safe iteration of the attachedThreads set, which happens as
+        // part of thread-local GC asserts. We enter a safepoint while waiting
+        // for the lock to avoid a deadlock where another thread has already
+        // requested GC.
+        SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
+
+        // From here on ignore all conservatively discovered
+        // pointers into the heap owned by this thread.
+        m_isTerminating = true;
+
+        // Set the terminating flag on all heap pages of this thread. This is
+        // used to ensure that the thread-local GC does not trace pages that
+        // belong to other threads.
+        setupHeapsForTermination();
+
+        // Do thread-local GCs as long as the count of thread-local Persistents
+        // changes and is above zero.
+        PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get());
+        int oldCount = -1;
+        int currentCount = anchor->numberOfPersistents();
+        ASSERT(currentCount >= 0);
+        while (currentCount != oldCount) {
+            Heap::collectGarbageForTerminatingThread(this);
+            oldCount = currentCount;
+            currentCount = anchor->numberOfPersistents();
+        }
+        // We should not have any persistents left when getting to this point;
+        // if we do, it is probably a bug, so we add a debug ASSERT to catch it.
+
+        // Add pages to the orphaned page pool to ensure any global GCs from this point
+        // on will not trace objects on this thread's heaps.
+        cleanupPages();
 
-    // Verify that all heaps are empty now.
-    for (int i = 0; i < NumberOfHeaps; i++)
-        m_heaps[i]->assertEmpty();
+        ASSERT(attachedThreads().contains(this));
+        attachedThreads().remove(this);
+    }
 
     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
         m_cleanupTasks[i]->postCleanup();
-
     m_cleanupTasks.clear();
 }
 
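The loop in cleanup() iterates to a fixed point because finalizers run by one termination GC can clear further thread-local Persistents, changing the count again. A standalone toy with the same fixed-point shape (persistentsAfterOneGC is invented for the example):

```cpp
#include <cstdio>

// Hypothetical stand-in for one termination GC: each round may run
// finalizers that in turn clear more thread-local Persistent handles.
static int persistentsAfterOneGC(int count)
{
    return count / 2; // pretend half the handles go away per round
}

int main()
{
    int oldCount = -1;
    int currentCount = 8;
    while (currentCount != oldCount) {
        oldCount = currentCount;
        currentCount = persistentsAfterOneGC(currentCount);
    }
    std::printf("%d\n", currentCount); // prints 0: the fixed point
    return 0;
}
```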
+
 void ThreadState::detach()
 {
     ThreadState* state = current();
     state->cleanup();
-
-    // Enter a safe point before trying to acquire threadAttachMutex
-    // to avoid dead lock if another thread is preparing for GC, has acquired
-    // threadAttachMutex and waiting for other threads to pause or reach a
-    // safepoint.
-    if (!state->isAtSafePoint())
-        state->enterSafePointWithoutPointers();
-
-    {
-        MutexLocker locker(threadAttachMutex());
-        state->leaveSafePoint();
-        ASSERT(attachedThreads().contains(state));
-        attachedThreads().remove(state);
-        delete state;
-    }
+    delete state;
     shutdownHeapIfNecessary();
 }
 
-void ThreadState::visitRoots(Visitor* visitor)
+void ThreadState::visitPersistentRoots(Visitor* visitor)
 {
     {
         // All threads are at safepoints so this is not strictly necessary.
@@ -413,7 +462,14 @@ void ThreadState::visitRoots(Visitor* visitor)
 
     AttachedThreadStateSet& threads = attachedThreads();
     for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
-        (*it)->trace(visitor);
+        (*it)->visitPersistents(visitor);
+}
+
+void ThreadState::visitStackRoots(Visitor* visitor)
+{
+    AttachedThreadStateSet& threads = attachedThreads();
+    for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
+        (*it)->visitStack(visitor);
 }
 
 NO_SANITIZE_ADDRESS
@@ -447,6 +503,9 @@ void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
 NO_SANITIZE_ADDRESS
 void ThreadState::visitStack(Visitor* visitor)
 {
+    if (m_stackState == NoHeapPointersOnStack)
+        return;
+
     Address* start = reinterpret_cast<Address*>(m_startOfStack);
     // If there is a safepoint scope marker we should stop the stack
     // scanning there to not touch active parts of the stack. Anything
@@ -462,13 +521,27 @@ void ThreadState::visitStack(Visitor* visitor)
     current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) & ~(sizeof(Address) - 1));
 
     for (; current < start; ++current) {
-        Heap::checkAndMarkPointer(visitor, *current);
-        visitAsanFakeStackForPointer(visitor, *current);
+        Address ptr = *current;
+#if defined(MEMORY_SANITIZER)
+        // |ptr| may be uninitialized by design. Mark it as initialized to keep
+        // MSan from complaining.
+        // Note: it may be tempting to get rid of |ptr| and simply use |current|
+        // here, but that would be incorrect. We intentionally use a local
+        // variable because we don't want to unpoison the original stack.
+        __msan_unpoison(&ptr, sizeof(ptr));
+#endif
+        Heap::checkAndMarkPointer(visitor, ptr);
+        visitAsanFakeStackForPointer(visitor, ptr);
     }
 
     for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it) {
-        Heap::checkAndMarkPointer(visitor, *it);
-        visitAsanFakeStackForPointer(visitor, *it);
+        Address ptr = *it;
+#if defined(MEMORY_SANITIZER)
+        // See the comment above.
+        __msan_unpoison(&ptr, sizeof(ptr));
+#endif
+        Heap::checkAndMarkPointer(visitor, ptr);
+        visitAsanFakeStackForPointer(visitor, ptr);
     }
 }
 
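The loops above are the classic conservative root scan: every word-aligned slot between the deepest frame and the stack base is treated as a potential heap pointer. A self-contained sketch of the idea, where maybeMarkHeapPointer stands in for Heap::checkAndMarkPointer:

```cpp
#include <cstdint>

// Illustrative sketch of the conservative scan above.
void conservativelyScanStack(void** current, void** start,
                             void (*maybeMarkHeapPointer)(void*))
{
    // Stacks grow down, so 'current' (the deepest frame) is numerically
    // below 'start' (the stack base). Align down to a word boundary, as the
    // code above does with '& ~(sizeof(Address) - 1)'.
    uintptr_t p = reinterpret_cast<uintptr_t>(current) & ~(sizeof(void*) - 1);
    for (void** slot = reinterpret_cast<void**>(p); slot < start; ++slot) {
        // Copy the slot's value: it may be a spilled register or padding and
        // is treated as an opaque word, never dereferenced unless it points
        // into a known heap page.
        void* candidate = *slot;
        maybeMarkHeapPointer(candidate);
    }
}
```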
@@ -477,17 +550,10 @@ void ThreadState::visitPersistents(Visitor* visitor)
     m_persistents->trace(visitor);
 }
 
-void ThreadState::trace(Visitor* visitor)
-{
-    if (m_stackState == HeapPointersOnStack)
-        visitStack(visitor);
-    visitPersistents(visitor);
-}
-
 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
 {
-    // If thread is cleaning up ignore conservative pointers.
-    if (m_isCleaningUp)
+    // If thread is terminating ignore conservative pointers.
+    if (m_isTerminating)
         return false;
 
     // This checks for normal pages and for large objects which span the extent
@@ -505,7 +571,7 @@ bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
     return false;
 }
 
-#if ENABLE(GC_TRACING)
+#if ENABLE(GC_PROFILE_MARKING)
 const GCInfo* ThreadState::findGCInfo(Address address)
 {
     BaseHeapPage* page = heapPageFromAddress(address);
@@ -516,6 +582,66 @@ const GCInfo* ThreadState::findGCInfo(Address address)
 }
 #endif
 
+#if ENABLE(GC_PROFILE_HEAP)
+size_t ThreadState::SnapshotInfo::getClassTag(const GCInfo* gcinfo)
+{
+    HashMap<const GCInfo*, size_t>::AddResult result = classTags.add(gcinfo, classTags.size());
+    if (result.isNewEntry) {
+        liveCount.append(0);
+        deadCount.append(0);
+        generations.append(Vector<int, 8>());
+        generations.last().fill(0, 8);
+    }
+    return result.storedValue->value;
+}
+
+void ThreadState::snapshot()
+{
+    SnapshotInfo info(this);
+    RefPtr<TracedValue> json = TracedValue::create();
+
+#define SNAPSHOT_HEAP(HeapType)                                         \
+    {                                                                   \
+        json->beginDictionary();                                        \
+        json->setString("name", #HeapType);                             \
+        m_heaps[HeapType##Heap]->snapshot(json.get(), &info);           \
+        json->endDictionary();                                          \
+    }
+    json->beginArray("heaps");
+    SNAPSHOT_HEAP(General);
+    FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
+    json->endArray();
+#undef SNAPSHOT_HEAP
+
+    json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace());
+    json->setInteger("objectSpace", m_stats.totalObjectSpace());
+    json->setInteger("liveSize", info.liveSize);
+    json->setInteger("deadSize", info.deadSize);
+    json->setInteger("freeSize", info.freeSize);
+    json->setInteger("pageCount", info.freeSize);
+
+    Vector<String> classNameVector(info.classTags.size());
+    for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it)
+        classNameVector[it->value] = it->key->m_className;
+
+    json->beginArray("classes");
+    for (size_t i = 0; i < classNameVector.size(); ++i) {
+        json->beginDictionary();
+        json->setString("name", classNameVector[i]);
+        json->setInteger("liveCount", info.liveCount[i]);
+        json->setInteger("deadCount", info.deadCount[i]);
+        json->beginArray("generations");
+        for (size_t j = 0; j < heapObjectGenerations; ++j)
+            json->pushInteger(info.generations[i][j]);
+        json->endArray();
+        json->endDictionary();
+    }
+    json->endArray();
+
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json);
+}
+#endif
+
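For reference, the trace object assembled by snapshot() has roughly this shape; the placeholders below are not real output, and there is one "heaps" entry per typed heap and one "classes" entry per GCInfo tag:

```
{
  "heaps": [ { "name": "General", ... }, ... ],
  "allocatedSpace": ..., "objectSpace": ...,
  "liveSize": ..., "deadSize": ..., "freeSize": ..., "pageCount": ...,
  "classes": [
    { "name": ..., "liveCount": ..., "deadCount": ...,
      "generations": [ ... ] },
    ...
  ]
}
```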
 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
 {
     CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
@@ -524,7 +650,7 @@ void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallbac
 
 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
 {
-    return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor);
+    return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor);
 }
 
 PersistentNode* ThreadState::globalRoots()
@@ -540,12 +666,13 @@ Mutex& ThreadState::globalRootsMutex()
 }
 
 // Trigger garbage collection on a 50% increase in size, but not for
-// less than 2 pages.
-static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
+// less than 512 KB.
+bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize)
 {
-    if (newSize < 2 * blinkPagePayloadSize())
+    if (newSize < 1 << 19)
         return false;
-    return newSize > oldSize + (oldSize >> 1);
+    size_t limit = oldSize + (oldSize >> 1);
+    return newSize > limit;
 }
 
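In concrete numbers, 1 << 19 is 512 KB, and the limit is oldSize plus half of oldSize. A small self-checking sketch of the same thresholds:

```cpp
#include <cassert>
#include <cstddef>

// Worked check of the heuristic above: a 512 KB floor, then a 50% growth
// limit (newSize > oldSize + oldSize/2).
static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
{
    if (newSize < (size_t(1) << 19))
        return false;
    return newSize > oldSize + (oldSize >> 1);
}

int main()
{
    assert(!increasedEnoughToGC(400 * 1024, 100 * 1024));   // below 512 KB floor
    assert(!increasedEnoughToGC(1400 * 1024, 1000 * 1024)); // 1400 <= 1500 KB
    assert(increasedEnoughToGC(1600 * 1024, 1000 * 1024));  // 1600 >  1500 KB
    return 0;
}
```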
 // FIXME: The heuristics are local for a thread at this
@@ -560,12 +687,14 @@ bool ThreadState::shouldGC()
 }
 
 // Trigger conservative garbage collection on a 100% increase in size,
-// but not for less than 2 pages.
-static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
+// but not for less than 4 MB. If the system currently has a low
+// collection rate, then require a 300% increase in size.
+bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
 {
-    if (newSize < 2 * blinkPagePayloadSize())
+    if (newSize < 1 << 22)
         return false;
-    return newSize > 2 * oldSize;
+    size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize;
+    return newSize > limit;
 }
 
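The conservative variant in concrete numbers: 1 << 22 is 4 MB, and the multiplier is 2x normally or 4x after a low-yield sweep. Another small self-checking sketch:

```cpp
#include <cassert>
#include <cstddef>

// Worked check of the conservative-GC heuristic above.
static bool forceConservativeGC(size_t newSize, size_t oldSize, bool lowRate)
{
    if (newSize < (size_t(1) << 22)) // 4 MB floor
        return false;
    return newSize > (lowRate ? 4 : 2) * oldSize;
}

int main()
{
    const size_t MB = 1024 * 1024;
    assert(!forceConservativeGC(3 * MB, 1 * MB, false)); // below the 4 MB floor
    assert(forceConservativeGC(7 * MB, 3 * MB, false));  // 7 > 2 * 3
    assert(!forceConservativeGC(7 * MB, 3 * MB, true));  // 7 <= 4 * 3
    return 0;
}
```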
 // FIXME: The heuristics are local for a thread at this
@@ -662,19 +791,28 @@ void ThreadState::prepareForGC()
     for (int i = 0; i < NumberOfHeaps; i++) {
         BaseHeap* heap = m_heaps[i];
         heap->makeConsistentForGC();
-        // If there are parked threads with outstanding sweep requests, clear their mark bits.
-        // This happens if a thread did not have time to wake up and sweep,
-        // before the next GC arrived.
+        // If a new GC is requested before this thread got around to sweeping,
+        // e.g. because the thread was busy with a long-running operation, we
+        // clear the mark bits and mark any of the dead objects as dead. The
+        // latter ensures that the next GC marking does not trace already dead
+        // objects: if we traced a dead object we could end up tracing into
+        // garbage, or into the middle of another object, via the newly
+        // conservatively found object.
         if (sweepRequested())
-            heap->clearMarks();
+            heap->clearLiveAndMarkDead();
     }
     setSweepRequested();
 }
 
+void ThreadState::setupHeapsForTermination()
+{
+    for (int i = 0; i < NumberOfHeaps; i++)
+        m_heaps[i]->prepareHeapForTermination();
+}
+
 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
 {
     BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
-#ifdef NDEBUG
+#if !ENABLE(ASSERT)
     if (cachedPage)
         return cachedPage;
 #endif
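The switch from NDEBUG to ENABLE(ASSERT) keeps the pattern intact: release builds trust a cache hit and return immediately, while assert-enabled builds fall through to the full lookup and verify the cache against it. A condensed sketch of that pattern (helper names hypothetical):

```cpp
// Release builds trust a cache hit; assert-enabled builds cross-check it.
BaseHeapPage* lookupPageCheckingCache(HeapContainsCache* cache, Address address)
{
    BaseHeapPage* cachedPage = cache->lookup(address);
#if !ENABLE(ASSERT)
    if (cachedPage)
        return cachedPage; // Release build: trust the cache.
#endif
    BaseHeapPage* page = walkAllHeapsForPage(address); // hypothetical slow path
    // A non-null cache entry must agree with the slow path; the cache holds
    // no negative entries, so a null cachedPage proves nothing.
    ASSERT(!cachedPage || cachedPage == page);
    return page;
}
```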
@@ -701,7 +839,7 @@ BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
 void ThreadState::getStats(HeapStats& stats)
 {
     stats = m_stats;
-#ifndef NDEBUG
+#if ENABLE(ASSERT)
     if (isConsistentForGC()) {
         HeapStats scannedStats;
         scannedStats.clear();
@@ -726,9 +864,13 @@ void ThreadState::safePoint(StackState stackState)
 {
     checkThread();
     performPendingGC(stackState);
+    ASSERT(!m_atSafePoint);
     m_stackState = stackState;
+    m_atSafePoint = true;
     s_safePointBarrier->checkAndPark(this);
+    m_atSafePoint = false;
     m_stackState = HeapPointersOnStack;
+    performPendingSweep();
 }
 
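With this change, sweeping happens as the thread leaves its safepoint rather than inside the barrier's checkAndPark(). An illustrative caller (hypothetical, not part of this patch) showing where long-running loops on GC-attached threads are expected to poll:

```cpp
// Long-running loops poll safePoint() so a GCing thread never stalls
// waiting for them. 'Task' and the deque are hypothetical.
void drainTasks(WTF::Deque<Task*>& tasks)
{
    while (!tasks.isEmpty()) {
        Task* task = tasks.takeFirst();
        task->run();
        delete task;
        // Parks here if a GC is pending; per safePoint() above, any pending
        // sweep for this thread runs after the barrier releases us.
        ThreadState::current()->safePoint(ThreadState::HeapPointersOnStack);
    }
}
```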
 #ifdef ADDRESS_SANITIZER
@@ -772,11 +914,11 @@ void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
     s_safePointBarrier->enterSafePoint(this);
 }
 
-void ThreadState::leaveSafePoint()
+void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
 {
     checkThread();
     ASSERT(m_atSafePoint);
-    s_safePointBarrier->leaveSafePoint(this);
+    s_safePointBarrier->leaveSafePoint(this, locker);
     m_atSafePoint = false;
     m_stackState = HeapPointersOnStack;
     clearSafePointScopeMarker();
@@ -804,30 +946,56 @@ void ThreadState::copyStackUntilSafePointScope()
 
 void ThreadState::performPendingSweep()
 {
-    TRACE_EVENT0("Blink", "ThreadState::performPendingSweep");
+    if (!sweepRequested())
+        return;
+
+#if ENABLE(GC_PROFILE_HEAP)
+    // We snapshot the heap prior to sweeping to get numbers both for resources
+    // that have been allocated since the last GC and for resources that are
+    // about to be freed.
+    bool gcTracingEnabled;
+    TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
+    if (gcTracingEnabled && m_stats.totalObjectSpace() > 0)
+        snapshot();
+#endif
+
+    TRACE_EVENT0("blink_gc", "ThreadState::performPendingSweep");
+
+    double timeStamp = WTF::currentTimeMS();
     const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
-    if (isMainThread())
-        TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCSweeping");
-
-    if (sweepRequested()) {
-        m_sweepInProgress = true;
-        // Disallow allocation during weak processing.
-        enterNoAllocationScope();
-        // Perform thread-specific weak processing.
-        while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
-        leaveNoAllocationScope();
-        // Perform sweeping and finalization.
-        m_stats.clear(); // Sweeping will recalculate the stats
-        for (int i = 0; i < NumberOfHeaps; i++)
-            m_heaps[i]->sweep();
-        getStats(m_statsAfterLastGC);
-        m_sweepInProgress = false;
-        clearGCRequested();
-        clearSweepRequested();
+    if (isMainThread()) {
+        ScriptForbiddenScope::enter();
+        TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping");
     }
 
-    if (isMainThread())
+    m_sweepInProgress = true;
+    // Disallow allocation during weak processing.
+    enterNoAllocationScope();
+    // Perform thread-specific weak processing.
+    while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
+    leaveNoAllocationScope();
+    // Perform sweeping and finalization.
+    size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace();
+    m_stats.clear(); // Sweeping will recalculate the stats
+    for (int i = 0; i < NumberOfHeaps; i++)
+        m_heaps[i]->sweep();
+    getStats(m_statsAfterLastGC);
+    m_sweepInProgress = false;
+    clearGCRequested();
+    clearSweepRequested();
+    // If we collected less than 50% of the object space, record that the
+    // collection rate is low; this is used to determine when to force the
+    // next conservative GC.
+    setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep >> 1));
+
+    if (blink::Platform::current()) {
+        blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
+    }
+
+    if (isMainThread()) {
         TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
+        ScriptForbiddenScope::exit();
+    }
 }
 
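The rate check in concrete numbers: the flag is set when the sweep freed less than half of the pre-sweep object space. A small self-checking sketch:

```cpp
#include <cassert>
#include <cstddef>

// "Low rate" means less than half of the pre-sweep object space was freed.
static bool lowCollectionRate(size_t beforeSweep, size_t afterSweep)
{
    return afterSweep > (beforeSweep >> 1);
}

int main()
{
    assert(lowCollectionRate(100, 60));  // only 40% collected -> low rate
    assert(!lowCollectionRate(100, 30)); // 70% collected -> rate is fine
    return 0;
}
```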
 void ThreadState::addInterruptor(Interruptor* interruptor)
@@ -866,4 +1034,25 @@ ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
     return threads;
 }
 
+#if ENABLE(GC_PROFILE_MARKING)
+const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address)
+{
+    bool needLockForIteration = !isAnyThreadInGC();
+    if (needLockForIteration)
+        threadAttachMutex().lock();
+
+    ThreadState::AttachedThreadStateSet& threads = attachedThreads();
+    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
+        if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
+            if (needLockForIteration)
+                threadAttachMutex().unlock();
+            return gcInfo;
+        }
+    }
+    if (needLockForIteration)
+        threadAttachMutex().unlock();
+    return 0;
+}
+#endif
+
 }