Upstream version 10.39.225.0
[platform/framework/web/crosswalk.git] / src / third_party / WebKit / Source / platform / heap / ThreadState.cpp
index 8d48c23..d3bc5cf 100644
 #include "platform/ScriptForbiddenScope.h"
 #include "platform/TraceEvent.h"
 #include "platform/heap/AddressSanitizer.h"
+#include "platform/heap/CallbackStack.h"
 #include "platform/heap/Handle.h"
 #include "platform/heap/Heap.h"
 #include "public/platform/Platform.h"
+#include "public/platform/WebThread.h"
 #include "wtf/ThreadingPrimitives.h"
 #if ENABLE(GC_PROFILE_HEAP)
 #include "platform/TracedValue.h"
@@ -94,6 +96,9 @@ static void* getStackStart()
 #endif
 }
 
+// The maximum number of WrapperPersistentRegions to keep around in the
+// m_pooledWrapperPersistentRegions pool.
+static const size_t MaxPooledWrapperPersistentRegionCount = 2;
 
 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
@@ -264,7 +269,6 @@ private:
     ThreadCondition m_resume;
 };
 
-
 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
     : m_storage(storage)
     , m_gcInfo(gcInfo)
@@ -275,8 +279,25 @@ BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadStat
     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
 }
 
+// Statically unfold the heap initialization loop so the compiler knows the
+// heap index at compile time when using HeapIndexTrait.
+template<int num> struct InitializeHeaps {
+    static const int index = num - 1;
+    static void init(BaseHeap** heaps, ThreadState* state)
+    {
+        InitializeHeaps<index>::init(heaps, state);
+        heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index);
+    }
+};
+template<> struct InitializeHeaps<0> {
+    static void init(BaseHeap** heaps, ThreadState* state) { }
+};
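
The recursion bottoms out at the InitializeHeaps<0> specialization, so InitializeHeaps<NumberOfHeaps>::init fills slots 0 through NumberOfHeaps - 1 in order, with each level seeing its index as a compile-time constant. A minimal standalone sketch of the same technique, using a hypothetical HeapTrait in place of HeapIndexTrait and stand-in heap types:

    #include <cstdio>

    struct BaseHeap {
        virtual ~BaseHeap() { }
    };

    // Stand-ins for the real heap classes; index 0 gets a distinct type,
    // mirroring how GeneralHeap uses a different header type.
    struct GeneralLikeHeap : public BaseHeap {
        explicit GeneralLikeHeap(int index) { std::printf("general heap %d\n", index); }
    };
    struct TypedLikeHeap : public BaseHeap {
        explicit TypedLikeHeap(int index) { std::printf("typed heap %d\n", index); }
    };

    // Hypothetical stand-in for HeapIndexTrait.
    template<int index> struct HeapTrait { typedef TypedLikeHeap HeapType; };
    template<> struct HeapTrait<0> { typedef GeneralLikeHeap HeapType; };

    // Same shape as InitializeHeaps above: recurse first, then initialize
    // this level's slot with a statically known index.
    template<int num> struct InitHeaps {
        static const int index = num - 1;
        static void init(BaseHeap** heaps)
        {
            InitHeaps<index>::init(heaps);
            heaps[index] = new typename HeapTrait<index>::HeapType(index);
        }
    };
    template<> struct InitHeaps<0> {
        static void init(BaseHeap**) { }
    };

    int main()
    {
        BaseHeap* heaps[3];
        InitHeaps<3>::init(heaps); // fills indices 0, 1, 2, in that order
        for (int i = 0; i < 3; ++i)
            delete heaps[i];
        return 0;
    }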
+
 ThreadState::ThreadState()
     : m_thread(currentThread())
+    , m_liveWrapperPersistents(new WrapperPersistentRegion())
+    , m_pooledWrapperPersistents(0)
+    , m_pooledWrapperPersistentRegionCount(0)
     , m_persistents(adoptPtr(new PersistentAnchor()))
     , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
     , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
@@ -292,6 +313,7 @@ ThreadState::ThreadState()
     , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
     , m_isTerminating(false)
     , m_lowCollectionRate(false)
+    , m_numberOfSweeperTasks(0)
 #if defined(ADDRESS_SANITIZER)
     , m_asanFakeStack(__asan_get_current_fake_stack())
 #endif
@@ -299,24 +321,30 @@ ThreadState::ThreadState()
     ASSERT(!**s_threadSpecific);
     **s_threadSpecific = this;
 
-    m_stats.clear();
-    m_statsAfterLastGC.clear();
-    // First allocate the general heap, second iterate through to
-    // allocate the type specific heaps
-    m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this, GeneralHeap);
-    for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
-        m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this, i);
+    InitializeHeaps<NumberOfHeaps>::init(m_heaps, this);
+
+    m_weakCallbackStack = new CallbackStack();
 
-    CallbackStack::init(&m_weakCallbackStack);
+    if (blink::Platform::current())
+        m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper"));
 }
 
 ThreadState::~ThreadState()
 {
     checkThread();
-    CallbackStack::shutdown(&m_weakCallbackStack);
-    for (int i = GeneralHeap; i < NumberOfHeaps; i++)
+    delete m_weakCallbackStack;
+    m_weakCallbackStack = 0;
+    for (int i = 0; i < NumberOfHeaps; i++)
         delete m_heaps[i];
     deleteAllValues(m_interruptors);
+    while (m_liveWrapperPersistents) {
+        WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents);
+        delete region;
+    }
+    while (m_pooledWrapperPersistents) {
+        WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents);
+        delete region;
+    }
     **s_threadSpecific = 0;
 }
 
@@ -386,7 +414,7 @@ void ThreadState::attach()
 
 void ThreadState::cleanupPages()
 {
-    for (int i = GeneralHeap; i < NumberOfHeaps; ++i)
+    for (int i = 0; i < NumberOfHeaps; ++i)
         m_heaps[i]->cleanupPages();
 }
 
@@ -452,6 +480,7 @@ void ThreadState::detach()
 
 void ThreadState::visitPersistentRoots(Visitor* visitor)
 {
+    TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
     {
         // All threads are at safepoints so this is not strictly necessary.
         // However we acquire the mutex to make mutation and traversal of this
@@ -467,6 +496,7 @@ void ThreadState::visitPersistentRoots(Visitor* visitor)
 
 void ThreadState::visitStackRoots(Visitor* visitor)
 {
+    TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
     AttachedThreadStateSet& threads = attachedThreads();
     for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
         (*it)->visitStack(visitor);
@@ -548,6 +578,7 @@ void ThreadState::visitStack(Visitor* visitor)
 void ThreadState::visitPersistents(Visitor* visitor)
 {
     m_persistents->trace(visitor);
+    WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor);
 }
 
 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
@@ -589,6 +620,8 @@ size_t ThreadState::SnapshotInfo::getClassTag(const GCInfo* gcinfo)
     if (result.isNewEntry) {
         liveCount.append(0);
         deadCount.append(0);
+        liveSize.append(0);
+        deadSize.append(0);
         generations.append(Vector<int, 8>());
         generations.last().fill(0, 8);
     }
@@ -600,36 +633,46 @@ void ThreadState::snapshot()
     SnapshotInfo info(this);
     RefPtr<TracedValue> json = TracedValue::create();
 
-#define SNAPSHOT_HEAP(HeapType)                                         \
-    {                                                                   \
-        json->beginDictionary();                                        \
-        json->setString("name", #HeapType);                             \
-        m_heaps[HeapType##Heap]->snapshot(json.get(), &info);          \
-        json->endDictionary();                                          \
+#define SNAPSHOT_HEAP(HeapType)                                           \
+    {                                                                     \
+        json->beginDictionary();                                          \
+        json->setString("name", #HeapType);                               \
+        m_heaps[HeapType##Heap]->snapshot(json.get(), &info);             \
+        json->endDictionary();                                            \
+        json->beginDictionary();                                          \
+        json->setString("name", #HeapType"NonFinalized");                 \
+        m_heaps[HeapType##HeapNonFinalized]->snapshot(json.get(), &info); \
+        json->endDictionary();                                            \
     }
     json->beginArray("heaps");
     SNAPSHOT_HEAP(General);
+    SNAPSHOT_HEAP(CollectionBacking);
     FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
     json->endArray();
 #undef SNAPSHOT_HEAP
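
The expansion is mechanical: #HeapType"NonFinalized" concatenates string literals and HeapType##HeapNonFinalized pastes the enum name, so each heap now contributes two dictionaries to the trace output. For SNAPSHOT_HEAP(General) the macro emits:

    json->beginDictionary();
    json->setString("name", "General");
    m_heaps[GeneralHeap]->snapshot(json.get(), &info);
    json->endDictionary();
    json->beginDictionary();
    json->setString("name", "GeneralNonFinalized");
    m_heaps[GeneralHeapNonFinalized]->snapshot(json.get(), &info);
    json->endDictionary();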
 
     json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace());
     json->setInteger("objectSpace", m_stats.totalObjectSpace());
-    json->setInteger("liveSize", info.liveSize);
-    json->setInteger("deadSize", info.deadSize);
+    json->setInteger("pageCount", info.pageCount);
     json->setInteger("freeSize", info.freeSize);
-    json->setInteger("pageCount", info.freeSize);
 
     Vector<String> classNameVector(info.classTags.size());
     for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it)
         classNameVector[it->value] = it->key->m_className;
 
+    size_t liveSize = 0;
+    size_t deadSize = 0;
     json->beginArray("classes");
     for (size_t i = 0; i < classNameVector.size(); ++i) {
         json->beginDictionary();
         json->setString("name", classNameVector[i]);
         json->setInteger("liveCount", info.liveCount[i]);
         json->setInteger("deadCount", info.deadCount[i]);
+        json->setInteger("liveSize", info.liveSize[i]);
+        json->setInteger("deadSize", info.deadSize[i]);
+        liveSize += info.liveSize[i];
+        deadSize += info.deadSize[i];
+
         json->beginArray("generations");
         for (size_t j = 0; j < heapObjectGenerations; ++j)
             json->pushInteger(info.generations[i][j]);
@@ -637,20 +680,59 @@ void ThreadState::snapshot()
         json->endDictionary();
     }
     json->endArray();
+    json->setInteger("liveSize", liveSize);
+    json->setInteger("deadSize", deadSize);
 
-    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json);
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json.release());
 }
 #endif
 
 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
 {
-    CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
+    CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry();
     *slot = CallbackStack::Item(object, callback);
 }
 
 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
 {
-    return m_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&m_weakCallbackStack, visitor);
+    // For weak processing we should never reach orphaned pages since orphaned
+    // pages are not traced and thus objects on those pages are never
+    // registered as objects on orphaned pages. We cannot assert this here since
+    // we might have an off-heap collection. We assert it in
+    // Heap::pushWeakObjectPointerCallback.
+    if (CallbackStack::Item* item = m_weakCallbackStack->pop()) {
+        item->call(visitor);
+        return true;
+    }
+    return false;
+}
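
The old static CallbackStack API threaded the head pointer through every call; the new CallbackStack is an ordinary object whose pop() returns the next Item, or 0 when the stack is empty. A sketch of the Item shape this code relies on (the real definition lives in the newly included platform/heap/CallbackStack.h; the callback signature is inferred from how the item is built and invoked here):

    class Visitor;
    typedef void (*WeakPointerCallback)(Visitor*, void*);

    // Sketch only, not the actual Blink definition.
    class Item {
    public:
        Item() : m_object(0), m_callback(0) { }
        Item(void* object, WeakPointerCallback callback)
            : m_object(object)
            , m_callback(callback)
        {
        }
        // Invoked by popAndInvokeWeakPointerCallback above.
        void call(Visitor* visitor) { m_callback(visitor, m_object); }

    private:
        void* m_object;
        WeakPointerCallback m_callback;
    };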
+
+WrapperPersistentRegion* ThreadState::takeWrapperPersistentRegion()
+{
+    WrapperPersistentRegion* region;
+    if (m_pooledWrapperPersistentRegionCount) {
+        region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents);
+        m_pooledWrapperPersistentRegionCount--;
+    } else {
+        region = new WrapperPersistentRegion();
+    }
+    ASSERT(region);
+    WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region);
+    return region;
+}
+
+void ThreadState::freeWrapperPersistentRegion(WrapperPersistentRegion* region)
+{
+    if (!region->removeIfNotLast(&m_liveWrapperPersistents))
+        return;
+
+    // Region was removed, i.e., it was not the last region in the list.
+    if (m_pooledWrapperPersistentRegionCount < MaxPooledWrapperPersistentRegionCount) {
+        WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region);
+        m_pooledWrapperPersistentRegionCount++;
+    } else {
+        delete region;
+    }
 }
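
Together with MaxPooledWrapperPersistentRegionCount above, these two functions implement a capacity-bounded free list: taking a region reuses a pooled one when available, and freeing a region either leaves it in place (removeIfNotLast returns false for the last live region, so the thread always keeps one region to allocate from), pools it, or deletes it once the pool already holds two. A standalone sketch of the pattern with a hypothetical Region type:

    #include <cstddef>

    // Hypothetical region; the real WrapperPersistentRegion holds slots for
    // wrapper persistents plus intrusive list pointers.
    struct Region {
        Region* next;
        Region() : next(0) { }
    };

    static const size_t maxPooledRegions = 2; // mirrors MaxPooledWrapperPersistentRegionCount

    class RegionPool {
    public:
        RegionPool() : m_freeList(0), m_pooledCount(0) { }

        Region* take()
        {
            if (m_pooledCount) {
                Region* region = m_freeList; // reuse a pooled region
                m_freeList = region->next;
                --m_pooledCount;
                return region;
            }
            return new Region(); // pool empty: fall back to the allocator
        }

        void give(Region* region)
        {
            if (m_pooledCount < maxPooledRegions) {
                region->next = m_freeList; // keep a couple around for reuse
                m_freeList = region;
                ++m_pooledCount;
            } else {
                delete region; // bounded: never hoard more than maxPooledRegions
            }
        }

    private:
        Region* m_freeList;
        size_t m_pooledCount;
    };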
 
 PersistentNode* ThreadState::globalRoots()
@@ -771,26 +853,28 @@ bool ThreadState::forcePreciseGCForTesting()
     return m_forcePreciseGCForTesting;
 }
 
-bool ThreadState::isConsistentForGC()
+void ThreadState::makeConsistentForSweeping()
+{
+    for (int i = 0; i < NumberOfHeaps; i++)
+        m_heaps[i]->makeConsistentForSweeping();
+}
+
+#if ENABLE(ASSERT)
+bool ThreadState::isConsistentForSweeping()
 {
     for (int i = 0; i < NumberOfHeaps; i++) {
-        if (!m_heaps[i]->isConsistentForGC())
+        if (!m_heaps[i]->isConsistentForSweeping())
             return false;
     }
     return true;
 }
-
-void ThreadState::makeConsistentForGC()
-{
-    for (int i = 0; i < NumberOfHeaps; i++)
-        m_heaps[i]->makeConsistentForGC();
-}
+#endif
 
 void ThreadState::prepareForGC()
 {
     for (int i = 0; i < NumberOfHeaps; i++) {
         BaseHeap* heap = m_heaps[i];
-        heap->makeConsistentForGC();
+        heap->makeConsistentForSweeping();
     // If a new GC is requested before this thread got around to sweep, e.g., due to
     // the thread doing a long-running operation, we clear the mark bits and mark any of
         // the dead objects as dead. The latter is used to ensure the next GC marking does
@@ -840,9 +924,8 @@ void ThreadState::getStats(HeapStats& stats)
 {
     stats = m_stats;
 #if ENABLE(ASSERT)
-    if (isConsistentForGC()) {
+    if (isConsistentForSweeping()) {
         HeapStats scannedStats;
-        scannedStats.clear();
         for (int i = 0; i < NumberOfHeaps; i++)
             m_heaps[i]->getScannedStats(scannedStats);
         ASSERT(scannedStats == stats);
@@ -935,7 +1018,14 @@ void ThreadState::copyStackUntilSafePointScope()
     RELEASE_ASSERT(from < to);
     RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack));
     size_t slotCount = static_cast<size_t>(to - from);
-    ASSERT(slotCount < 1024); // Catch potential performance issues.
+    // Catch potential performance issues.
+#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+    // ASan/LSan use more space on the stack and we therefore
+    // increase the allowed stack copying for those builds.
+    ASSERT(slotCount < 2048);
+#else
+    ASSERT(slotCount < 1024);
+#endif
 
     ASSERT(!m_safePointStackCopy.size());
     m_safePointStackCopy.resize(slotCount);
@@ -944,6 +1034,51 @@ void ThreadState::copyStackUntilSafePointScope()
     }
 }
 
+void ThreadState::registerSweepingTask()
+{
+    MutexLocker locker(m_sweepMutex);
+    ++m_numberOfSweeperTasks;
+}
+
+void ThreadState::unregisterSweepingTask()
+{
+    MutexLocker locker(m_sweepMutex);
+    ASSERT(m_numberOfSweeperTasks > 0);
+    if (!--m_numberOfSweeperTasks)
+        m_sweepThreadCondition.signal();
+}
+
+void ThreadState::waitUntilSweepersDone()
+{
+    MutexLocker locker(m_sweepMutex);
+    while (m_numberOfSweeperTasks > 0)
+        m_sweepThreadCondition.wait(m_sweepMutex);
+}
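
These three functions form a standard counted-outstanding-work rendezvous: each sweeper task bumps the counter under the mutex, the last task to finish signals, and the owner thread blocks until the count reaches zero. The same pattern in portable C++, with std::mutex and std::condition_variable standing in for WTF's MutexLocker and ThreadCondition:

    #include <condition_variable>
    #include <mutex>

    class TaskCounter {
    public:
        TaskCounter() : m_count(0) { }

        void registerTask()
        {
            std::lock_guard<std::mutex> locker(m_mutex);
            ++m_count;
        }

        void unregisterTask()
        {
            std::lock_guard<std::mutex> locker(m_mutex);
            if (!--m_count)
                m_condition.notify_one(); // the last task wakes the waiter
        }

        void waitUntilDone()
        {
            std::unique_lock<std::mutex> locker(m_mutex);
            while (m_count > 0) // loop guards against spurious wakeups
                m_condition.wait(locker);
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_condition;
        int m_count;
    };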
+
+class SweepNonFinalizedHeapTask FINAL : public WebThread::Task {
+public:
+    SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap, HeapStats* stats)
+        : m_threadState(state)
+        , m_heap(heap)
+        , m_stats(stats)
+    {
+        m_threadState->registerSweepingTask();
+    }
+
+    virtual ~SweepNonFinalizedHeapTask()
+    {
+        m_threadState->unregisterSweepingTask();
+    }
+
+    virtual void run() { m_heap->sweep(m_stats); }
+
+private:
+    ThreadState* m_threadState;
+    BaseHeap* m_heap;
+    HeapStats* m_stats;
+};
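
Note the RAII pairing: the count is incremented on the Blink thread when the task is constructed and decremented in the destructor rather than at the end of run(), so the books stay balanced even if a posted task is destroyed without ever running. Continuing the TaskCounter sketch above:

    // Same pairing as SweepNonFinalizedHeapTask, using the hypothetical
    // TaskCounter from the previous sketch.
    class CountedTask {
    public:
        explicit CountedTask(TaskCounter* counter)
            : m_counter(counter)
        {
            m_counter->registerTask(); // on the posting thread
        }
        ~CountedTask() { m_counter->unregisterTask(); } // wherever the task dies
        void run() { /* sweep one split-off heap */ }

    private:
        TaskCounter* m_counter;
    };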
+
 void ThreadState::performPendingSweep()
 {
     if (!sweepRequested())
@@ -968,19 +1103,97 @@ void ThreadState::performPendingSweep()
         TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping");
     }
 
-    m_sweepInProgress = true;
-    // Disallow allocation during weak processing.
-    enterNoAllocationScope();
-    // Perform thread-specific weak processing.
-    while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
-    leaveNoAllocationScope();
-    // Perform sweeping and finalization.
     size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace();
-    m_stats.clear(); // Sweeping will recalculate the stats
-    for (int i = 0; i < NumberOfHeaps; i++)
-        m_heaps[i]->sweep();
-    getStats(m_statsAfterLastGC);
-    m_sweepInProgress = false;
+    {
+        NoSweepScope scope(this);
+
+        // Disallow allocation during weak processing.
+        enterNoAllocationScope();
+        {
+            TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing");
+            // Perform thread-specific weak processing.
+            while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
+        }
+        leaveNoAllocationScope();
+
+        // Perform sweeping and finalization.
+
+        // Sweeping will recalculate the stats.
+        m_stats.clear();
+
+        // Sweep the non-finalized heap pages on multiple threads.
+        // Attempt to load-balance by having the sweeper thread sweep as
+        // close to half of the pages as possible.
+        int nonFinalizedPages = 0;
+        for (int i = 0; i < NumberOfNonFinalizedHeaps; i++)
+            nonFinalizedPages += m_heaps[FirstNonFinalizedHeap + i]->normalPageCount();
+
+        int finalizedPages = 0;
+        for (int i = 0; i < NumberOfFinalizedHeaps; i++)
+            finalizedPages += m_heaps[FirstFinalizedHeap + i]->normalPageCount();
+
+        int pagesToSweepInParallel = nonFinalizedPages < finalizedPages ? nonFinalizedPages : ((nonFinalizedPages + finalizedPages) / 2);
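        // Worked example: with 30 non-finalized and 50 finalized pages,
        // nonFinalizedPages < finalizedPages, so all 30 non-finalized pages
        // are eligible for parallel sweeping while this thread keeps the
        // larger finalized share; with 60 non-finalized and 20 finalized
        // pages only (60 + 20) / 2 = 40 are, leaving about 40 pages for
        // each thread.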
+
+        // Start the sweeper thread for the non-finalized heaps. No
+        // finalizers need to run on them and therefore their pages can
+        // be swept on another thread.
+        static const int minNumberOfPagesForParallelSweep = 10;
+        HeapStats heapStatsVector[NumberOfNonFinalizedHeaps];
+        BaseHeap* splitOffHeaps[NumberOfNonFinalizedHeaps] = { 0 };
+        for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) {
+            BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i];
+            int pageCount = heap->normalPageCount();
+            // Only use the sweeper thread if it exists and there are
+            // enough pages to make parallel sweeping worthwhile.
+            if (m_sweeperThread && pageCount > minNumberOfPagesForParallelSweep) {
+                // Create a new thread heap instance to make sure that the
+                // state modified while sweeping is separate for the
+                // sweeper thread and the owner thread.
+                int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel);
+                pagesToSweepInParallel -= pagesToSplitOff;
+                BaseHeap* splitOff = heap->split(pagesToSplitOff);
+                splitOffHeaps[i] = splitOff;
+                HeapStats* stats = &heapStatsVector[i];
+                m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOff, stats));
+            }
+        }
+
+        {
+            // Sweep the remainder of the non-finalized pages (or all of them
+            // if there is no sweeper thread).
+            TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
+            for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
+                HeapStats stats;
+                m_heaps[FirstNonFinalizedHeap + i]->sweep(&stats);
+                m_stats.add(&stats);
+            }
+        }
+
+        {
+            // Sweep the finalized pages.
+            TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps");
+            for (int i = 0; i < NumberOfFinalizedHeaps; i++) {
+                HeapStats stats;
+                m_heaps[FirstFinalizedHeap + i]->sweep(&stats);
+                m_stats.add(&stats);
+            }
+        }
+
+        // Wait for the sweeper threads and update the heap stats with the
+        // stats for the heap portions swept by those threads.
+        waitUntilSweepersDone();
+        for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
+            m_stats.add(&heapStatsVector[i]);
+            if (BaseHeap* splitOff = splitOffHeaps[i])
+                m_heaps[FirstNonFinalizedHeap + i]->merge(splitOff);
+        }
+
+        for (int i = 0; i < NumberOfHeaps; i++)
+            m_heaps[i]->postSweepProcessing();
+
+        getStats(m_statsAfterLastGC);
+
+    } // End NoSweepScope
     clearGCRequested();
     clearSweepRequested();
     // If we collected less than 50% of objects, record that the