"trace progress of the incremental marking")
DEFINE_bool(track_gc_object_stats, false,
"track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 1,
+ "number of parallel and concurrent sweeping threads")
#ifdef VERIFY_HEAP
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
#endif
incremental_marking()->PrepareForScavenge();
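+ // A scavenge can promote up to new_space_.Size() bytes into the old
+ // spaces, so make sure that much swept memory is available first.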
- AdvanceSweepers(static_cast<int>(new_space_.Size()));
+ paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+ paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
// 3. many lazy sweep steps.
// Use mark-sweep-compact events to count incremental GCs in a round.
-
if (incremental_marking()->IsStopped()) {
- if (!IsSweepingComplete() &&
+ if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+ !IsSweepingComplete() &&
!AdvanceSweepers(static_cast<int>(step_size))) {
return false;
}
}
bool AdvanceSweepers(int step_size) {
+ ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
return sweeping_complete;
#include "simulator.h"
#include "spaces.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
#include "version.h"
#include "vm-state-inl.h"
#undef ISOLATE_INIT_ARRAY_EXECUTE
}
+
void Isolate::TearDown() {
TRACE_ISOLATE(tear_down);
if (state_ == INITIALIZED) {
TRACE_ISOLATE(deinit);
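+ // Join the sweeper threads before the rest of the isolate, including the
+ // heap they sweep, is deinitialized.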
+ if (FLAG_concurrent_sweeping || FLAG_parallel_sweeping) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i]->Stop();
+ delete sweeper_thread_[i];
+ }
+ delete[] sweeper_thread_;
+ }
+
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+ if (FLAG_parallel_sweeping || FLAG_concurrent_sweeping) {
+ if (FLAG_sweeper_threads < 1) {
+ FLAG_sweeper_threads = 1;
+ }
+ sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ sweeper_thread_[i] = new SweeperThread(this);
+ sweeper_thread_[i]->Start();
+ }
+ }
return true;
}
class ConsStringIteratorOp;
class StringTracker;
class StubCache;
+class SweeperThread;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
// TODO(svenpanne) This method is on death row...
static v8::Isolate* GetDefaultIsolateForLocking();
+ SweeperThread** sweeper_threads() {
+ return sweeper_thread_;
+ }
+
private:
Isolate();
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread optimizing_compiler_thread_;
+ SweeperThread** sweeper_thread_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
friend class OptimizingCompilerThread;
+ friend class SweeperThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
+#include "sweeper-thread.h"
namespace v8 {
namespace internal {
}
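+// Parallel sweeping protocol: SweepSpaces() flags pages for parallel
+// sweeping, then the main thread wakes the sweeper threads here; they race
+// on Page::TryParallelSweeping() to claim and sweep those pages.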
+void MarkCompactCollector::StartSweeperThreads() {
+ SweeperThread::set_sweeping_pending(true);
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+ }
+}
+
+
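+// Blocks until every sweeper thread has signalled completion, then takes
+// the swept memory back into the spaces and releases queued chunks.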
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ if (SweeperThread::sweeping_pending()) {
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+ }
+ SweeperThread::set_sweeping_pending(false);
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+ StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+ heap()->FreeQueuedChunks();
+ }
+}
+
+
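+// Sums the swept memory taken back from all sweeper threads for |space|.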
+intptr_t MarkCompactCollector::
+ StealMemoryFromSweeperThreads(PagedSpace* space) {
+ intptr_t freed_bytes = 0;
+ for (int i = 0; i < FLAG_sweeper_threads; i++) {
+ freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+ }
+ return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+ return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
bool Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
+ if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+ // Instead of waiting we could also abort the sweeper threads here.
+ WaitUntilSweepingCompleted();
+ }
+
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && abort_incremental_marking_) {
heap()->incremental_marking()->Abort();
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
}
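+// Both modes return usable freed bytes: PagedSpace::Free() reports size
+// minus the wasted (too small to record) bytes, while FreeList::Free()
+// returns exactly those wasted bytes.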
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+ FreeList* free_list,
+ Address start,
+ int size) {
+ if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+ return space->Free(start, size);
+ } else {
+ return size - free_list->Free(start, size);
+ }
+}
+
+
// Sweeps a space conservatively. After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched. A free space is always either ignored or put
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning. Dead objects other than free
// spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p) {
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
- double start_time = 0.0;
- if (FLAG_print_cumulative_gc_stat) {
- start_time = OS::TimeCurrentMillis();
- }
+ ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+ free_list != NULL) ||
+ (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+ free_list == NULL));
MarkBit::CellType* cells = p->markbits()->cells();
p->MarkSweptConservatively();
}
size_t size = block_address - p->area_start();
if (cell_index == last_cell_index) {
- freed_bytes += static_cast<int>(space->Free(p->area_start(),
- static_cast<int>(size)));
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes += space->Free(p->area_start(),
- static_cast<int>(size));
+ freed_bytes += Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
+
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// the marking bitmap for that cell, which describes where the live object
for ( ;
cell_index < last_cell_index;
cell_index++, block_address += 32 * kPointerSize) {
ASSERT(static_cast<unsigned>(cell_index) ==
Bitmap::IndexToCell(
Bitmap::CellAlignIndex(
p->AddressToMarkbitIndex(block_address))));
uint32_t cell = cells[cell_index];
if (cell != 0) {
// We have a live object. Check approximately whether it is more than 32
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(block_address, cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(free_end - free_start));
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(free_end - free_start));
}
}
// Update our undigested record of where the current free area started.
// Handle the free space at the end of the page.
if (block_address - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += space->Free(free_start,
- static_cast<int>(block_address - free_start));
+ freed_bytes += Free<mode>(space, free_list, free_start,
+ static_cast<int>(block_address - free_start));
}
p->ResetLiveBytes();
- if (FLAG_print_cumulative_gc_stat) {
- space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
- }
return freed_bytes;
}
+
+
+// Sweeps the pages of |space| that were flagged for parallel sweeping.
+// Sweeper threads race on Page::TryParallelSweeping() to claim a page,
+// sweep it into their private free list and merge the result into
+// |free_list|, from which the main thread can later steal the memory.
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+
+ if (p->TryParallelSweeping()) {
+ SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+ free_list->Concatenate(private_free_list);
+ }
+ }
+}
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
- space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE);
-
+ space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
+ sweeper == LAZY_CONSERVATIVE ||
+ sweeper == PARALLEL_CONSERVATIVE);
space->ClearStats();
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
+ ASSERT(p->parallel_sweeping() == 0);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearSweptPrecisely();
p->ClearSweptConservatively();
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively(space, p);
+ SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
pages_swept++;
break;
}
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
reinterpret_cast<intptr_t>(p));
}
- freed_bytes += SweepConservatively(space, p);
+ freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
pages_swept++;
space->SetPagesToSweep(p->next_page());
lazy_sweeping_active = true;
break;
}
+ case PARALLEL_CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ p->set_parallel_sweeping(1);
+ break;
+ }
case PRECISE: {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
+ if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
+
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
EvacuateNewSpaceAndCandidates();
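+ // In PARALLEL_CONSERVATIVE mode the SweepSpace() calls above only flag
+ // pages; the sweeper threads started here do the actual sweeping. With
+ // parallel (non-concurrent) sweeping the main thread then blocks until
+ // they finish.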
+ if (AreSweeperThreadsActivated()) {
+ // TODO(hpayer): The sweeper threads should be started right after
+ // sweeping of the old data space has finished.
+ StartSweeperThreads();
+ if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+ WaitUntilSweepingCompleted();
+ }
+ }
+
// ClearNonLiveTransitions depends on precise sweeping of map space to
// detect whether unmarked map became dead in this collection or in one
// of the previous ones.
enum SweeperType {
CONSERVATIVE,
LAZY_CONSERVATIVE,
+ PARALLEL_CONSERVATIVE,
PRECISE
};
+ enum SweepingParallelism {
+ SWEEP_SEQUENTIALLY,
+ SWEEP_IN_PARALLEL
+ };
+
#ifdef VERIFY_HEAP
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
// Sweep a single page from the given space conservatively.
// Return a number of reclaimed bytes.
- static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+ template<SweepingParallelism type>
+ static intptr_t SweepConservatively(PagedSpace* space,
+ FreeList* free_list,
+ Page* p);
INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
return Page::FromAddress(reinterpret_cast<Address>(anchor))->
MarkingParity marking_parity() { return marking_parity_; }
+ void SweepInParallel(PagedSpace* space,
+ FreeList* private_free_list,
+ FreeList* free_list);
+
+ void WaitUntilSweepingCompleted();
+
+ intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+ bool AreSweeperThreadsActivated();
+
private:
MarkCompactCollector();
~MarkCompactCollector();
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
+ void StartSweeperThreads();
#ifdef DEBUG
enum CollectorState {
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
+ chunk->parallel_sweeping_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
}
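+// Takes over all blocks of |category|, prepending them to this category's
+// list, and returns the number of bytes transferred.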
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+ intptr_t free_bytes = 0;
+ if (category->top_ != NULL) {
+ ASSERT(category->end_ != NULL);
+ // This is safe (not going to deadlock) since Concatenate operations
+ // are never performed on the same free lists at the same time in
+ // reverse order.
+ ScopedLock lock_target(mutex_);
+ ScopedLock lock_source(category->mutex());
+ free_bytes = category->available();
+ if (end_ == NULL) {
+ end_ = category->end();
+ } else {
+ category->end()->set_next(top_);
+ }
+ top_ = category->top();
+ available_ += category->available();
+ category->Reset();
+ }
+ return free_bytes;
+}
+
+
void FreeListCategory::Reset() {
top_ = NULL;
end_ = NULL;
}
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+ intptr_t free_bytes = 0;
+ free_bytes += small_list_.Concatenate(free_list->small_list());
+ free_bytes += medium_list_.Concatenate(free_list->medium_list());
+ free_bytes += large_list_.Concatenate(free_list->large_list());
+ free_bytes += huge_list_.Concatenate(free_list->huge_list());
+ return free_bytes;
+}
+
+
void FreeList::Reset() {
small_list_.Reset();
medium_list_.Reset();
reinterpret_cast<intptr_t>(p));
}
DecreaseUnsweptFreeBytes(p);
- freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+ freed_bytes +=
+ MarkCompactCollector::
+ SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ this, NULL, p);
}
p = next_page;
} while (p != anchor() && freed_bytes < bytes_to_sweep);
}
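+// Tries to make size_in_bytes of swept memory available for allocation:
+// steal from the sweeper threads when sweeping concurrently, wait for them
+// if stealing did not yield enough, and fall back to lazy sweeping when no
+// sweeper threads are running.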
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->AreSweeperThreadsActivated()) {
+ if (FLAG_concurrent_sweeping &&
+ collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+ collector->WaitUntilSweepingCompleted();
+ return true;
+ }
+ return false;
+ } else {
+ return AdvanceSweeper(size_in_bytes);
+ }
+}
+
+
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
bool sweeping_complete = false;
for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = AdvanceSweeper(size_in_bytes);
+ sweeping_complete = EnsureSweeperProgress(size_in_bytes);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
// Last ditch, sweep all the remaining pages to try to find space. This may
// cause a pause.
if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
+ EnsureSweeperProgress(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
// Return all current flags.
intptr_t GetFlags() { return flags_; }
+ intptr_t parallel_sweeping() const {
+ return parallel_sweeping_;
+ }
+
+ void set_parallel_sweeping(intptr_t state) {
+ parallel_sweeping_ = state;
+ }
+
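+ // A page with parallel_sweeping_ == 1 is waiting to be swept; the first
+ // sweeper thread to compare-and-swap the flag back to 0 claims the page.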
+ bool TryParallelSweeping() {
+ return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+ }
+
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
void ResetLiveBytes() {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
+ static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+ kIntSize + kIntSize + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
+ intptr_t parallel_sweeping_;
+
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
// the end element of the linked list of free memory blocks.
class FreeListCategory {
public:
- FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
+ FreeListCategory() :
+ top_(NULL),
+ end_(NULL),
+ mutex_(OS::CreateMutex()),
+ available_(0) {}
+
+ ~FreeListCategory() {
+ delete mutex_;
+ }
+
+ intptr_t Concatenate(FreeListCategory* category);
void Reset();
int available() const { return available_; }
void set_available(int available) { available_ = available; }
+ Mutex* mutex() { return mutex_; }
+
#ifdef DEBUG
intptr_t SumFreeList();
int FreeListLength();
private:
FreeListNode* top_;
FreeListNode* end_;
+ Mutex* mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
public:
explicit FreeList(PagedSpace* owner);
+ intptr_t Concatenate(FreeList* free_list);
+
// Clear the free list.
void Reset();
intptr_t EvictFreeListItems(Page* p);
+ FreeListCategory* small_list() { return &small_list_; }
+ FreeListCategory* medium_list() { return &medium_list_; }
+ FreeListCategory* large_list() { return &large_list_; }
+ FreeListCategory* huge_list() { return &huge_list_; }
+
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
bool AdvanceSweeper(intptr_t bytes_to_sweep);
+ // When sweeper threads are active and sweeping runs concurrently, this
+ // function first tries to steal size_in_bytes of already-swept memory
+ // from them and waits for sweeping to complete only if that was not
+ // enough. Without sweeper threads, AdvanceSweeper with size_in_bytes
+ // is called.
+ bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
bool IsSweepingComplete() {
return !first_unswept_page_->is_valid();
}
}
protected:
+ FreeList* free_list() { return &free_list_; }
+
+ void AddToAccountingStats(intptr_t bytes) {
+ accounting_stats_.DeallocateBytes(bytes);
+ }
+
int area_size_;
// Maximum capacity of this space.
MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
+ friend class SweeperThread;
};
--- /dev/null
+++ b/src/sweeper-thread.cc
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
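+// A sweeper thread sweeps into its private_ free lists and concatenates
+// them into the shared per-thread free lists, from which the main thread
+// takes the memory via StealMemory() under the free-list mutexes.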
+SweeperThread::SweeperThread(Isolate* isolate)
+ : Thread("SweeperThread"),
+ isolate_(isolate),
+ heap_(isolate->heap()),
+ collector_(heap_->mark_compact_collector()),
+ start_sweeping_semaphore_(OS::CreateSemaphore(0)),
+ end_sweeping_semaphore_(OS::CreateSemaphore(0)),
+ stop_semaphore_(OS::CreateSemaphore(0)),
+ free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+ private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+ private_free_list_old_pointer_space_(
+ heap_->paged_space(OLD_POINTER_SPACE)) {
+ NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+bool SweeperThread::sweeping_pending_ = false;
+
+
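+// Worker loop: woken through the start semaphore either to sweep both old
+// spaces or, when stop_thread_ was set by Stop(), to shut down.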
+void SweeperThread::Run() {
+ Isolate::SetIsolateThreadLocals(isolate_, NULL);
+ while (true) {
+ start_sweeping_semaphore_->Wait();
+
+ if (Acquire_Load(&stop_thread_)) {
+ stop_semaphore_->Signal();
+ return;
+ }
+
+ collector_->SweepInParallel(heap_->old_data_space(),
+ &private_free_list_old_data_space_,
+ &free_list_old_data_space_);
+ collector_->SweepInParallel(heap_->old_pointer_space(),
+ &private_free_list_old_pointer_space_,
+ &free_list_old_pointer_space_);
+ end_sweeping_semaphore_->Signal();
+ }
+}
+
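+
+// Called on the main thread: moves the swept blocks accumulated for
+// |space| out of this thread's shared free list into the space's own free
+// list and credits the bytes to the space's accounting stats.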
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+ intptr_t free_bytes = 0;
+ if (space->identity() == OLD_POINTER_SPACE) {
+ free_bytes = space->free_list()->Concatenate(&free_list_old_pointer_space_);
+ space->AddToAccountingStats(free_bytes);
+ } else if (space->identity() == OLD_DATA_SPACE) {
+ free_bytes = space->free_list()->Concatenate(&free_list_old_data_space_);
+ space->AddToAccountingStats(free_bytes);
+ }
+ return free_bytes;
+}
+
+void SweeperThread::Stop() {
+ Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+ start_sweeping_semaphore_->Signal();
+ stop_semaphore_->Wait();
+}
+
+
+void SweeperThread::StartSweeping() {
+ start_sweeping_semaphore_->Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+ end_sweeping_semaphore_->Wait();
+}
+
+} } // namespace v8::internal
--- /dev/null
+++ b/src/sweeper-thread.h
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+ explicit SweeperThread(Isolate* isolate);
+
+ void Run();
+ void Stop();
+ void StartSweeping();
+ void WaitForSweeperThread();
+ intptr_t StealMemory(PagedSpace* space);
+
+ static bool sweeping_pending() { return sweeping_pending_; }
+ static void set_sweeping_pending(bool sweeping_pending) {
+ sweeping_pending_ = sweeping_pending;
+ }
+
+ ~SweeperThread() {
+ delete start_sweeping_semaphore_;
+ delete end_sweeping_semaphore_;
+ delete stop_semaphore_;
+ }
+
+ private:
+ Isolate* isolate_;
+ Heap* heap_;
+ MarkCompactCollector* collector_;
+ Semaphore* start_sweeping_semaphore_;
+ Semaphore* end_sweeping_semaphore_;
+ Semaphore* stop_semaphore_;
+ FreeList free_list_old_data_space_;
+ FreeList free_list_old_pointer_space_;
+ FreeList private_free_list_old_data_space_;
+ FreeList private_free_list_old_pointer_space_;
+ volatile AtomicWord stop_thread_;
+ static bool sweeping_pending_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_SWEEPER_THREAD_H_
'../../src/strtod.h',
'../../src/stub-cache.cc',
'../../src/stub-cache.h',
+ '../../src/sweeper-thread.cc',
+ '../../src/sweeper-thread.h',
'../../src/token.cc',
'../../src/token.h',
'../../src/transitions-inl.h',