incremental_marking()->PrepareForScavenge();
- paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
- paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
-
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
IncrementalMarking* incremental_marking() {
  return &incremental_marking_;
}
- bool EnsureSweepersProgressed(int step_size) {
- bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
- sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
- return sweeping_complete;
- }
-
ExternalStringTable* external_string_table() {
return &external_string_table_;
}
}
if (state_ == SWEEPING) {
- if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
}
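
The hunk above is the consumer side of the removal: the incremental marker no longer drives sweeping forward on the main thread via EnsureSweepersProgressed(bytes_to_process); it merely polls whether the concurrent sweepers have finished before leaving the SWEEPING state. A minimal sketch of that contract, with hypothetical names standing in for the V8 types:

#include <atomic>

enum class Phase { SWEEPING, MARKING };

// Hypothetical stand-in for IncrementalMarking::Step(): instead of sweeping
// a step_size worth of pages itself, it only observes a completion flag that
// the concurrent sweeper threads clear when they finish.
struct Marker {
  Phase phase = Phase::SWEEPING;
  const std::atomic<bool>* sweeping_in_progress;  // owned by the collector

  void Step() {
    if (phase == Phase::SWEEPING &&
        !sweeping_in_progress->load(std::memory_order_acquire)) {
      phase = Phase::MARKING;  // analogous to StartMarking(PREVENT_COMPACTION)
    }
  }
};
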
ParallelSweepSpacesComplete();
sweeping_pending_ = false;
- RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
- RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
+ RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
+ RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}
-intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
+void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
FreeList* free_list;
if (space == heap()->old_pointer_space()) {
  free_list = free_list_old_pointer_space_.get();
} else if (space == heap()->old_data_space()) {
  free_list = free_list_old_data_space_.get();
} else {
  // Any PagedSpace might invoke RefillFreeList, so we need to make sure
  // to only refill free lists for old data and pointer spaces.
- return 0;
+ return;
}
intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
space->AddToAccountingStats(freed_bytes);
space->DecrementUnsweptFreeBytes(freed_bytes);
- return freed_bytes;
}
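
RefillFreeList() transfers whatever the sweeper threads have freed so far into the space's own free list, and the key property is that Concatenate() is a splice, not a copy, so stealing is cheap. A self-contained sketch of that pattern, using hypothetical types (not V8's FreeList) and assuming each sweeper accumulates into a private lock-guarded list:

#include <cstdint>
#include <list>
#include <mutex>

struct FreeNode { void* address; intptr_t size; };

class FreeList {
 public:
  // Splices all nodes of `other` onto this list in O(1) and returns the
  // number of bytes transferred; `other` is left empty.
  intptr_t Concatenate(FreeList* other) {
    std::scoped_lock lock(mutex_, other->mutex_);
    intptr_t bytes = other->available_;
    nodes_.splice(nodes_.end(), other->nodes_);
    available_ += bytes;
    other->available_ = 0;
    return bytes;
  }

  intptr_t available() const { return available_; }

 private:
  std::mutex mutex_;
  std::list<FreeNode> nodes_;
  intptr_t available_ = 0;
};
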
void WaitUntilSweepingCompleted();
- intptr_t RefillFreeLists(PagedSpace* space);
+ void RefillFreeList(PagedSpace* space);
bool AreSweeperThreadsActivated();
void MarkWeakObjectToCodeTable();
// Special case for processing weak references in a full collection. We need
- // to artifically keep AllocationSites alive for a time.
+ // to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
private:
}
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
- MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->AreSweeperThreadsActivated()) {
- if (collector->IsConcurrentSweepingInProgress()) {
- if (collector->RefillFreeLists(this) < size_in_bytes) {
- if (!collector->sequential_sweeping()) {
- collector->WaitUntilSweepingCompleted();
- return true;
- }
- }
- return false;
- }
- }
- return true;
-}
-
-
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance sweeping a bounded number of times
- // until we find a size_in_bytes contiguous piece of memory
- const int kMaxSweepingTries = 5;
- bool sweeping_complete = false;
-
- for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = EnsureSweeperProgress(size_in_bytes);
+ // If sweeper threads are active, try to refill the free lists.
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
}
- // Last ditch, sweep all the remaining pages to try to find space.
- if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
- heap()->mark_compact_collector()->WaitUntilSweepingCompleted();
+ // If sweeper threads are still active, wait until sweeping is completed.
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
- // Retry the free list allocation.
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
}
unswept_free_bytes_ = 0;
}
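
The rewritten SlowAllocateRaw() replaces the bounded EnsureSweeperProgress() retry loop with a two-phase slow path: first an opportunistic, non-blocking steal from the sweepers' free lists, and only if that still fails a blocking wait for sweeping to complete. A sketch of the shape, with hypothetical interfaces in place of PagedSpace and MarkCompactCollector:

#include <cstddef>

// Illustrative interfaces only; names mirror the diff but are not V8's API.
struct Collector {
  virtual bool SweepingInProgress() const = 0;
  virtual void RefillFreeList() = 0;             // cheap: splice swept memory
  virtual void WaitUntilSweepingCompleted() = 0; // expensive: blocks
  virtual ~Collector() = default;
};

struct Space {
  virtual void* AllocateFromFreeList(size_t size_in_bytes) = 0;
  virtual ~Space() = default;
};

void* SlowAllocate(Space* space, Collector* collector, size_t size_in_bytes) {
  // Phase 1: steal whatever the sweepers have already freed and retry.
  if (collector->SweepingInProgress()) {
    collector->RefillFreeList();
    if (void* p = space->AllocateFromFreeList(size_in_bytes)) return p;
  }
  // Phase 2: block until every page is swept, then retry one last time.
  if (collector->SweepingInProgress()) {
    collector->WaitUntilSweepingCompleted();
    if (void* p = space->AllocateFromFreeList(size_in_bytes)) return p;
  }
  return nullptr;  // caller falls back to growing the space or a full GC
}
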
- // This function tries to steal size_in_bytes memory from the sweeper threads
- // free-lists. If it does not succeed stealing enough memory, it will wait
- // for the sweeper threads to finish sweeping.
- // It returns true when sweeping is completed and false otherwise.
- bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }