}
-void Heap::FreeQueuedChunks() {
+void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
if (chunks_queued_for_free_ == NULL) return;
MemoryChunk* next;
MemoryChunk* chunk;
}
isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+}
+
+
+void Heap::FreeQueuedChunks() {
+ MemoryChunk* next;
+ MemoryChunk* chunk;
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
isolate_->memory_allocator()->Free(chunk);
inline bool OldGenerationAllocationLimitReached();
void QueueMemoryChunkForFree(MemoryChunk* chunk);
+ void FilterStoreBufferEntriesOnAboutToBeFreedPages();
void FreeQueuedChunks();
int gc_count() const { return gc_count_; }
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
+ heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
heap()->FreeQueuedChunks();
}
PrintF("SweepSpace: %s (%d pages swept)\n",
AllocationSpaceName(space->identity()), pages_swept);
}
-
- // Give pages that are queued to be freed back to the OS.
- heap()->FreeQueuedChunks();
}
MoveEvacuationCandidatesToEndOfPagesList();
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
{
{
GCTracer::Scope sweep_scope(heap()->tracer(),
}
}
- EvacuateNewSpaceAndCandidates();
+ // Deallocate unmarked large objects.
+ heap_->lo_space()->FreeUnmarkedObjects();
+
+ // Give pages that are queued to be freed back to the OS. Invalid store
+ // buffer entries have already been filtered out, so we can just release
+ // the memory.
+ heap()->FreeQueuedChunks();
heap()->FreeDeadArrayBuffers(false);
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap_->lo_space()->FreeUnmarkedObjects();
+ EvacuateNewSpaceAndCandidates();
+
+ // Clear the marking state of live large objects.
+ heap_->lo_space()->ClearMarkingStateOfLiveObjects();
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
}
+// Resets the marking state of every live large object: flips each object's
+// mark bit from black back to white and clears the per-page progress bar and
+// live-byte counters, leaving the space ready for the next marking cycle.
+//
+// NOTE(review): the DCHECK asserts that every object still on the page list
+// is marked (black or grey) — i.e. this presumably runs only after unmarked
+// large objects have been freed (e.g. by FreeUnmarkedObjects()); confirm the
+// call ordering at the call site.
+void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ HeapObject* object = current->GetObject();
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsBlackOrGrey(mark_bit));
+ Marking::BlackToWhite(mark_bit);
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
+ current = current->next_page();
+ }
+}
+
+
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* previous = NULL;
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- // Can this large page contain pointers to non-trivial objects. No other
- // pointer object is this big.
- bool is_pointer_object = object->IsFixedArray();
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) {
- Marking::BlackToWhite(mark_bit);
- Page::FromAddress(object->address())->ResetProgressBar();
- Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
} else {
static_cast<uint32_t>(key));
}
- if (is_pointer_object) {
- heap()->QueueMemoryChunkForFree(page);
- } else {
- heap()->isolate()->memory_allocator()->Free(page);
- }
+ heap()->QueueMemoryChunkForFree(page);
}
}
- heap()->FreeQueuedChunks();
}
// if such a page doesn't exist.
LargePage* FindPage(Address a);
+ // Clears the marking state of live objects.
+ void ClearMarkingStateOfLiveObjects();
+
// Frees unmarked objects.
void FreeUnmarkedObjects();