last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
configured_(false),
- last_empty_page_was_given_back_to_the_os_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
gc_state_ = NOT_IN_GC;
- Shrink();
-
isolate_->counters()->objs_since_last_full()->Set(0);
contexts_disposed_ = 0;
// Try to shrink all paged spaces.
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
- space->Shrink();
+ space->ReleaseAllUnusedPages();
}
}
}
}
+ isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
Address end,
ObjectSlotCallback callback);
- // Iterate pointers to new space found in memory interval from start to end.
- static void IteratePointersToNewSpace(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
- // Iterate pointers to new space found in memory interval from start to end.
- // This interval is considered to belong to the map space.
- static void IteratePointersFromMapsToNewSpace(Heap* heap,
- Address start,
- Address end,
- ObjectSlotCallback callback);
-
-
// Returns whether the object resides in new space.
inline bool InNewSpace(Object* object);
inline bool InNewSpace(Address addr);
scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
- bool ShouldWeGiveBackAPageToTheOS() {
- last_empty_page_was_given_back_to_the_os_ =
- !last_empty_page_was_given_back_to_the_os_;
- return last_empty_page_was_given_back_to_the_os_;
- }
-
void QueueMemoryChunkForFree(MemoryChunk* chunk);
void FreeQueuedChunks();
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
- bool last_empty_page_was_given_back_to_the_os_;
MemoryChunk* chunks_queued_for_free_;
friend class Factory;
}
-static inline void UpdateSlotsInRange(Object** start, Object** end) {
- for (Object** slot = start;
- slot < end;
- slot++) {
- Object* obj = *slot;
- if (obj->IsHeapObject() &&
- MarkCompactCollector::IsOnEvacuationCandidate(obj)) {
- MapWord map_word = HeapObject::cast(obj)->map_word();
- if (map_word.IsForwardingAddress()) {
- *slot = map_word.ToForwardingAddress();
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
- }
- }
- }
-}
-
-
enum SweepingMode {
SWEEP_ONLY,
SWEEP_AND_VISIT_LIVE_OBJECTS
}
-INLINE(static uint32_t SweepFree(PagedSpace* space,
- Page* p,
- uint32_t free_start,
- uint32_t region_end,
- uint32_t* cells));
-
-
-static uint32_t SweepFree(PagedSpace* space,
- Page* p,
- uint32_t free_start,
- uint32_t region_end,
- uint32_t* cells) {
- uint32_t free_cell_index = Bitmap::IndexToCell(free_start);
- ASSERT(cells[free_cell_index] == 0);
- while (free_cell_index < region_end && cells[free_cell_index] == 0) {
- free_cell_index++;
- }
-
- if (free_cell_index >= region_end) {
- return free_cell_index;
- }
-
- uint32_t free_end = Bitmap::CellToIndex(free_cell_index);
- space->FreeOrUnmapPage(p,
- p->MarkbitIndexToAddress(free_start),
- (free_end - free_start) << kPointerSizeLog2);
-
- return free_cell_index;
-}
-
-
-INLINE(static uint32_t NextCandidate(uint32_t cell_index,
- uint32_t last_cell_index,
- uint32_t* cells));
-
-
-static uint32_t NextCandidate(uint32_t cell_index,
- uint32_t last_cell_index,
- uint32_t* cells) {
- do {
- cell_index++;
- } while (cell_index < last_cell_index && cells[cell_index] != 0);
- return cell_index;
-}
-
-
static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
intptr_t freed_bytes = 0;
intptr_t newspace_size = space->heap()->new_space()->Size();
bool lazy_sweeping_active = false;
+ bool unused_page_present = false;
while (it.has_next()) {
Page* p = it.next();
continue;
}
+      // Keep one unused page as a spare; any further unused pages are
+      // released instead of being swept.
+ if (p->LiveBytes() == 0) {
+ if (unused_page_present) {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ space->ReleasePage(p);
+ continue;
+ }
+ unused_page_present = true;
+ }
+
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
reinterpret_cast<intptr_t>(p),
case LAZY_CONSERVATIVE: {
freed_bytes += SweepConservatively(space, p);
if (freed_bytes >= newspace_size && p != space->LastPage()) {
- space->SetPagesToSweep(p->next_page(), space->LastPage());
+ space->SetPagesToSweep(p->next_page(), space->anchor());
lazy_sweeping_active = true;
}
break;
}
}
}
+
+ // Give pages that are queued to be freed back to the OS.
+ heap()->FreeQueuedChunks();
}
#endif
-void PagedSpace::Shrink() {
- // TODO(1614) Not implemented.
+// Removes one completely empty page from this space: unlinks it from the
+// page list, returns (or queues) its memory, and shrinks the accounting
+// stats by one page of object area.
+void PagedSpace::ReleasePage(Page* page) {
+  ASSERT(page->LiveBytes() == 0);
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    // NOTE(review): non-data pages are queued rather than freed directly,
+    // presumably so pending store buffer entries can be filtered out first
+    // (Heap::FreeQueuedChunks filters ABOUT_TO_BE_FREED chunks) -- confirm.
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  // The space must retain at least one page worth of capacity after release.
+  ASSERT(Capacity() > 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
+}
+
+
+// Releases every page in this space that holds no live objects, then
+// eagerly frees the chunks that ReleasePage queued for deferred freeing.
+void PagedSpace::ReleaseAllUnusedPages() {
+  PageIterator it(this);
+  while (it.has_next()) {
+    Page* page = it.next();
+    if (page->LiveBytes() == 0) {
+      // NOTE(review): assumes the iterator has already advanced past
+      // |page|, so unlinking it inside ReleasePage is safe -- confirm
+      // PageIterator semantics.
+      ReleasePage(page);
+    }
+  }
+  heap()->FreeQueuedChunks();
}
}
-int PagedSpace::FreeOrUnmapPage(Page* page, Address start, int size_in_bytes) {
- Heap* heap = page->heap();
- // TODO(gc): When we count the live bytes per page we can free empty pages
- // instead of sweeping. At that point this if should be turned into an
- // ASSERT that the area to be freed cannot be the entire page.
- if (size_in_bytes == Page::kObjectAreaSize &&
- heap->ShouldWeGiveBackAPageToTheOS()) {
- page->Unlink();
- if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
- heap->isolate()->memory_allocator()->Free(page);
- } else {
- heap->QueueMemoryChunkForFree(page);
- }
- return 0;
- }
- return Free(start, size_in_bytes);
-}
-
-
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
FreeListNode* node = FreeListNode::FromAddress(start);
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
- Page* last = last_unswept_page_->next_page();
+ Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
// Do not use ShouldBeSweptLazily predicate here.
if (IsSweepingComplete()) return true;
intptr_t freed_bytes = 0;
- Page* last = last_unswept_page_->next_page();
+ Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
Page* next_page = p->next_page();
ASSERT(size_ >= 0);
}
+  // Shrink the space by removing available bytes. Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
+  // NOTE(review): capacity_ is reduced with no non-negativity ASSERT of its
+  // own; the caller (PagedSpace::ReleasePage) guards with
+  // ASSERT(Capacity() > 0) before invoking this.
+  void ShrinkSpace(int size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    ASSERT(size_ >= 0);
+  }
+
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
size_ += size_in_bytes;
return size_in_bytes - wasted;
}
- int FreeOrUnmapPage(Page* page, Address start, int size_in_bytes);
-
// Set space allocation info.
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
accounting_stats_.ExpandSpace(size);
}
- // Releases half of unused pages.
- void Shrink();
+ // Releases an unused page and shrinks the space.
+ void ReleasePage(Page* page);
+
+ // Releases all of the unused pages.
+ void ReleaseAllUnusedPages();
// The dummy page that anchors the linked list of pages.
Page* anchor() { return &anchor_; }