: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
- last_unswept_page_(Page::FromAddress(NULL)) {
+ first_unswept_page_(Page::FromAddress(NULL)) {
max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
* Page::kObjectAreaSize;
accounting_stats_.Clear();
void PagedSpace::ReleasePage(Page* page) {
ASSERT(page->LiveBytes() == 0);
+
+ // Adjust the list of unswept pages if the page is the head of the list.
+ if (first_unswept_page_ == page) {
+ first_unswept_page_ = page->next_page();
+ if (first_unswept_page_ == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
+ }
+ }
+
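+ // For a page that was already swept, all of its free memory sits on the
+ // free list. Evict those nodes and count the bytes as allocated so the
+ // accounting stays balanced when the page is released below.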
+ if (page->WasSwept()) {
+ intptr_t size = free_list_.EvictFreeListItems(page);
+ accounting_stats_.AllocateBytes(size);
+ ASSERT_EQ(Page::kObjectAreaSize, static_cast<int>(size));
+ }
+
page->Unlink();
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
PageIterator it(this);
while (it.has_next()) {
Page* page = it.next();
- if (page->LiveBytes() == 0) {
- ReleasePage(page);
+ if (!page->WasSwept()) {
+ if (page->LiveBytes() == 0) ReleasePage(page);
+ } else {
+ HeapObject* obj = HeapObject::FromAddress(page->body());
+ if (obj->IsFreeSpace() &&
+ FreeSpace::cast(obj)->size() == Page::kObjectAreaSize) {
+ // Sometimes we allocate memory from the free list but don't
+ // immediately initialize it (e.g. see PagedSpace::ReserveSpace,
+ // called from Heap::ReserveSpace, which can cause a GC before the
+ // reserved space is actually initialized). Thus we can't simply
+ // assume that obj represents a valid node still owned by a free
+ // list. Instead, verify that the page is fully covered by
+ // free-list items.
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(page, &sizes);
+ if (sizes.Total() == Page::kObjectAreaSize) {
+ ReleasePage(page);
+ }
+ }
}
}
heap()->FreeQueuedChunks();
}
-void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
- sizes[0] = CountFreeListItemsInList(small_list_, p);
- sizes[1] = CountFreeListItemsInList(medium_list_, p);
- sizes[2] = CountFreeListItemsInList(large_list_, p);
- sizes[3] = CountFreeListItemsInList(huge_list_, p);
+void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
+ sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p);
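+ // If huge-list nodes already cover the page's whole object area, the
+ // smaller lists cannot hold any nodes from this page, so skip them.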
+ if (sizes->huge_size_ < Page::kObjectAreaSize) {
+ sizes->small_size_ = CountFreeListItemsInList(small_list_, p);
+ sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p);
+ sizes->large_size_ = CountFreeListItemsInList(large_list_, p);
+ } else {
+ sizes->small_size_ = 0;
+ sizes->medium_size_ = 0;
+ sizes->large_size_ = 0;
+ }
}
+
+static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) {
+ intptr_t sum = 0;
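+ // Traverse via a pointer to the current link so a matching node can be
+ // unlinked in place without tracking the previous node separately.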
+ while (*n != NULL) {
+ if (Page::FromAddress((*n)->address()) == p) {
+ FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+ sum += free_space->Size();
+ *n = (*n)->next();
+ } else {
+ n = (*n)->next_address();
+ }
+ }
+ return sum;
+}
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+ intptr_t sum = EvictFreeListItemsInList(&huge_list_, p);
+
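+ // As in CountFreeListItems: if the huge list alone covered the whole
+ // page, the remaining lists cannot contain nodes from it.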
+ if (sum < Page::kObjectAreaSize) {
+ sum += EvictFreeListItemsInList(&small_list_, p) +
+ EvictFreeListItemsInList(&medium_list_, p) +
+ EvictFreeListItemsInList(&large_list_, p);
+ }
+
+ available_ -= sum;
+
+ return sum;
+}
+
+
#ifdef DEBUG
intptr_t FreeList::SumFreeList(FreeListNode* cur) {
intptr_t sum = 0;
// Stop lazy sweeping and clear marking bits for unswept pages.
if (first_unswept_page_ != NULL) {
- Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
do {
// Do not use the ShouldBeSweptLazily predicate here.
}
}
p = p->next_page();
- } while (p != last);
+ } while (p != anchor());
}
- first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
+ first_unswept_page_ = Page::FromAddress(NULL);
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
if (IsSweepingComplete()) return true;
intptr_t freed_bytes = 0;
- Page* last = last_unswept_page_;
Page* p = first_unswept_page_;
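+ // The unswept pages form a tail segment of the circular page list, so
+ // sweep until the budget is spent or the walk wraps back to the anchor.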
do {
Page* next_page = p->next_page();
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
p = next_page;
- } while (p != last && freed_bytes < bytes_to_sweep);
+ } while (p != anchor() && freed_bytes < bytes_to_sweep);
- if (p == last) {
- last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
+ if (p == anchor()) {
+ first_unswept_page_ = Page::FromAddress(NULL);
} else {
first_unswept_page_ = p;
}
// 'wasted_bytes'. The size should be a non-zero multiple of the word size.
MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
- void MarkNodes();
-
#ifdef DEBUG
void Zap();
static intptr_t SumFreeList(FreeListNode* node);
bool IsVeryLong();
#endif
- void CountFreeListItems(Page* p, intptr_t* sizes);
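+ // Byte counts of free-list entries found on a single page, bucketed by
+ // the list (small, medium, large, huge) that holds them.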
+ struct SizeStats {
+ intptr_t Total() {
+ return small_size_ + medium_size_ + large_size_ + huge_size_;
+ }
+
+ intptr_t small_size_;
+ intptr_t medium_size_;
+ intptr_t large_size_;
+ intptr_t huge_size_;
+ };
+
+ void CountFreeListItems(Page* p, SizeStats* sizes);
+
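+ // Unlinks every free-list node that lives on |p| and returns the total
+ // number of bytes removed from the free list.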
+ intptr_t EvictFreeListItems(Page* p);
private:
// The size range of blocks, in bytes.
!p->WasSweptPrecisely();
}
- void SetPagesToSweep(Page* first, Page* last) {
+ void SetPagesToSweep(Page* first) {
first_unswept_page_ = first;
- last_unswept_page_ = last;
}
bool AdvanceSweeper(intptr_t bytes_to_sweep);
Page* LastPage() { return anchor_.prev_page(); }
bool IsFragmented(Page* p) {
- intptr_t sizes[4];
- free_list_.CountFreeListItems(p, sizes);
+ FreeList::SizeStats sizes;
+ free_list_.CountFreeListItems(p, &sizes);
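+ // The ratio below scales selected free-list buckets and expresses the
+ // result as a percentage of the page's object area.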
intptr_t ratio;
intptr_t ratio_threshold;
if (identity() == CODE_SPACE) {
- ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize;
+ ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+ Page::kObjectAreaSize;
ratio_threshold = 10;
} else {
- ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize;
+ ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+ Page::kObjectAreaSize;
ratio_threshold = 15;
}
PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
reinterpret_cast<void*>(p),
identity(),
- static_cast<int>(sizes[0]),
- static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[1]),
- static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[2]),
- static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize,
- static_cast<int>(sizes[3]),
- static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize,
+ static_cast<int>(sizes.small_size_),
+ static_cast<double>(sizes.small_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.medium_size_),
+ static_cast<double>(sizes.medium_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.large_size_),
+ static_cast<double>(sizes.large_size_ * 100) /
+ Page::kObjectAreaSize,
+ static_cast<int>(sizes.huge_size_),
+ static_cast<double>(sizes.huge_size_ * 100) /
+ Page::kObjectAreaSize,
(ratio > ratio_threshold) ? "[fragmented]" : "");
}
return (ratio > ratio_threshold) ||
- (FLAG_always_compact && sizes[3] != Page::kObjectAreaSize);
+ (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize);
}
void EvictEvacuationCandidatesFromFreeLists();
bool was_swept_conservatively_;
Page* first_unswept_page_;
- Page* last_unswept_page_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS.
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact();
- void MarkFreeListNodes() { free_list_.MarkNodes(); }
-
protected:
void ResetFreeList() {
free_list_.Reset();