#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
VerifyEvacuation(heap_);
void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
FreeList* free_list;
-
if (space == heap()->old_space()) {
free_list = free_list_old_space_.get();
} else if (space == heap()->code_space()) {
}
+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+ if (!FLAG_parallel_compaction) return 1;
+  // The number of parallel compaction tasks is capped by
+  // - the number of cores, leaving one core for the main thread,
+  // - the number of evacuation candidates, at one task per
+  //   kPagesPerCompactionTask pages,
+  // - a hard limit of kMaxCompactionTasks.
+ const int kPagesPerCompactionTask = 4;
+ const int kMaxCompactionTasks = 8;
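+  // Example (hypothetical numbers): 8 cores and 30 evacuation candidates
+  // yield Min(8, Min(1 + 30 / 4, Max(1, 8 - 1))) = 7 tasks.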
+ return Min(kMaxCompactionTasks,
+ Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
+ Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+}
+
+
void MarkCompactCollector::EvacuatePagesInParallel() {
if (evacuation_candidates_.length() == 0) return;
- int num_tasks = 1;
- if (FLAG_parallel_compaction) {
- num_tasks = NumberOfParallelCompactionTasks();
- }
+ const int num_tasks = NumberOfParallelCompactionTasks();
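+  // Evacuation proceeds in four steps:
+  // (1) Set up one compaction space collection per task.
+  // (2) Seed each collection with up to 1 MB of free memory from the
+  //     corresponding on-heap spaces.
+  // (3) Compact in the background (tasks 1..n-1) while the main thread
+  //     processes spaces_for_tasks[0].
+  // (4) Wait for all tasks and merge the memory (compacted and unused)
+  //     back into the on-heap spaces.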
// Set up compaction spaces.
- CompactionSpaceCollection** compaction_spaces_for_tasks =
+ CompactionSpaceCollection** spaces_for_tasks =
new CompactionSpaceCollection*[num_tasks];
for (int i = 0; i < num_tasks; i++) {
- compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+ spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
}
-
- compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
- heap()->old_space());
- compaction_spaces_for_tasks[0]
- ->Get(CODE_SPACE)
- ->MoveOverFreeMemory(heap()->code_space());
+ heap()->old_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
+ heap()->code_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
compaction_in_progress_ = true;
// Kick off parallel tasks.
for (int i = 1; i < num_tasks; i++) {
concurrent_compaction_tasks_active_++;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+ new CompactionTask(heap(), spaces_for_tasks[i]),
v8::Platform::kShortRunningTask);
}
- // Contribute in main thread. Counter and signal are in principal not needed.
- concurrent_compaction_tasks_active_++;
- EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
- pending_compaction_tasks_semaphore_.Signal();
+ // Perform compaction on the main thread.
+ EvacuatePages(spaces_for_tasks[0], &migration_slots_buffer_);
WaitUntilCompactionCompleted();
// Merge back memory (compacted and unused) from compaction spaces.
for (int i = 0; i < num_tasks; i++) {
heap()->old_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+ spaces_for_tasks[i]->Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
- delete compaction_spaces_for_tasks[i];
+ spaces_for_tasks[i]->Get(CODE_SPACE));
+ delete spaces_for_tasks[i];
}
- delete[] compaction_spaces_for_tasks;
+ delete[] spaces_for_tasks;
// Finalize sequentially.
const int num_pages = evacuation_candidates_.length();
void EvacuatePagesInParallel();
- int NumberOfParallelCompactionTasks() {
- // TODO(hpayer, mlippautz): Figure out some logic to determine the number
- // of compaction tasks.
- return 1;
- }
+ // The number of parallel compaction tasks, including the main thread.
+ int NumberOfParallelCompactionTasks();
void WaitUntilCompactionCompleted();
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
- other->accounting_stats_.Reset();
+ other->accounting_stats_.Clear();
// Move over pages.
PageIterator it(other);
}
+FreeSpace* PagedSpace::TryRemoveMemory() {
+ FreeSpace* space = nullptr;
+ int node_size = 0;
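+  // Check the categories from largest to smallest so that a single call
+  // removes as much memory as possible.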
+ space = free_list()->FindNodeIn(FreeList::kHuge, &node_size);
+ if (space == nullptr)
+ space = free_list()->FindNodeIn(FreeList::kLarge, &node_size);
+ if (space == nullptr)
+ space = free_list()->FindNodeIn(FreeList::kMedium, &node_size);
+ if (space == nullptr)
+ space = free_list()->FindNodeIn(FreeList::kSmall, &node_size);
+ if (space != nullptr) {
+ accounting_stats_.AllocateBytes(node_size);
+ }
+ return space;
+}
+
+
+void PagedSpace::DivideMemory(CompactionSpaceCollection** other, int num,
+                              intptr_t limit) {
+  CHECK(num > 0);
+  CHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+
+  // Distribute free-list nodes round-robin over the receiving spaces until
+  // no memory is left or the space next in line has reached {limit}. The
+  // limit is checked before a node is removed so that no node is dropped
+  // when the loop exits.
+  int index = 0;
+  FreeSpace* node = nullptr;
+  CompactionSpace* space = other[index]->Get(identity());
+  while ((space->free_list()->available() < limit) &&
+         ((node = TryRemoveMemory()) != nullptr)) {
+    CHECK(space->identity() == identity());
+    space->AddMemory(node->address(), node->size());
+    index = (index + 1) % num;
+    space = other[index]->Get(identity());
+  }
+}
+
+
void FreeList::Reset() {
small_list_.Reset();
medium_list_.Reset();
}
+void FreeList::UpdateFragmentationStats(FreeListCategoryType category,
+ Address address, int size) {
+ Page* page = Page::FromAddress(address);
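+  // Available bytes are tracked per page for each free-list category. Note
+  // that {size} may be negative when memory is removed from a category.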
+ switch (category) {
+ case kSmall:
+ page->add_available_in_small_free_list(size);
+ break;
+ case kMedium:
+ page->add_available_in_medium_free_list(size);
+ break;
+ case kLarge:
+ page->add_available_in_large_free_list(size);
+ break;
+ case kHuge:
+ page->add_available_in_huge_free_list(size);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+ FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+ if (node != nullptr) {
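+    // Removing the node reduces the bytes available on its page, hence the
+    // negative size.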
+ UpdateFragmentationStats(category, node->address(), -(*node_size));
+ DCHECK(IsVeryLong() || available() == SumFreeLists());
+ }
+ return node;
+}
+
+
FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeSpace* node = NULL;
Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
- node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_small_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = FindNodeIn(kSmall, node_size);
+ if (node != nullptr) {
+ DCHECK(size_in_bytes <= node->size());
return node;
}
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_medium_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = FindNodeIn(kMedium, node_size);
+ if (node != nullptr) {
+ DCHECK(size_in_bytes <= node->size());
return node;
}
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = FindNodeIn(kLarge, node_size);
+ if (node != nullptr) {
+ DCHECK(size_in_bytes <= node->size());
return node;
}
}
(unswept_free_bytes_ == 0));
const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
DCHECK_GE(size, 0);
- USE(size);
return size;
}
namespace v8 {
namespace internal {
+class CompactionSpaceCollection;
class Isolate;
// -----------------------------------------------------------------------------
// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste. The
-// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
class AllocationStats BASE_EMBEDDED {
public:
AllocationStats() { Clear(); }
max_capacity_ = 0;
size_ = 0;
waste_ = 0;
+ borrowed_ = 0;
}
void ClearSizeWaste() {
intptr_t MaxCapacity() { return max_capacity_; }
intptr_t Size() { return size_; }
intptr_t Waste() { return waste_; }
+ intptr_t Borrowed() { return borrowed_; }
// Grow the space by adding available bytes. They are initially marked as
// being in use (part of the size), but will normally be immediately freed,
// putting them on the free list and subtracting them from the size.

// Shrink the space by removing available bytes. Since shrinking is done
// during sweeping, bytes have been marked as being in use (part of the size)
// and are hereby freed.
void ShrinkSpace(int size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
capacity_ -= size_in_bytes;
size_ -= size_in_bytes;
- DCHECK(size_ >= 0);
+ DCHECK_GE(size_, 0);
+ DCHECK_GE(capacity_, 0);
}
// Allocate from available bytes (available -> size).
void AllocateBytes(intptr_t size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
size_ += size_in_bytes;
- DCHECK(size_ >= 0);
+ DCHECK_GE(size_, 0);
+ DCHECK_LE(size_, capacity_);
}
// Free allocated bytes, making them available (size -> available).
// Merge {other} into {this}.
void Merge(const AllocationStats& other) {
+ DCHECK_GE(other.capacity_, 0);
+ DCHECK_GE(other.size_, 0);
+ DCHECK_GE(other.waste_, 0);
capacity_ += other.capacity_;
size_ += other.size_;
+    // See the description of |borrowed_| below for why we need to remove it
+    // from |capacity_| as well as |size_|.
+ capacity_ -= other.borrowed_;
+ size_ -= other.borrowed_;
waste_ += other.waste_;
- if (other.max_capacity_ > max_capacity_) {
- max_capacity_ = other.max_capacity_;
+ if (capacity_ > max_capacity_) {
+ max_capacity_ = capacity_;
}
}
void DecreaseCapacity(intptr_t size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
capacity_ -= size_in_bytes;
- DCHECK_GE(capacity_, 0);
+ DCHECK_GE(capacity_, size_);
}
- void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+ void IncreaseCapacity(intptr_t size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
+ capacity_ += size_in_bytes;
+ }
+
+ void BorrowMemory(intptr_t size_in_bytes) {
+ DCHECK_GE(size_in_bytes, 0);
+ borrowed_ += size_in_bytes;
+ }
private:
+ // |capacity_| is the number of object-area bytes (i.e., not including page
+ // bookkeeping structures) currently in the space.
intptr_t capacity_;
+
+ // |max_capacity_| is the maximum |capacity_| ever observed by a space.
intptr_t max_capacity_;
+
+ // |size_| is the number of allocated bytes.
intptr_t size_;
+
+  // |waste_| is the number of bytes that are not allocated and not available
+  // to allocation without reorganizing the space via a GC (e.g. small blocks
+  // due to internal fragmentation, top of page areas in map space).
intptr_t waste_;
+
+ // |borrowed_| denotes the number of bytes that are currently borrowed in this
+ // space, i.e., they have been accounted as allocated in another space, but
+ // have been moved over (e.g. through a free list) to the current space.
+ // Note that accounting them as allocated results in them being included
+ // in |size_| as well as |capacity_| of the original space. The temporary
+ // double-accounting is fixed upon merging accounting stats.
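+  //
+  // Example: Borrowing a 4 KB free-list node leaves it accounted as
+  // allocated (|size_| and |capacity_|) in the original space, while the
+  // receiving space adds 4 KB to its own |capacity_| and |borrowed_|.
+  // Merge() then subtracts the 4 KB from the combined |capacity_| and
+  // |size_| again.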
+ intptr_t borrowed_;
};
PagedSpace* owner() { return owner_; }
private:
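+  // Free-list categories, ordered from smallest to largest node size.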
+ enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
+
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
static const int kLargeAllocationMax = kMediumListMax;
FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+ FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+ FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+ switch (category) {
+ case kSmall:
+ return &small_list_;
+ case kMedium:
+ return &medium_list_;
+ case kLarge:
+ return &large_list_;
+ case kHuge:
+ return &huge_list_;
+ default:
+ UNREACHABLE();
+ }
+ return nullptr;
+ }
+
+ void UpdateFragmentationStats(FreeListCategoryType category, Address address,
+ int size);
PagedSpace* owner_;
Heap* heap_;
FreeListCategory large_list_;
FreeListCategory huge_list_;
+ friend class PagedSpace;
+
DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
virtual bool is_local() { return false; }
+  // Divides the free lists of {this} space among the {num} collections in
+  // {other}, filling each up to {limit} bytes. Note that this operation
+  // eventually iterates over free-list nodes one by one, making it
+  // potentially slow.
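+  // EvacuatePagesInParallel, for instance, uses this to seed each task's
+  // compaction spaces with up to 1 MB of free memory.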
+ void DivideMemory(CompactionSpaceCollection** other, int num, intptr_t limit);
+
protected:
+  // Adds {size_in_bytes} bytes of memory starting at {start} to the space,
+  // accounting it as borrowed; see |AllocationStats::borrowed_|.
+ void AddMemory(Address start, int size_in_bytes) {
+ IncreaseCapacity(size_in_bytes);
+ accounting_stats_.BorrowMemory(size_in_bytes);
+ Free(start, size_in_bytes);
+ }
+
+  // Tries to remove a single node from the free lists of {this} space. To
+  // yield as much memory as possible per call, the categories are checked
+  // from huge down to small.
+ FreeSpace* TryRemoveMemory();
+
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
virtual bool snapshotable() { return true; }
CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
: PagedSpace(heap, id, executable) {}
- // Adds external memory starting at {start} of {size_in_bytes} to the space.
- void AddExternalMemory(Address start, int size_in_bytes) {
- IncreaseCapacity(size_in_bytes);
- Free(start, size_in_bytes);
- }
-
virtual bool is_local() { return true; }
protected:
CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, allocator);
- CompactionSpace* compaction_space =
- new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+ CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
+ CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
CHECK(compaction_space != NULL);
CHECK(compaction_space->SetUp());
// We expect two pages to be reachable from old_space in the end.
const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
- Object* chunk =
- old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
- CHECK(chunk != nullptr);
- CHECK(chunk->IsHeapObject());
-
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), 0);
// Make the rest of memory available for compaction.
- compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
- static_cast<int>(rest));
+ old_space->DivideMemory(&collection, 1, rest);
CHECK_EQ(compaction_space->CountTotalPages(), 0);
CHECK_EQ(compaction_space->Capacity(), rest);
while (num_rest_objects-- > 0) {
old_space->MergeCompactionSpace(compaction_space);
CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
- delete compaction_space;
+ delete collection;
delete old_space;
allocator->TearDown();