space->identity() == OLD_DATA_SPACE ||
space->identity() == CODE_SPACE);
+ static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
-
- const int kMaxMaxEvacuationCandidates = 1000;
- int max_evacuation_candidates = Min(
- kMaxMaxEvacuationCandidates,
- static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+ int max_evacuation_candidates =
+ static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
if (FLAG_stress_compaction || FLAG_always_compact) {
max_evacuation_candidates = kMaxMaxEvacuationCandidates;
}
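The clamp against kMaxMaxEvacuationCandidates now happens later in the function (next to the candidates array below), so the raw square-root estimate can first be bumped by the footprint heuristics. As a standalone sketch of how that estimate scales, with illustrative page counts that are not part of the patch:

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <initializer_list>

int main() {
  const int kMaxMaxEvacuationCandidates = 1000;  // same cap as in the patch
  for (int pages : {2, 8, 50, 512, 2000000}) {
    // The estimate grows with the square root of half the page count...
    int limit = static_cast<int>(std::sqrt(static_cast<double>(pages / 2)) + 1);
    // ...and is clamped so the on-stack candidates array cannot overflow.
    limit = std::min(kMaxMaxEvacuationCandidates, limit);
    std::printf("%8d pages -> %4d evacuation candidates\n", pages, limit);
  }
  return 0;
}

For example, 50 pages yield sqrt(25) + 1 = 6 candidates, while two million pages hit the cap of 1000.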
intptr_t reserved = number_of_pages * space->AreaSize();
intptr_t over_reserved = reserved - space->SizeOfObjects();
static const intptr_t kFreenessThreshold = 50;
- if (over_reserved >= 2 * space->AreaSize() &&
- reduce_memory_footprint_) {
- mode = REDUCE_MEMORY_FOOTPRINT;
+ if (over_reserved >= 2 * space->AreaSize()) {
- // We expect that empty pages are easier to compact so slightly bump the
- // limit.
- max_evacuation_candidates += 2;
+ // If reduction of memory footprint was requested, we are aggressive
+ // about choosing pages to free. We expect that half-empty pages
+ // are easier to compact so slightly bump the limit.
+ if (reduce_memory_footprint_) {
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates += 2;
+ }
- if (FLAG_trace_fragmentation) {
- PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+ // If over-usage is very high (more than a third of the space), we
+ // try to free all mostly empty pages. We expect that almost empty
+ // pages are even easier to compact so bump the limit even more.
+ if (over_reserved > reserved / 3) {
+ mode = REDUCE_MEMORY_FOOTPRINT;
+ max_evacuation_candidates *= 2;
+ }
+
+ if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
static_cast<double>(over_reserved) / MB,
+ static_cast<double>(reserved) / MB,
static_cast<int>(kFreenessThreshold));
}
}
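Taken together, the two new branches form a small decision ladder: the further committed memory exceeds live objects, the more aggressively pages are selected for evacuation. A simplified, hypothetical distillation of that ladder, using plain integers instead of the V8 types:

#include <cstdint>

enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };

// Hypothetical sketch, not the V8 API: `reserved` is the committed size of
// the space, `live` the size of surviving objects, `area_size` the usable
// size of one page, and `candidates` the sqrt-based estimate from above.
CompactionMode ChooseMode(int64_t reserved, int64_t live, int64_t area_size,
                          bool reduce_memory_footprint, int* candidates) {
  CompactionMode mode = COMPACT_FREE_LISTS;
  int64_t over_reserved = reserved - live;
  if (over_reserved >= 2 * area_size) {
    if (reduce_memory_footprint) {
      mode = REDUCE_MEMORY_FOOTPRINT;  // caller asked for a small footprint
      *candidates += 2;                // half-empty pages compact cheaply
    }
    if (over_reserved > reserved / 3) {
      mode = REDUCE_MEMORY_FOOTPRINT;  // over a third of the space is slack
      *candidates *= 2;                // almost-empty pages are even cheaper
    }
  }
  return mode;
}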
Candidate candidates[kMaxMaxEvacuationCandidates];
+ max_evacuation_candidates =
+ Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
int count = 0;
int fragmentation = 0;
Candidate* least = NULL;
CHECK(root->IsJSObject());
CHECK(root->map()->IsMap());
}
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
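SimulateFullSpace itself lives in test-alloc.cc and is only forward-declared here. The rough job of such a helper (the sketch below is an assumption about its shape, not the actual test-alloc.cc code) is to make the space look exhausted so the next TENURED allocation is forced onto a brand-new page:

// ASSUMED sketch, not the real test-alloc.cc implementation.
// Exhaust `space` so the next allocation must commit a fresh page.
void SimulateFullSpace(PagedSpace* space) {
  // Give the remaining bump-pointer area back as free-list memory...
  int old_linear_size = static_cast<int>(space->limit() - space->top());
  space->Free(space->top(), old_linear_size);
  // ...then drop the linear allocation area and the free lists entirely.
  space->SetTop(NULL, NULL);
  space->ResetFreeList();
}

With the space in that state, the FACTORY->NewFixedArray(1, TENURED) call in the loop below cannot reuse any existing page, which is what makes CountTotalPages() grow by exactly one per iteration.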
+
+
+TEST(ReleaseOverReservedPages) {
+ i::FLAG_trace_gc = true;
+ InitializeVM();
+ v8::HandleScope scope;
+ static const int number_of_test_pages = 20;
+
+ // Prepare many pages with low live-bytes count.
+ PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+ CHECK_EQ(1, old_pointer_space->CountTotalPages());
+ for (int i = 0; i < number_of_test_pages; i++) {
+ AlwaysAllocateScope always_allocate;
+ SimulateFullSpace(old_pointer_space);
+ FACTORY->NewFixedArray(1, TENURED);
+ }
+ CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+ // Triggering one GC will cause a lot of garbage to be discovered, but
+ // it will be evenly spread across all allocated pages.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+ CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+ // Triggering subsequent GCs should cause at least half of the pages
+ // to be released to the OS after at most two cycles.
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+ // Triggering a last-resort GC should cause all pages to be released
+ // to the OS so that other processes can seize the memory.
+ HEAP->CollectAllAvailableGarbage("triggered really hard");
+ CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}
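With number_of_test_pages fixed at 20, the assertions pin down the expected page-count trajectory. A worked reading of the checks (the intermediate counts are bounds, not exact values):

// after setup loop:      CountTotalPages() == 21  (CHECK_EQ)
// after preparation GC:  CountTotalPages() == 21  (garbage found, nothing released yet)
// after "test 1" GC:     CountTotalPages() <= 21  (may already start shrinking)
// after "test 2" GC:     CountTotalPages() <= 10  (21 >= 2 * pages, at least half released)
// after last-resort GC:  CountTotalPages() == 1   (back to a single page)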