Tweak compaction candidate selection to avoid keeping pages with low occupancy around.
author    vegorov@chromium.org <vegorov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Fri, 3 Feb 2012 14:16:40 +0000 (14:16 +0000)
committer vegorov@chromium.org <vegorov@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Fri, 3 Feb 2012 14:16:40 +0000 (14:16 +0000)
Increase the slots buffer chain length threshold to 15 to make compaction more aggressive and useful.

Pass GC and collector selection reasons to GCTracer to allow more meaningful --trace-gc output.

Print fragmentation of spaces that we do not compact.

R=erik.corry@gmail.com

Review URL: https://chromiumcodereview.appspot.com/9323007

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10601 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

15 files changed:
src/api.cc
src/debug.cc
src/execution.cc
src/extensions/gc-extension.cc
src/heap-inl.h
src/heap.cc
src/heap.h
src/log.cc
src/mark-compact-inl.h
src/mark-compact.cc
src/mark-compact.h
src/mksnapshot.cc
src/profile-generator.cc
src/runtime.cc
src/spaces.h

diff --git a/src/api.cc b/src/api.cc
index 0b67516..4a71539 100644
@@ -4077,7 +4077,7 @@ bool v8::V8::IdleNotification(int hint) {
 void v8::V8::LowMemoryNotification() {
   i::Isolate* isolate = i::Isolate::Current();
   if (isolate == NULL || !isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllAvailableGarbage();
+  isolate->heap()->CollectAllAvailableGarbage("low memory notification");
 }
 
 
diff --git a/src/debug.cc b/src/debug.cc
index f6c4d6c..f2c4dda 100644
@@ -1904,7 +1904,8 @@ void Debug::PrepareForBreakPoints() {
     {
       // We are going to iterate heap to find all functions without
       // debug break slots.
-      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+      isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                          "preparing for breakpoints");
 
       // Ensure no GC in this scope as we are going to use gc_metadata
       // field in the Code object to mark active functions.
@@ -2230,8 +2231,9 @@ void Debug::CreateScriptCache() {
   // rid of all the cached script wrappers and the second gets rid of the
   // scripts which are no longer referenced.  The second also sweeps precisely,
   // which saves us doing yet another GC to make the heap iterable.
-  heap->CollectAllGarbage(Heap::kNoGCFlags);
-  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  heap->CollectAllGarbage(Heap::kNoGCFlags, "Debug::CreateScriptCache");
+  heap->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Debug::CreateScriptCache");
 
   ASSERT(script_cache_ == NULL);
   script_cache_ = new ScriptCache();
@@ -2281,7 +2283,8 @@ Handle<FixedArray> Debug::GetLoadedScripts() {
 
   // Perform GC to get unreferenced scripts evicted from the cache before
   // returning the content.
-  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                      "Debug::GetLoadedScripts");
 
   // Get the scripts from the cache.
   return script_cache_->GetScripts();
diff --git a/src/execution.cc b/src/execution.cc
index 8a0242f..71e8ea3 100644
@@ -877,7 +877,7 @@ MaybeObject* Execution::HandleStackGuardInterrupt() {
   StackGuard* stack_guard = isolate->stack_guard();
 
   if (stack_guard->IsGCRequest()) {
-    isolate->heap()->CollectAllGarbage(false);
+    isolate->heap()->CollectAllGarbage(false, "StackGuard GC request");
     stack_guard->Continue(GC_REQUEST);
   }
 
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index 54c8cdc..573797e 100644
@@ -40,7 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
 
 
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
-  HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
   return v8::Undefined();
 }
 
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 4c27095..39cdf13 100644
@@ -438,8 +438,10 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space) {
-  return CollectGarbage(space, SelectGarbageCollector(space));
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) {
+  const char* collector_reason = NULL;
+  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(space, collector, gc_reason, collector_reason);
 }
 
 
@@ -474,7 +476,7 @@ int Heap::AdjustAmountOfExternalAllocatedMemory(int change_in_bytes) {
         amount_of_external_allocated_memory_ -
         amount_of_external_allocated_memory_at_last_global_gc_;
     if (amount_since_last_global_gc > external_allocation_limit_) {
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kNoGCFlags, "external memory allocation limit reached");
     }
   } else {
     // Avoid underflow.
@@ -523,7 +525,8 @@ Isolate* Heap::isolate() {
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->heap()->CollectGarbage(Failure::cast(__maybe_object__)->     \
-                                    allocation_space());                  \
+                                    allocation_space(),                   \
+                                    "allocation failure");                \
     __maybe_object__ = FUNCTION_CALL;                                     \
     if (__maybe_object__->ToObject(&__object__)) RETURN_VALUE;            \
     if (__maybe_object__->IsOutOfMemory()) {                              \
@@ -531,7 +534,7 @@ Isolate* Heap::isolate() {
     }                                                                     \
     if (!__maybe_object__->IsRetryAfterGC()) RETURN_EMPTY;                \
     ISOLATE->counters()->gc_last_resort_from_handles()->Increment();      \
-    ISOLATE->heap()->CollectAllAvailableGarbage();                        \
+    ISOLATE->heap()->CollectAllAvailableGarbage("last resort gc");        \
     {                                                                     \
       AlwaysAllocateScope __scope__;                                      \
       __maybe_object__ = FUNCTION_CALL;                                   \
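
The retry macro above is easier to follow without the preprocessor plumbing.
Below is a minimal sketch of the same protocol; TryAllocate() and Allocation
are hypothetical stand-ins for FUNCTION_CALL and the MaybeObject handling:

  // Sketch of the allocation-retry protocol implemented by the macro
  // above (hypothetical helper names, not part of this change).
  Object* AllocateWithRetry(Isolate* isolate) {
    Allocation result = TryAllocate();
    if (result.ok()) return result.object();

    // First retry: collect only the space whose allocation failed,
    // tagging the GC with a reason for --trace-gc.
    isolate->heap()->CollectGarbage(result.allocation_space(),
                                    "allocation failure");
    result = TryAllocate();
    if (result.ok()) return result.object();

    // Last resort: the most aggressive GC available, then one final
    // attempt with allocation always allowed so the heap may grow.
    isolate->counters()->gc_last_resort_from_handles()->Increment();
    isolate->heap()->CollectAllAvailableGarbage("last resort gc");
    {
      AlwaysAllocateScope scope;
      result = TryAllocate();
    }
    return result.ok() ? result.object() : NULL;  // caller reports OOM
  }
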
diff --git a/src/heap.cc b/src/heap.cc
index 3b77714..4c7e33d 100644
@@ -236,16 +236,19 @@ int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
 }
 
 
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
   // Is global GC requested?
   if (space != NEW_SPACE || FLAG_gc_global) {
     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
     return MARK_COMPACTOR;
   }
 
   // Is enough data promoted to justify a global GC?
   if (OldGenerationPromotionLimitReached()) {
     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
     return MARK_COMPACTOR;
   }
 
@@ -253,6 +256,7 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   if (old_gen_exhausted_) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "old generations exhausted";
     return MARK_COMPACTOR;
   }
 
@@ -268,10 +272,12 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
     isolate_->counters()->
         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
+    *reason = "scavenge might not succeed";
     return MARK_COMPACTOR;
   }
 
   // Default
+  *reason = NULL;
   return SCAVENGER;
 }
 
@@ -431,17 +437,17 @@ void Heap::GarbageCollectionEpilogue() {
 }
 
 
-void Heap::CollectAllGarbage(int flags) {
+void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
   mark_compact_collector_.SetFlags(kNoGCFlags);
 }
 
 
-void Heap::CollectAllAvailableGarbage() {
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
@@ -453,11 +459,12 @@ void Heap::CollectAllAvailableGarbage() {
   // Note: as weak callbacks can execute arbitrary code, we cannot
   // hope that eventually there will be no weak callbacks invocations.
   // Therefore stop recollecting after several attempts.
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
   isolate_->compilation_cache()->Clear();
   const int kMaxNumberOfAttempts = 7;
   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
+    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
       break;
     }
   }
@@ -469,7 +476,10 @@ void Heap::CollectAllAvailableGarbage() {
 }
 
 
-bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
+bool Heap::CollectGarbage(AllocationSpace space,
+                          GarbageCollector collector,
+                          const char* gc_reason,
+                          const char* collector_reason) {
   // The VM is in the GC state until exiting this function.
   VMState state(isolate_, GC);
 
@@ -497,11 +507,12 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
       PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
     }
     collector = SCAVENGER;
+    collector_reason = "incremental marking delaying mark-sweep";
   }
 
   bool next_gc_likely_to_collect_more = false;
 
-  { GCTracer tracer(this);
+  { GCTracer tracer(this, gc_reason, collector_reason);
     GarbageCollectionPrologue();
     // The GC count was incremented in the prologue.  Tell the tracer about
     // it.
@@ -533,7 +544,7 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
 
 
 void Heap::PerformScavenge() {
-  GCTracer tracer(this);
+  GCTracer tracer(this, NULL, NULL);
   if (incremental_marking()->IsStopped()) {
     PerformGarbageCollection(SCAVENGER, &tracer);
   } else {
@@ -588,27 +599,33 @@ void Heap::ReserveSpace(
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
-      Heap::CollectGarbage(NEW_SPACE);
+      Heap::CollectGarbage(NEW_SPACE,
+                           "failed to reserve space in the new space");
       gc_performed = true;
     }
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
-      Heap::CollectGarbage(OLD_POINTER_SPACE);
+      Heap::CollectGarbage(OLD_POINTER_SPACE,
+                           "failed to reserve space in the old pointer space");
       gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
-      Heap::CollectGarbage(OLD_DATA_SPACE);
+      Heap::CollectGarbage(OLD_DATA_SPACE,
+                           "failed to reserve space in the old data space");
       gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
-      Heap::CollectGarbage(CODE_SPACE);
+      Heap::CollectGarbage(CODE_SPACE,
+                           "failed to reserve space in the code space");
       gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
-      Heap::CollectGarbage(MAP_SPACE);
+      Heap::CollectGarbage(MAP_SPACE,
+                           "failed to reserve space in the map space");
       gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
-      Heap::CollectGarbage(CELL_SPACE);
+      Heap::CollectGarbage(CELL_SPACE,
+                           "failed to reserve space in the cell space");
       gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
@@ -620,7 +637,8 @@ void Heap::ReserveSpace(
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
     if (!(lo_space->ReserveSpace(large_object_size))) {
-      Heap::CollectGarbage(LO_SPACE);
+      Heap::CollectGarbage(LO_SPACE,
+                           "failed to reserve space in the large object space");
       gc_performed = true;
     }
   }
@@ -4742,7 +4760,7 @@ bool Heap::IsHeapIterable() {
 void Heap::EnsureHeapIsIterable() {
   ASSERT(IsAllocationAllowed());
   if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask);
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
   }
   ASSERT(IsHeapIterable());
 }
@@ -4812,7 +4830,7 @@ bool Heap::IdleNotification(int hint) {
       isolate_->compilation_cache()->Clear();
       uncommit = true;
     }
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
     gc_count_at_last_idle_gc_ = gc_count_;
     if (uncommit) {
       new_space_.Shrink();
@@ -4853,9 +4871,10 @@ bool Heap::IdleGlobalGC() {
   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
     if (contexts_disposed_ > 0) {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
     } else {
-      CollectGarbage(NEW_SPACE);
+      CollectGarbage(NEW_SPACE, "idle notification");
     }
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
@@ -4865,12 +4884,12 @@ bool Heap::IdleGlobalGC() {
     // generated code for cached functions.
     isolate_->compilation_cache()->Clear();
 
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
 
   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(kNoGCFlags);
+    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
     new_space_.Shrink();
     last_idle_notification_gc_count_ = gc_count_;
     number_idle_notifications_ = 0;
@@ -4880,7 +4899,8 @@ bool Heap::IdleGlobalGC() {
       contexts_disposed_ = 0;
     } else {
       HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kNoGCFlags);
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: contexts disposed");
       last_idle_notification_gc_count_ = gc_count_;
     }
     // If this is the first idle notification, we reset the
@@ -6511,7 +6531,9 @@ static intptr_t CountTotalHolesSize() {
 }
 
 
-GCTracer::GCTracer(Heap* heap)
+GCTracer::GCTracer(Heap* heap,
+                   const char* gc_reason,
+                   const char* collector_reason)
     : start_time_(0.0),
       start_object_size_(0),
       start_memory_size_(0),
@@ -6520,7 +6542,9 @@ GCTracer::GCTracer(Heap* heap)
       allocated_since_last_gc_(0),
       spent_in_mutator_(0),
       promoted_objects_size_(0),
-      heap_(heap) {
+      heap_(heap),
+      gc_reason_(gc_reason),
+      collector_reason_(collector_reason) {
   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
   start_time_ = OS::TimeCurrentMillis();
   start_object_size_ = heap_->SizeOfObjects();
@@ -6599,6 +6623,15 @@ GCTracer::~GCTracer() {
                longest_step_);
       }
     }
+
+    if (gc_reason_ != NULL) {
+      PrintF(" [%s]", gc_reason_);
+    }
+
+    if (collector_reason_ != NULL) {
+      PrintF(" [%s]", collector_reason_);
+    }
+
     PrintF(".\n");
   } else {
     PrintF("pause=%d ", time);
diff --git a/src/heap.h b/src/heap.h
index 4b0c05f..67818d2 100644
@@ -1025,23 +1025,28 @@ class Heap {
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  bool CollectGarbage(AllocationSpace space, GarbageCollector collector);
+  bool CollectGarbage(AllocationSpace space,
+                      GarbageCollector collector,
+                      const char* gc_reason,
+                      const char* collector_reason);
 
   // Performs garbage collection operation.
   // Returns whether there is a chance that another major GC could
   // collect more garbage.
-  inline bool CollectGarbage(AllocationSpace space);
+  inline bool CollectGarbage(AllocationSpace space,
+                             const char* gc_reason = NULL);
 
   static const int kNoGCFlags = 0;
   static const int kMakeHeapIterableMask = 1;
+  static const int kReduceMemoryFootprintMask = 2;
 
   // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
   // non-zero, then the slower precise sweeper is used, which leaves the heap
   // in a state where we can iterate over the heap visiting all objects.
-  void CollectAllGarbage(int flags);
+  void CollectAllGarbage(int flags, const char* gc_reason = NULL);
 
   // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage();
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
 
   // Check whether the heap is currently iterable.
   bool IsHeapIterable();
@@ -1741,7 +1746,8 @@ class Heap {
   }
 
   // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space);
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
 
   // Performs garbage collection
   // Returns whether there is a chance another major GC could
@@ -2364,7 +2370,9 @@ class GCTracer BASE_EMBEDDED {
     double start_time_;
   };
 
-  explicit GCTracer(Heap* heap);
+  explicit GCTracer(Heap* heap,
+                    const char* gc_reason,
+                    const char* collector_reason);
   ~GCTracer();
 
   // Sets the collector.
@@ -2432,6 +2440,9 @@ class GCTracer BASE_EMBEDDED {
   double steps_took_since_last_gc_;
 
   Heap* heap_;
+
+  const char* gc_reason_;
+  const char* collector_reason_;
 };
 
 
diff --git a/src/log.cc b/src/log.cc
index 5e82872..3979719 100644
@@ -1521,7 +1521,8 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) {
 
 
 void Logger::LogCodeObjects() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogCodeObjects");
   HeapIterator iterator;
   AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
@@ -1576,7 +1577,8 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared,
 
 
 void Logger::LogCompiledFunctions() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogCompiledFunctions");
   HandleScope scope;
   const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL);
   ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count);
@@ -1595,7 +1597,8 @@ void Logger::LogCompiledFunctions() {
 
 
 void Logger::LogAccessorCallbacks() {
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "Logger::LogAccessorCallbacks");
   HeapIterator iterator;
   AssertNoAllocation no_alloc;
   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index a9107bd..fd25a40 100644
@@ -46,6 +46,7 @@ MarkBit Marking::MarkBitFrom(Address addr) {
 
 void MarkCompactCollector::SetFlags(int flags) {
   sweep_precisely_ = ((flags & Heap::kMakeHeapIterableMask) != 0);
+  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
 }
 
 
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 8cd9d02..124e439 100644
@@ -230,6 +230,18 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
 }
 
 
+static void TraceFragmentation(PagedSpace* space) {
+  int number_of_pages = space->CountTotalPages();
+  intptr_t reserved = (number_of_pages * Page::kObjectAreaSize);
+  intptr_t free = reserved - space->SizeOfObjects();
+  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+         AllocationSpaceName(space->identity()),
+         number_of_pages,
+         static_cast<int>(free),
+         static_cast<double>(free) * 100 / reserved);
+}
+
+
 bool MarkCompactCollector::StartCompaction() {
   if (!compacting_) {
     ASSERT(evacuation_candidates_.length() == 0);
@@ -239,6 +251,13 @@ bool MarkCompactCollector::StartCompaction() {
 
     if (FLAG_compact_code_space) {
       CollectEvacuationCandidates(heap()->code_space());
+    } else if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->code_space());
+    }
+
+    if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->map_space());
+      TraceFragmentation(heap()->cell_space());
     }
 
     heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
@@ -414,6 +433,65 @@ const char* AllocationSpaceName(AllocationSpace space) {
 }
 
 
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them.  Otherwise a positive integer that gives an
+// estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+  // If page was not swept then there are no free list items on it.
+  if (!p->WasSwept()) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%s]: %d bytes live (unswept)\n",
+             reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()),
+             p->LiveBytes());
+    }
+    return 0;
+  }
+
+  FreeList::SizeStats sizes;
+  space->CountFreeListItems(p, &sizes);
+
+  intptr_t ratio;
+  intptr_t ratio_threshold;
+  if (space->identity() == CODE_SPACE) {
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
+        Page::kObjectAreaSize;
+    ratio_threshold = 10;
+  } else {
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
+        Page::kObjectAreaSize;
+    ratio_threshold = 15;
+  }
+
+  if (FLAG_trace_fragmentation) {
+    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+           reinterpret_cast<void*>(p),
+           AllocationSpaceName(space->identity()),
+           static_cast<int>(sizes.small_size_),
+           static_cast<double>(sizes.small_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.medium_size_),
+           static_cast<double>(sizes.medium_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.large_size_),
+           static_cast<double>(sizes.large_size_ * 100) /
+           Page::kObjectAreaSize,
+           static_cast<int>(sizes.huge_size_),
+           static_cast<double>(sizes.huge_size_ * 100) /
+           Page::kObjectAreaSize,
+           (ratio > ratio_threshold) ? "[fragmented]" : "");
+  }
+
+  if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+    return 1;
+  }
+
+  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+  return static_cast<int>(ratio - ratio_threshold);
+}
+
+
 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   ASSERT(space->identity() == OLD_POINTER_SPACE ||
          space->identity() == OLD_DATA_SPACE ||
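
The ratio in FreeListFragmentation above deliberately weights the small and
medium free-list buckets: a page whose free space is scattered in small
chunks is hard to reuse in place and therefore worth evacuating. A worked
example with hypothetical bucket sizes, assuming an object area of roughly
1 MB:

  // Hypothetical old-space (non-code) page; the large and huge buckets
  // are ignored by the formula for this space.
  intptr_t area = 1024 * KB;   // stand-in for Page::kObjectAreaSize
  intptr_t small_size = 100 * KB;
  intptr_t medium_size = 50 * KB;
  intptr_t ratio = (small_size * 5 + medium_size) * 100 / area;  // == 53
  // 53 > ratio_threshold (15), so the page scores 53 - 15 = 38.
  // The same 150 KB of free space in one large chunk would score 0.
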
@@ -421,7 +499,6 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 
   int number_of_pages = space->CountTotalPages();
 
-  PageIterator it(space);
   const int kMaxMaxEvacuationCandidates = 1000;
   int max_evacuation_candidates = Min(
     kMaxMaxEvacuationCandidates,
@@ -444,22 +521,89 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
     Page* page_;
   };
 
+  enum CompactionMode {
+    COMPACT_FREE_LISTS,
+    REDUCE_MEMORY_FOOTPRINT
+  };
+
+  CompactionMode mode = COMPACT_FREE_LISTS;
+
+  intptr_t reserved = number_of_pages * Page::kObjectAreaSize;
+  intptr_t over_reserved = reserved - space->SizeOfObjects();
+  static const intptr_t kFreenessThreshold = 50;
+
+  if (over_reserved >= 2 * Page::kObjectAreaSize &&
+      reduce_memory_footprint_) {
+    mode = REDUCE_MEMORY_FOOTPRINT;
+
+    // We expect that empty pages are easier to compact so slightly bump the
+    // limit.
+    max_evacuation_candidates += 2;
+
+    if (FLAG_trace_fragmentation) {
+      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+             static_cast<double>(over_reserved) / MB,
+             kFreenessThreshold);
+    }
+  }
+
+  intptr_t estimated_release = 0;
+
   Candidate candidates[kMaxMaxEvacuationCandidates];
 
   int count = 0;
-  if (it.has_next()) it.next();  // Never compact the first page.
   int fragmentation = 0;
   Candidate* least = NULL;
+
+  PageIterator it(space);
+  if (it.has_next()) it.next();  // Never compact the first page.
+
   while (it.has_next()) {
     Page* p = it.next();
     p->ClearEvacuationCandidate();
+
     if (FLAG_stress_compaction) {
       int counter = space->heap()->ms_count();
       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
       if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+      // Don't try to release too many pages.
+      if (estimated_release >= ((over_reserved * 3) / 4)) {
+        continue;
+      }
+
+      intptr_t free_bytes = 0;
+
+      if (!p->WasSwept()) {
+        free_bytes = (Page::kObjectAreaSize - p->LiveBytes());
+      } else {
+        FreeList::SizeStats sizes;
+        space->CountFreeListItems(p, &sizes);
+        free_bytes = sizes.Total();
+      }
+
+      int free_pct = static_cast<int>(free_bytes * 100 / Page::kObjectAreaSize);
+
+      if (free_pct >= kFreenessThreshold) {
+        estimated_release += Page::kObjectAreaSize +
+            (Page::kObjectAreaSize - free_bytes);
+        fragmentation = free_pct;
+      } else {
+        fragmentation = 0;
+      }
+
+      if (FLAG_trace_fragmentation) {
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
+               reinterpret_cast<void*>(p),
+               AllocationSpaceName(space->identity()),
+               free_bytes,
+               static_cast<double>(free_bytes * 100) / Page::kObjectAreaSize,
+               (fragmentation > 0) ? "[fragmented]" : "");
+      }
     } else {
-      fragmentation = space->Fragmentation(p);
+      fragmentation = FreeListFragmentation(space, p);
     }
+
     if (fragmentation != 0) {
       if (count < max_evacuation_candidates) {
         candidates[count++] = Candidate(fragmentation, p);
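
In REDUCE_MEMORY_FOOTPRINT mode a page qualifies on raw freeness rather than
on the weighted free-list ratio. Continuing the hypothetical ~1 MB
object-area assumption: a swept page with 600 KB on its free lists has
free_pct == 58, which clears kFreenessThreshold (50), so fragmentation is
set to 58 and the page becomes a candidate. estimated_release then grows by
the page's whole object area plus its ~424 KB of live bytes, and once it
reaches three quarters of over_reserved the remaining pages are skipped.
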
@@ -479,6 +623,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       }
     }
   }
+
   for (int i = 0; i < count; i++) {
     AddEvacuationCandidate(candidates[i].page());
   }
@@ -3242,6 +3387,8 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
+    p->ResetLiveBytes();
+    space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
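
Releasing evacuated candidates here is what the commit title refers to:
instead of a compacted page surviving as a mostly-empty page in the space,
its live-byte count is reset and the page is released back to the space.
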
diff --git a/src/mark-compact.h b/src/mark-compact.h
index a911b49..135f220 100644
@@ -374,7 +374,7 @@ class SlotsBuffer {
   static const int kNumberOfElements = 1021;
 
  private:
-  static const int kChainLengthThreshold = 6;
+  static const int kChainLengthThreshold = 15;
 
   intptr_t idx_;
   intptr_t chain_length_;
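
Each SlotsBuffer holds kNumberOfElements == 1021 slots, so the new threshold
lets a chain record roughly 15 * 1021 = 15315 slots for a page (up from
about 6 * 1021 = 6126) before slot recording overflows and, presumably,
compaction of the affected page is abandoned. This is the "more aggressive"
part of the commit message.
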
@@ -572,6 +572,8 @@ class MarkCompactCollector {
   // heap.
   bool sweep_precisely_;
 
+  bool reduce_memory_footprint_;
+
   // True if we are collecting slots to perform evacuation from evacuation
   // candidates.
   bool compacting_;
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 82871c0..d1620bf 100644
@@ -312,7 +312,7 @@ int main(int argc, char** argv) {
   }
   // If we don't do this then we end up with a stray root pointing at the
   // context even after we have disposed of the context.
-  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+  HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot");
   i::Object* raw_context = *(v8::Utils::OpenHandle(*context));
   context.Dispose();
   CppByteSink sink(argv[1]);
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 614287f..70f881d 100644
@@ -1562,7 +1562,8 @@ void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
 
 Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById(uint64_t id) {
   // First perform a full GC in order to avoid dead objects.
-  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                          "HeapSnapshotsCollection::FindHeapObjectById");
   AssertNoAllocation no_allocation;
   HeapObject* object = NULL;
   HeapIterator iterator(HeapIterator::kFilterUnreachable);
@@ -3056,8 +3057,12 @@ bool HeapSnapshotGenerator::GenerateSnapshot() {
   // full GC is reachable from the root when computing dominators.
   // This is not true for weakly reachable objects.
   // As a temporary solution we call GC twice.
-  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
-  Isolate::Current()->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  Isolate::Current()->heap()->CollectAllGarbage(
+      Heap::kMakeHeapIterableMask,
+      "HeapSnapshotGenerator::GenerateSnapshot");
+  Isolate::Current()->heap()->CollectAllGarbage(
+      Heap::kMakeHeapIterableMask,
+      "HeapSnapshotGenerator::GenerateSnapshot");
 
 #ifdef DEBUG
   Heap* debug_heap = Isolate::Current()->heap();
diff --git a/src/runtime.cc b/src/runtime.cc
index 7f3d318..308ef86 100644
@@ -12445,7 +12445,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) {
   ASSERT(args.length() == 3);
 
   // First perform a full GC in order to avoid references from dead objects.
-  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                     "%DebugReferencedBy");
   // The heap iterator reserves the right to do a GC to make the heap iterable.
   // Due to the GC above we know it won't need to do that, but it seems cleaner
   // to get the heap iterator constructed before we start having unprotected
@@ -12536,7 +12537,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
   ASSERT(args.length() == 2);
 
   // First perform a full GC in order to avoid dead objects.
-  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask);
+  isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+                                     "%DebugConstructedBy");
 
   // Check parameters.
   CONVERT_CHECKED(JSFunction, constructor, args[0]);
@@ -12934,7 +12936,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) {
 // Performs a GC.
 // Presently, it only does a full GC.
 RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) {
-  isolate->heap()->CollectAllGarbage(true);
+  isolate->heap()->CollectAllGarbage(true, "%CollectGarbage");
   return isolate->heap()->undefined_value();
 }
 
@@ -13645,12 +13647,14 @@ void Runtime::PerformGC(Object* result) {
     }
     // Try to do a garbage collection; ignore it if it fails. The C
     // entry stub will throw an out-of-memory exception in that case.
-    isolate->heap()->CollectGarbage(failure->allocation_space());
+    isolate->heap()->CollectGarbage(failure->allocation_space(),
+                                    "Runtime::PerformGC");
   } else {
     // Handle last resort GC and make sure to allow future allocations
     // to grow the heap without causing GCs (if possible).
     isolate->counters()->gc_last_resort_from_js()->Increment();
-    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags);
+    isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags,
+                                       "Runtime::PerformGC");
   }
 }
 
diff --git a/src/spaces.h b/src/spaces.h
index 1a30078..0671b4a 100644
@@ -1589,50 +1589,8 @@ class PagedSpace : public Space {
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
-  // Returns zero for pages that have so little fragmentation that it is not
-  // worth defragmenting them.  Otherwise a positive integer that gives an
-  // estimate of fragmentation on an arbitrary scale.
-  int Fragmentation(Page* p) {
-    FreeList::SizeStats sizes;
-    free_list_.CountFreeListItems(p, &sizes);
-
-    intptr_t ratio;
-    intptr_t ratio_threshold;
-    if (identity() == CODE_SPACE) {
-      ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-          Page::kObjectAreaSize;
-      ratio_threshold = 10;
-    } else {
-      ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-          Page::kObjectAreaSize;
-      ratio_threshold = 15;
-    }
-
-    if (FLAG_trace_fragmentation) {
-      PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-             reinterpret_cast<void*>(p),
-             identity(),
-             static_cast<int>(sizes.small_size_),
-             static_cast<double>(sizes.small_size_ * 100) /
-                 Page::kObjectAreaSize,
-             static_cast<int>(sizes.medium_size_),
-             static_cast<double>(sizes.medium_size_ * 100) /
-                 Page::kObjectAreaSize,
-             static_cast<int>(sizes.large_size_),
-             static_cast<double>(sizes.large_size_ * 100) /
-                 Page::kObjectAreaSize,
-             static_cast<int>(sizes.huge_size_),
-             static_cast<double>(sizes.huge_size_ * 100) /
-                 Page::kObjectAreaSize,
-             (ratio > ratio_threshold) ? "[fragmented]" : "");
-    }
-
-    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
-      return 1;
-    }
-    if (ratio <= ratio_threshold) return 0;  // Not fragmented.
-
-    return static_cast<int>(ratio - ratio_threshold);
+  void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
+    free_list_.CountFreeListItems(p, sizes);
   }
 
   void EvictEvacuationCandidatesFromFreeLists();