Only sweep one page eagerly unless we are running out of space.
author erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Tue, 29 Nov 2011 11:56:14 +0000 (11:56 +0000)
committer erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Tue, 29 Nov 2011 11:56:14 +0000 (11:56 +0000)
Limit the number of pages that are compacted in a given GC.
Review URL: http://codereview.chromium.org/8692002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10084 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/heap.cc
src/heap.h
src/mark-compact.cc
src/spaces.cc
src/spaces.h

index 34f5b8b..4a048a1 100644 (file)
@@ -6304,7 +6304,19 @@ GCTracer::~GCTracer() {
     PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
     PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
     PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
-    PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
+    PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES]));
+    PrintF("new_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]));
+    PrintF("root_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]));
+    PrintF("old_new=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]));
+    PrintF("compaction_ptrs=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]));
+    PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[
+        Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]));
+    PrintF("misc_compaction=%d ",
+           static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS]));
 
     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
index 081c466..3116895 100644 (file)
@@ -2259,7 +2259,13 @@ class GCTracer BASE_EMBEDDED {
       MC_MARK,
       MC_SWEEP,
       MC_SWEEP_NEWSPACE,
-      MC_COMPACT,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
       MC_FLUSH_CODE,
       kNumberOfScopes
     };
index 493fe66..6064060 100644 (file)
@@ -418,26 +418,69 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);
 
+  int number_of_pages = space->CountTotalPages();
+
   PageIterator it(space);
+  const int kMaxMaxEvacuationCandidates = 1000;
+  int max_evacuation_candidates = Min(
+    kMaxMaxEvacuationCandidates,
+    static_cast<int>(sqrt(number_of_pages / 2) + 1));
+
+  if (FLAG_stress_compaction || FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
+  class Candidate {
+   public:
+    Candidate() : fragmentation_(0), page_(NULL) { }
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
+
+    int fragmentation() { return fragmentation_; }
+    Page* page() { return page_; }
+
+   private:
+    int fragmentation_;
+    Page* page_;
+  };
+
+  Candidate candidates[kMaxMaxEvacuationCandidates];
+
   int count = 0;
   if (it.has_next()) it.next();  // Never compact the first page.
+  int fragmentation = 0;
+  Candidate* least = NULL;
   while (it.has_next()) {
     Page* p = it.next();
-    bool evacuate = false;
+    p->ClearEvacuationCandidate();
     if (FLAG_stress_compaction) {
       int counter = space->heap()->ms_count();
       uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
-      if ((counter & 1) == (page_number & 1)) evacuate = true;
+      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
     } else {
-      if (space->IsFragmented(p)) evacuate = true;
+      fragmentation = space->Fragmentation(p);
     }
-    if (evacuate) {
-      AddEvacuationCandidate(p);
-      count++;
-    } else {
-      p->ClearEvacuationCandidate();
+    if (fragmentation != 0) {
+      if (count < max_evacuation_candidates) {
+        candidates[count++] = Candidate(fragmentation, p);
+      } else {
+        if (least == NULL) {
+          for (int i = 0; i < max_evacuation_candidates; i++) {
+            if (least == NULL ||
+                candidates[i].fragmentation() < least->fragmentation()) {
+              least = candidates + i;
+            }
+          }
+        }
+        if (least->fragmentation() < fragmentation) {
+          *least = Candidate(fragmentation, p);
+          least = NULL;
+        }
+      }
     }
   }
+  for (int i = 0; i < count; i++) {
+    AddEvacuationCandidate(candidates[i].page());
+  }
 
   if (count > 0 && FLAG_trace_fragmentation) {
     PrintF("Collected %d evacuation candidates for space %s\n",
@@ -2953,109 +2996,131 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
 
 
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
-  bool code_slots_filtering_required = MarkInvalidatedCode();
+  bool code_slots_filtering_required;
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    code_slots_filtering_required = MarkInvalidatedCode();
+
+    EvacuateNewSpace();
+  }
 
-  EvacuateNewSpace();
-  EvacuatePages();
+
+  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuatePages();
+  }
 
   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());
 
-  // Update pointers in to space.
-  SemiSpaceIterator to_it(heap()->new_space()->bottom(),
-                          heap()->new_space()->top());
-  for (HeapObject* object = to_it.Next();
-       object != NULL;
-       object = to_it.Next()) {
-    Map* map = object->map();
-    object->IterateBody(map->instance_type(),
-                        object->SizeFromMap(map),
-                        &updating_visitor);
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to space.
+    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                            heap()->new_space()->top());
+    for (HeapObject* object = to_it.Next();
+         object != NULL;
+         object = to_it.Next()) {
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(),
+                          object->SizeFromMap(map),
+                          &updating_visitor);
+    }
   }
 
-  // Update roots.
-  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  LiveObjectList::IterateElements(&updating_visitor);
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+    // Update roots.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+    LiveObjectList::IterateElements(&updating_visitor);
+  }
 
-  {
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
     StoreBufferRebuildScope scope(heap_,
                                   heap_->store_buffer(),
                                   &Heap::ScavengeStoreBufferCallback);
     heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
   }
 
-  SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                     migration_slots_buffer_,
-                                     code_slots_filtering_required);
-  if (FLAG_trace_fragmentation) {
-    PrintF("  migration slots buffer: %d\n",
-           SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-  }
-
-  if (compacting_ && was_marked_incrementally_) {
-    // It's difficult to filter out slots recorded for large objects.
-    LargeObjectIterator it(heap_->lo_space());
-    for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-      // LargeObjectSpace is not swept yet thus we have to skip
-      // dead objects explicitly.
-      if (!IsMarked(obj)) continue;
-
-      Page* p = Page::FromAddress(obj->address());
-      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-        obj->Iterate(&updating_visitor);
-        p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+  { GCTracer::Scope gc_scope(tracer_,
+                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                       migration_slots_buffer_,
+                                       code_slots_filtering_required);
+    if (FLAG_trace_fragmentation) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+
+    if (compacting_ && was_marked_incrementally_) {
+      // It's difficult to filter out slots recorded for large objects.
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        // LargeObjectSpace is not swept yet thus we have to skip
+        // dead objects explicitly.
+        if (!IsMarked(obj)) continue;
+
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          obj->Iterate(&updating_visitor);
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
       }
     }
   }
 
   int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    ASSERT(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-
-    if (p->IsEvacuationCandidate()) {
-      SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                         p->slots_buffer(),
-                                         code_slots_filtering_required);
-      if (FLAG_trace_fragmentation) {
-        PrintF("  page %p slots buffer: %d\n",
-               reinterpret_cast<void*>(p),
-               SlotsBuffer::SizeOfChain(p->slots_buffer()));
-      }
-
-      // Important: skip list should be cleared only after roots were updated
-      // because root iteration traverses the stack and might have to find code
-      // objects from non-updated pc pointing into evacuation candidate.
-      SkipList* list = p->skip_list();
-      if (list != NULL) list->Clear();
-    } else {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+  { GCTracer::Scope gc_scope(
+      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      ASSERT(p->IsEvacuationCandidate() ||
+             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+      if (p->IsEvacuationCandidate()) {
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
+                                           p->slots_buffer(),
+                                           code_slots_filtering_required);
+        if (FLAG_trace_fragmentation) {
+          PrintF("  page %p slots buffer: %d\n",
+                 reinterpret_cast<void*>(p),
+                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
+        }
 
-      switch (space->identity()) {
-        case OLD_DATA_SPACE:
-          SweepConservatively(space, p);
-          break;
-        case OLD_POINTER_SPACE:
-          SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
-              space, p, &updating_visitor);
-          break;
-        case CODE_SPACE:
-          SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
-              space, p, &updating_visitor);
-          break;
-        default:
-          UNREACHABLE();
-          break;
+        // Important: skip list should be cleared only after roots were updated
+        // because root iteration traverses the stack and might have to find
+        // code objects from non-updated pc pointing into evacuation candidate.
+        SkipList* list = p->skip_list();
+        if (list != NULL) list->Clear();
+      } else {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+        switch (space->identity()) {
+          case OLD_DATA_SPACE:
+            SweepConservatively(space, p);
+            break;
+          case OLD_POINTER_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          case CODE_SPACE:
+            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
+                space, p, &updating_visitor);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
       }
     }
   }
 
+  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+
   // Update pointers from cells.
   HeapObjectIterator cell_iterator(heap_->cell_space());
   for (HeapObject* cell = cell_iterator.Next();
@@ -3482,8 +3547,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
 }
 
 
-void MarkCompactCollector::SweepSpace(PagedSpace* space,
-                                      SweeperType sweeper) {
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                       sweeper == LAZY_CONSERVATIVE);
 
@@ -3492,10 +3556,16 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
   PageIterator it(space);
 
   intptr_t freed_bytes = 0;
+  int pages_swept = 0;
   intptr_t newspace_size = space->heap()->new_space()->Size();
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
+  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t space_left =
+      Min(heap()->OldGenPromotionLimit(old_space_size),
+          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
+
   while (it.has_next()) {
     Page* p = it.next();
 
@@ -3534,31 +3604,45 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
       unused_page_present = true;
     }
 
-    if (FLAG_gc_verbose) {
-      PrintF("Sweeping 0x%" V8PRIxPTR " with sweeper %d.\n",
-             reinterpret_cast<intptr_t>(p),
-             sweeper);
-    }
-
     switch (sweeper) {
       case CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
         SweepConservatively(space, p);
+        pages_swept++;
         break;
       }
       case LAZY_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
         freed_bytes += SweepConservatively(space, p);
-        if (freed_bytes >= newspace_size && p != space->LastPage()) {
+        pages_swept++;
+        if (space_left + freed_bytes > newspace_size) {
           space->SetPagesToSweep(p->next_page());
           lazy_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
+                   freed_bytes);
+          }
         }
         break;
       }
       case PRECISE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
         if (space->identity() == CODE_SPACE) {
           SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
         } else {
           SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
         }
+        pages_swept++;
         break;
       }
       default: {
@@ -3567,6 +3651,12 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space,
     }
   }
 
+  if (FLAG_gc_verbose) {
+    PrintF("SweepSpace: %s (%d pages swept)\n",
+           AllocationSpaceName(space->identity()),
+           pages_swept);
+  }
+
   // Give pages that are queued to be freed back to the OS.
   heap()->FreeQueuedChunks();
 }
@@ -3593,9 +3683,7 @@ void MarkCompactCollector::SweepSpaces() {
 
   SweepSpace(heap()->cell_space(), PRECISE);
 
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    EvacuateNewSpaceAndCandidates();
-  }
+  EvacuateNewSpaceAndCandidates();
 
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
index 53f6ff3..ee7c091 100644 (file)
@@ -738,7 +738,6 @@ bool PagedSpace::Expand() {
 }
 
 
-#ifdef DEBUG
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
@@ -748,7 +747,6 @@ int PagedSpace::CountTotalPages() {
   }
   return count;
 }
-#endif
 
 
 void PagedSpace::ReleasePage(Page* page) {
@@ -1853,6 +1851,13 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
   owner_->Free(owner_->top(), old_linear_size);
+
+#ifdef DEBUG
+  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+    reinterpret_cast<Object**>(new_node->address())[i] = Smi::FromInt(0);
+  }
+#endif
+
   owner_->heap()->incremental_marking()->OldSpaceStep(
       size_in_bytes - old_linear_size);
 
@@ -2443,8 +2448,17 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
   page->set_next_page(first_page_);
   first_page_ = page;
 
+  HeapObject* object = page->GetObject();
+
+#ifdef DEBUG
+  // Make the object consistent so the heap can be verified in OldSpaceStep.
+  reinterpret_cast<Object**>(object->address())[0] =
+      heap()->fixed_array_map();
+  reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+#endif
+
   heap()->incremental_marking()->OldSpaceStep(object_size);
-  return page->GetObject();
+  return object;
 }
 
 
index 8a05659..d2899fe 100644 (file)
@@ -1557,6 +1557,7 @@ class PagedSpace : public Space {
   }
 
   void SetPagesToSweep(Page* first) {
+    if (first == &anchor_) first = NULL;
     first_unswept_page_ = first;
   }
 
@@ -1569,7 +1570,10 @@ class PagedSpace : public Space {
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
-  bool IsFragmented(Page* p) {
+  // Returns zero for pages that have so little fragmentation that it is not
+  // worth defragmenting them.  Otherwise a positive integer that gives an
+  // estimate of fragmentation on an arbitrary scale.
+  int Fragmentation(Page* p) {
     FreeList::SizeStats sizes;
     free_list_.CountFreeListItems(p, &sizes);
 
@@ -1604,14 +1608,21 @@ class PagedSpace : public Space {
              (ratio > ratio_threshold) ? "[fragmented]" : "");
     }
 
-    return (ratio > ratio_threshold) ||
-        (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize);
+    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
+      return 1;
+    }
+    if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+    return static_cast<int>(ratio - ratio_threshold);
   }
 
   void EvictEvacuationCandidatesFromFreeLists();
 
   bool CanExpand();
 
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+
  protected:
   // Maximum capacity of this space.
   intptr_t max_capacity_;
@@ -1649,11 +1660,6 @@ class PagedSpace : public Space {
   // Slow path of AllocateRaw.  This function is space-dependent.
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
-#ifdef DEBUG
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-#endif
-
   friend class PageIterator;
 };