Remove eager sweeping for lazy swept spaces. Try to find in SlowAllocateRaw a bounded...
author    hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 26 Nov 2012 14:50:20 +0000 (14:50 +0000)
committer hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Mon, 26 Nov 2012 14:50:20 +0000 (14:50 +0000)
BUG=v8:2194

Review URL: https://codereview.chromium.org/11420036

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13058 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
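
The change has two parts. In MarkCompactCollector::SweepSpace, lazy sweeping no longer sweeps eagerly until twice the new-space size has been freed; after the first conservatively swept page, the remaining pages are simply deferred to the lazy sweeper. In PagedSpace::SlowAllocateRaw, the allocation slow path now advances the lazy sweeper a bounded number of times, retrying the free-list allocation after each step, instead of advancing it only once. A minimal sketch of the resulting slow path follows; the loop, kMaxSweepingTries, AdvanceSweeper, and free_list_ are taken from the diff below, while the fallback after the loop is paraphrased because the second hunk is truncated:

  HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
    // Allocation from the free list has failed; before growing the space,
    // try to make progress by advancing the lazy sweeper a bounded number
    // of times.
    const int kMaxSweepingTries = 5;  // bound introduced by this patch
    bool sweeping_complete = false;

    for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
      // AdvanceSweeper sweeps some unswept pages and, as used here, reports
      // whether lazy sweeping has finished.
      sweeping_complete = AdvanceSweeper(size_in_bytes);

      // Retry the free-list allocation after each sweeping step.
      HeapObject* object = free_list_.Allocate(size_in_bytes);
      if (object != NULL) return object;
    }

    // Fallback (paraphrased, not shown in the truncated hunk below): expand
    // the space or finish sweeping, retry the free list once more, and
    // return NULL if the allocation still cannot be satisfied.
    return NULL;
  }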

src/mark-compact.cc
src/spaces.cc

diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 30abe6d..aa1900b 100644
@@ -3525,7 +3525,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
 
   intptr_t freed_bytes = 0;
   int pages_swept = 0;
-  intptr_t newspace_size = space->heap()->new_space()->Size();
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
@@ -3588,15 +3587,8 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         }
         freed_bytes += SweepConservatively(space, p);
         pages_swept++;
-        if (freed_bytes > 2 * newspace_size) {
-          space->SetPagesToSweep(p->next_page());
-          lazy_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
-                   freed_bytes);
-          }
-        }
+        space->SetPagesToSweep(p->next_page());
+        lazy_sweeping_active = true;
         break;
       }
       case PRECISE: {
diff --git a/src/spaces.cc b/src/spaces.cc
index 0ac23d2..cacd969 100644
@@ -2391,10 +2391,13 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
-  // If there are unswept pages advance lazy sweeper then sweep one page before
-  // allocating a new page.
-  if (first_unswept_page_->is_valid()) {
-    AdvanceSweeper(size_in_bytes);
+  // If there are unswept pages advance lazy sweeper a bounded number of times
+  // until we find a size_in_bytes contiguous piece of memory
+  const int kMaxSweepingTries = 5;
+  bool sweeping_complete = false;
+
+  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
+    sweeping_complete = AdvanceSweeper(size_in_bytes);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);