Remove a race between sweepers and the free space skipping code (while iterating...
authorjarin@chromium.org <jarin@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 14 May 2014 12:35:13 +0000 (12:35 +0000)
committerjarin@chromium.org <jarin@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Wed, 14 May 2014 12:35:13 +0000 (12:35 +0000)
There has been a race between a sweeper thread setting the next pointer on
a free-list node and the main thread skipping free space while updating
pointers to new space in the heap.

This change removes the free space skipping code.

R=hpayer@chromium.org
BUG=370551
LOG=N

Review URL: https://codereview.chromium.org/285733003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21306 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/store-buffer.cc
src/store-buffer.h

index b0f7d2f..547359a 100644 (file)
@@ -332,27 +332,6 @@ void StoreBuffer::GCPrologue() {
 
 
 #ifdef VERIFY_HEAP
-static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
-  // Do nothing.
-}
-
-
-void StoreBuffer::VerifyPointers(PagedSpace* space,
-                                 RegionCallback region_callback) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* page = it.next();
-    FindPointersToNewSpaceOnPage(
-        reinterpret_cast<PagedSpace*>(page->owner()),
-        page,
-        region_callback,
-        &DummyScavengePointer,
-        false);
-  }
-}
-
-
 void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
   LargeObjectIterator it(space);
   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
@@ -378,10 +357,6 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
 
 void StoreBuffer::Verify() {
 #ifdef VERIFY_HEAP
-  VerifyPointers(heap_->old_pointer_space(),
-                 &StoreBuffer::FindPointersToNewSpaceInRegion);
-  VerifyPointers(heap_->map_space(),
-                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
   VerifyPointers(heap_->lo_space());
 #endif
 }
@@ -482,94 +457,6 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
 }
 
 
-// This function iterates over all the pointers in a paged space in the heap,
-// looking for pointers into new space.  Within the pages there may be dead
-// objects that have not been overwritten by free spaces or fillers because of
-// concurrent sweeping.  These dead objects may not contain pointers to new
-// space. The garbage areas that have been swept properly (these will normally
-// be the large ones) will be marked with free space and filler map words.  In
-// addition any area that has never been used at all for object allocation must
-// be marked with a free space or filler.  Because the free space and filler
-// maps do not move we can always recognize these even after a compaction.
-// Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps.  Constant pool array objects may contain references to these
-// maps, however, constant pool arrays cannot contain pointers to new space
-// objects, therefore they are skipped.  The special garbage section (see
-// comment in spaces.h) is skipped since it can contain absolutely anything.
-// Any objects that are allocated during iteration may or may not be visited by
-// the iteration, but they will not be partially visited.
-void StoreBuffer::FindPointersToNewSpaceOnPage(
-    PagedSpace* space,
-    Page* page,
-    RegionCallback region_callback,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  Address visitable_start = page->area_start();
-  Address end_of_page = page->area_end();
-
-  Address visitable_end = visitable_start;
-
-  Object* free_space_map = heap_->free_space_map();
-  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
-  Object* constant_pool_array_map = heap_->constant_pool_array_map();
-
-  while (visitable_end < end_of_page) {
-    // The sweeper thread concurrently may write free space maps and size to
-    // this page. We need acquire load here to make sure that we get a
-    // consistent view of maps and their sizes.
-    Object* o = reinterpret_cast<Object*>(
-        Acquire_Load(reinterpret_cast<AtomicWord*>(visitable_end)));
-    // Skip fillers or constant pool arrays (which never contain new-space
-    // pointers but can contain pointers which can be confused for fillers)
-    // but not things that look like fillers in the special garbage section
-    // which can contain anything.
-    if (o == free_space_map ||
-        o == two_pointer_filler_map ||
-        o == constant_pool_array_map ||
-        (visitable_end == space->top() && visitable_end != space->limit())) {
-      if (visitable_start != visitable_end) {
-        // After calling this the special garbage section may have moved.
-        (this->*region_callback)(visitable_start,
-                                 visitable_end,
-                                 slot_callback,
-                                 clear_maps);
-        if (visitable_end >= space->top() && visitable_end < space->limit()) {
-          visitable_end = space->limit();
-          visitable_start = visitable_end;
-          continue;
-        }
-      }
-      if (visitable_end == space->top() && visitable_end != space->limit()) {
-        visitable_start = visitable_end = space->limit();
-      } else {
-        // At this point we are either at the start of a filler, a
-        // constant pool array, or we are at the point where the space->top()
-        // used to be before the visit_pointer_region call above.  Either way we
-        // can skip the object at the current spot:  We don't promise to visit
-        // objects allocated during heap traversal, and if space->top() moved
-        // then it must be because an object was allocated at this point.
-        visitable_start =
-            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-        visitable_end = visitable_start;
-      }
-    } else {
-      ASSERT(o != free_space_map);
-      ASSERT(o != two_pointer_filler_map);
-      ASSERT(o != constant_pool_array_map);
-      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-      visitable_end += kPointerSize;
-    }
-  }
-  ASSERT(visitable_end == end_of_page);
-  if (visitable_start != visitable_end) {
-    (this->*region_callback)(visitable_start,
-                             visitable_end,
-                             slot_callback,
-                             clear_maps);
-  }
-}
-
-
 void StoreBuffer::IteratePointersInStoreBuffer(
     ObjectSlotCallback slot_callback,
     bool clear_maps) {
@@ -656,14 +543,15 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          FindPointersToNewSpaceOnPage(
-              owner,
-              page,
-              (owner == heap_->map_space() ?
-                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
-                 &StoreBuffer::FindPointersToNewSpaceInRegion),
-              slot_callback,
-              clear_maps);
+          Address start = page->area_start();
+          Address end = page->area_end();
+          if (owner == heap_->map_space()) {
+            FindPointersToNewSpaceInMapsRegion(
+                start, end, slot_callback, clear_maps);
+          } else {
+            FindPointersToNewSpaceInRegion(
+                start, end, slot_callback, clear_maps);
+          }
         }
       }
     }
index bf3a3f7..f775afb 100644 (file)
@@ -180,18 +180,10 @@ class StoreBuffer {
     ObjectSlotCallback slot_callback,
     bool clear_maps);
 
-  void FindPointersToNewSpaceOnPage(
-    PagedSpace* space,
-    Page* page,
-    RegionCallback region_callback,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps);
-
   void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                     bool clear_maps);
 
 #ifdef VERIFY_HEAP
-  void VerifyPointers(PagedSpace* space, RegionCallback region_callback);
   void VerifyPointers(LargeObjectSpace* space);
 #endif