Revert "Allow partial scanning of large arrays in order to avoid"
author verwaest@chromium.org <verwaest@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Thu, 4 Oct 2012 11:09:17 +0000 (11:09 +0000)
committer verwaest@chromium.org <verwaest@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
Thu, 4 Oct 2012 11:09:17 +0000 (11:09 +0000)
This reverts commit r12619.

BUG=

Review URL: https://chromiumcodereview.appspot.com/11029023

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12660 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/heap.cc
src/incremental-marking.cc
src/mark-compact-inl.h
src/mark-compact.cc
src/mark-compact.h
src/objects-visiting-inl.h
src/objects-visiting.h
src/spaces.cc
src/spaces.h
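
For orientation: the change being reverted (r12619) taught both the incremental marker and the mark-compact marker to scan huge FixedArrays in the large-object space in bounded chunks, keeping a resume cursor on the owning MemoryChunk (the IS_PARTIALLY_SCANNED flag and partially_scanned_progress_ field removed below) instead of scanning the whole array in a single marking step. What follows is a minimal, self-contained sketch of that pattern, not V8 code: kScanningChunk mirrors the constant in the diff, while HugeArrayScanState, ScanHugeArrayStep, and the use of std::vector are illustrative stand-ins.

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  // Illustrative only: a generic resume cursor standing in for the
  // MemoryChunk progress state that the reverted change maintained.
  struct HugeArrayScanState {
    std::size_t progress = 0;        // next slot index to scan
    bool partially_scanned = false;  // true while a scan is in flight
  };

  constexpr std::size_t kScanningChunk = 32 * 1024;  // slots per increment

  // Visits at most kScanningChunk slots per call and records how far it
  // got, so one enormous array cannot monopolize a marking step.
  // Returns the number of slots actually visited.
  template <typename Visit>
  std::size_t ScanHugeArrayStep(const std::vector<void*>& array,
                                HugeArrayScanState* state, Visit visit) {
    std::size_t from = state->partially_scanned ? state->progress : 0;
    std::size_t to = std::min(from + kScanningChunk, array.size());
    for (std::size_t i = from; i < to; ++i) visit(array[i]);
    if (to == array.size()) {
      state->partially_scanned = false;  // done: drop the cursor
    } else {
      state->partially_scanned = true;   // remember where to resume
      state->progress = to;
    }
    return to - from;
  }

Reverting restores the simpler behaviour in which every array, regardless of size, is scanned in one go.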

diff --git a/src/heap.cc b/src/heap.cc
index 1336027..d5d1128 100644
@@ -1359,12 +1359,11 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
 
   if (external_string_table_.new_space_strings_.is_empty()) return;
 
-  Object** start_slot = &external_string_table_.new_space_strings_[0];
-  Object** end_slot =
-        start_slot + external_string_table_.new_space_strings_.length();
-  Object** last = start_slot;
+  Object** start = &external_string_table_.new_space_strings_[0];
+  Object** end = start + external_string_table_.new_space_strings_.length();
+  Object** last = start;
 
-  for (Object** p = start_slot; p < end_slot; ++p) {
+  for (Object** p = start; p < end; ++p) {
     ASSERT(InFromSpace(*p));
     String* target = updater_func(this, p);
 
@@ -1382,8 +1381,8 @@ void Heap::UpdateNewSpaceReferencesInExternalStringTable(
     }
   }
 
-  ASSERT(last <= end_slot);
-  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start_slot));
+  ASSERT(last <= end);
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
 }
 
 
@@ -1392,10 +1391,9 @@ void Heap::UpdateReferencesInExternalStringTable(
 
   // Update old space string references.
   if (external_string_table_.old_space_strings_.length() > 0) {
-    Object** start_slot = &external_string_table_.old_space_strings_[0];
-    Object** end_slot =
-        start_slot + external_string_table_.old_space_strings_.length();
-    for (Object** p = start_slot; p < end_slot; ++p) *p = updater_func(this, p);
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
   }
 
   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
@@ -6792,11 +6790,11 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
   // Scan the object body.
   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
     // This is specialized to scan Context's properly.
-    Object** start_slot = reinterpret_cast<Object**>(obj->address() +
-                                                     Context::kHeaderSize);
-    Object** end_slot = reinterpret_cast<Object**>(obj->address() +
+    Object** start = reinterpret_cast<Object**>(obj->address() +
+                                                Context::kHeaderSize);
+    Object** end = reinterpret_cast<Object**>(obj->address() +
         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
-    mark_visitor->VisitPointers(start_slot, end_slot);
+    mark_visitor->VisitPointers(start, end);
   } else {
     obj->IterateBody(map_p->instance_type(),
                      obj->SizeFromMap(map_p),
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 4aec1e9..32a084d 100644
@@ -190,11 +190,8 @@ class IncrementalMarkingMarkingVisitor
 
   static void VisitJSWeakMap(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
-    Object** start_slot =
-        HeapObject::RawField(object, JSWeakMap::kPropertiesOffset);
     VisitPointers(heap,
-                  start_slot,
-                  start_slot,
+                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
                   HeapObject::RawField(object, JSWeakMap::kSize));
   }
 
@@ -209,54 +206,15 @@ class IncrementalMarkingMarkingVisitor
                      void>::Visit(map, object);
   }
 
-  static const int kScanningChunk = 32 * 1024;
-
-  static int VisitHugeArray(FixedArray* array) {
-    Heap* heap = array->GetHeap();
-    MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
-    Object** start_slot = array->data_start();
-    int length = array->length();
-
-    if (chunk->owner()->identity() != LO_SPACE) {
-      VisitPointers(heap, start_slot, start_slot, start_slot + length);
-      return length;
-    }
-
-    int from =
-        chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
-    int to = Min(from + kScanningChunk, length);
-
-    VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
-
-    if (to == length) {
-      // If it went from black to grey while it was waiting for the next bit to
-      // be scanned then we have to start the scan again.
-      MarkBit mark_bit = Marking::MarkBitFrom(array);
-      if (!Marking::IsBlack(mark_bit)) {
-        ASSERT(Marking::IsGrey(mark_bit));
-        chunk->SetPartiallyScannedProgress(0);
-      } else {
-        chunk->SetCompletelyScanned();
-      }
-    } else {
-      chunk->SetPartiallyScannedProgress(to);
-    }
-    return to - from;
-  }
-
   static inline void VisitJSFunction(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
     // Iterate over all fields in the body but take care in dealing with
     // the code entry and skip weak fields.
-    Object** start_slot =
-        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
     VisitPointers(heap,
-                  start_slot,
-                  start_slot,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
     VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
     VisitPointers(heap,
-                  start_slot,
                   HeapObject::RawField(object,
                       JSFunction::kCodeEntryOffset + kPointerSize),
                   HeapObject::RawField(object,
@@ -271,14 +229,11 @@ class IncrementalMarkingMarkingVisitor
     }
   }
 
-  INLINE(static void VisitPointers(Heap* heap,
-                                   Object** anchor,
-                                   Object** start,
-                                   Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
     for (Object** p = start; p < end; p++) {
       Object* obj = *p;
       if (obj->NonFailureIsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+        heap->mark_compact_collector()->RecordSlot(start, p, obj);
         MarkObject(heap, obj);
       }
     }
@@ -680,8 +635,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
 #ifdef DEBUG
         MarkBit mark_bit = Marking::MarkBitFrom(obj);
         ASSERT(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
-               MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
+               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
 #endif
     }
   }
@@ -704,57 +658,36 @@ void IncrementalMarking::Hurry() {
     // was stopped.
     Map* filler_map = heap_->one_pointer_filler_map();
     Map* native_context_map = heap_->native_context_map();
-    do {
-      while (!marking_deque_.IsEmpty()) {
-        HeapObject* obj = marking_deque_.Pop();
-
-        // Explicitly skip one word fillers. Incremental markbit patterns are
-        // correct only for objects that occupy at least two words.
-        Map* map = obj->map();
-        if (map == filler_map) {
-          continue;
-        } else if (map == native_context_map) {
-          // Native contexts have weak fields.
-          IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
-          ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
-          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-        } else if (map->instance_type() == FIXED_ARRAY_TYPE &&
-          FixedArray::cast(obj)->length() >
-                IncrementalMarkingMarkingVisitor::kScanningChunk) {
-          MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-          if (Marking::IsWhite(map_mark_bit)) {
-            WhiteToGreyAndPush(map, map_mark_bit);
-          }
-          MarkBit mark_bit = Marking::MarkBitFrom(obj);
-          if (!Marking::IsBlack(mark_bit)) {
-            MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-          } else {
-            ASSERT(
-                MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
-          }
-          IncrementalMarkingMarkingVisitor::VisitHugeArray(
-                FixedArray::cast(obj));
-         } else {
-          MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-          if (Marking::IsWhite(map_mark_bit)) {
-            WhiteToGreyAndPush(map, map_mark_bit);
-          }
-          IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-          ASSERT(!Marking::IsBlack(Marking::MarkBitFrom(obj)));
-          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    while (!marking_deque_.IsEmpty()) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) {
+        continue;
+      } else if (map == native_context_map) {
+        // Native contexts have weak fields.
+        IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
+      } else {
+        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+        if (Marking::IsWhite(map_mark_bit)) {
+          WhiteToGreyAndPush(map, map_mark_bit);
         }
-
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        Marking::MarkBlack(mark_bit);
+        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
       }
-      state_ = COMPLETE;
-      if (FLAG_trace_incremental_marking) {
-        double end = OS::TimeCurrentMillis();
-        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-               static_cast<int>(end - start));
-      }
-      MarkCompactCollector::ProcessLargePostponedArrays(heap_, &marking_deque_);
-    } while (!marking_deque_.IsEmpty());
+
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      ASSERT(!Marking::IsBlack(mark_bit));
+      Marking::MarkBlack(mark_bit);
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    }
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking) {
+      double end = OS::TimeCurrentMillis();
+      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+             static_cast<int>(end - start));
+    }
   }
 
   if (FLAG_cleanup_code_caches_at_gc) {
@@ -889,71 +822,42 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
   } else if (state_ == MARKING) {
     Map* filler_map = heap_->one_pointer_filler_map();
     Map* native_context_map = heap_->native_context_map();
-    while (true) {
-      while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
-        HeapObject* obj = marking_deque_.Pop();
-
-        // Explicitly skip one word fillers. Incremental markbit patterns are
-        // correct only for objects that occupy at least two words.
-        Map* map = obj->map();
-        if (map == filler_map) continue;
+    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+      HeapObject* obj = marking_deque_.Pop();
+
+      // Explicitly skip one word fillers. Incremental markbit patterns are
+      // correct only for objects that occupy at least two words.
+      Map* map = obj->map();
+      if (map == filler_map) continue;
+
+      int size = obj->SizeFromMap(map);
+      bytes_to_process -= size;
+      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+      if (Marking::IsWhite(map_mark_bit)) {
+        WhiteToGreyAndPush(map, map_mark_bit);
+      }
 
-        int size = obj->SizeFromMap(map);
-        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-        if (Marking::IsWhite(map_mark_bit)) {
-          WhiteToGreyAndPush(map, map_mark_bit);
-        }
+      // TODO(gc) switch to static visitor instead of normal visitor.
+      if (map == native_context_map) {
+        // Native contexts have weak fields.
+        Context* ctx = Context::cast(obj);
 
-        // TODO(gc) switch to static visitor instead of normal visitor.
-        if (map == native_context_map) {
-          // Native contexts have weak fields.
-          Context* ctx = Context::cast(obj);
-
-          // We will mark cache black with a separate pass
-          // when we finish marking.
-          MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
-
-          IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
-          bytes_to_process -= size;
-          SLOW_ASSERT(Marking::IsGrey(Marking::MarkBitFrom(obj)));
-          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
-        } else if (map->instance_type() == FIXED_ARRAY_TYPE &&
-          FixedArray::cast(obj)->length() >
-              IncrementalMarkingMarkingVisitor::kScanningChunk) {
-          SLOW_ASSERT(
-              Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
-              MemoryChunk::FromAddress(obj->address())->IsPartiallyScanned());
-          bytes_to_process -=
-              IncrementalMarkingMarkingVisitor::VisitHugeArray(
-                  FixedArray::cast(obj));
-          MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-          if (!Marking::IsBlack(obj_mark_bit)) {
-            MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
-          }
-         } else {
-          IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-          bytes_to_process -= size;
-          SLOW_ASSERT(
-              Marking::IsGrey(Marking::MarkBitFrom(obj)) ||
-              (obj->IsFiller() && Marking::IsWhite(Marking::MarkBitFrom(obj))));
-          MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
-        }
+        // We will mark cache black with a separate pass
+        // when we finish marking.
+        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
 
-        MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
-        Marking::MarkBlack(obj_mark_bit);
+        IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
+      } else {
+        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
       }
-      if (marking_deque_.IsEmpty()) {
-        MarkCompactCollector::ProcessLargePostponedArrays(heap_,
-                                                          &marking_deque_);
-        if (marking_deque_.IsEmpty()) {
-          MarkingComplete(action);
-          break;
-         }
-       } else {
-        ASSERT(bytes_to_process <= 0);
-        break;
-       }
+
+      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+      Marking::MarkBlack(obj_mark_bit);
+      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
     }
+    if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }
 
   steps_count_++;
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index faa75b6..10773e7 100644
@@ -83,9 +83,6 @@ void MarkCompactCollector::RecordSlot(Object** anchor_slot,
                                       Object** slot,
                                       Object* object) {
   Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  // Ensure the anchor slot is on the first 'page' of a large object.
-  ASSERT(Page::FromAddress(reinterpret_cast<Address>(anchor_slot))->owner() !=
-         NULL);
   if (object_page->IsEvacuationCandidate() &&
       !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
     if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 015f506..bb9c23e 100644
@@ -1053,43 +1053,16 @@ class MarkCompactMarkingVisitor
     MarkObjectByPointer(heap->mark_compact_collector(), p, p);
   }
 
-  INLINE(static void VisitPointers(Heap* heap,
-                                   Object** anchor,
-                                   Object** start,
-                                   Object** end)) {
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
     // Mark all objects pointed to in [start, end).
     const int kMinRangeForMarkingRecursion = 64;
     if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, anchor, start, end)) return;
+      if (VisitUnmarkedObjects(heap, start, end)) return;
       // We are close to a stack overflow, so just mark the objects.
     }
     MarkCompactCollector* collector = heap->mark_compact_collector();
     for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, anchor, p);
-    }
-  }
-
-  static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length);
-
-  // The deque is contiguous and we use new space, it is therefore contained in
-  // one page minus the header.  It also has a size that is a power of two so
-  // it is half the size of a page.  We want to scan a number of array entries
-  // that is less than the number of entries in the deque, so we divide by 2
-  // once more.
-  static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize;
-
-  INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) {
-    FixedArray* array = FixedArray::cast(object);
-    int length = array->length();
-    Heap* heap = map->GetHeap();
-
-    if (length < kScanningChunk ||
-        MemoryChunk::FromAddress(array->address())->owner()->identity() !=
-            LO_SPACE) {
-      Object** start_slot = array->data_start();
-      VisitPointers(heap, start_slot, start_slot, start_slot + length);
-    } else {
-      VisitHugeFixedArray(heap, array, length);
+      MarkObjectByPointer(collector, start, p);
     }
   }
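
The sizing comment removed in the hunk above derives kScanningChunk from the page size: the marking deque is allocated in new space, fits in one page minus the header, has a power-of-two size of roughly half a page, and the chunk is capped at half of that again. A compile-time restatement of the arithmetic, assuming a 1 MB page and 8-byte pointers on a 64-bit build (both assumptions, not stated in this diff):

  #include <cstddef>

  // Hypothetical stand-ins for Page::kPageSize and kPointerSize.
  constexpr std::size_t kPageSize = 1 << 20;   // assumed 1 MB page
  constexpr std::size_t kPointerSize = 8;      // assumed 64-bit build

  // Page::kPageSize / 4 / kPointerSize == 1 MB / 4 / 8 == 32768 slots.
  constexpr std::size_t kScanningChunk = kPageSize / 4 / kPointerSize;

  static_assert(kScanningChunk == 32 * 1024,
                "same 32 * 1024 slot budget as the incremental visitor above");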
 
@@ -1139,22 +1112,21 @@ class MarkCompactMarkingVisitor
     IterateBody(map, obj);
   }
 
-  // Visit all unmarked objects pointed to by [start_slot, end_slot).
+  // Visit all unmarked objects pointed to by [start, end).
   // Returns false if the operation fails (lack of stack space).
   static inline bool VisitUnmarkedObjects(Heap* heap,
-                                          Object** anchor_slot,
-                                          Object** start_slot,
-                                          Object** end_slot) {
+                                          Object** start,
+                                          Object** end) {
     // Return false is we are close to the stack limit.
     StackLimitCheck check(heap->isolate());
     if (check.HasOverflowed()) return false;
 
     MarkCompactCollector* collector = heap->mark_compact_collector();
     // Visit the unmarked objects.
-    for (Object** p = start_slot; p < end_slot; p++) {
+    for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(anchor_slot, p, o);
+      collector->RecordSlot(start, p, o);
       HeapObject* obj = HeapObject::cast(o);
       MarkBit mark = Marking::MarkBitFrom(obj);
       if (mark.Get()) continue;
@@ -1475,11 +1447,9 @@ class MarkCompactMarkingVisitor
                                            bool flush_code_candidate) {
     Heap* heap = map->GetHeap();
 
-    Object** start_slot =
-        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
-    Object** end_slot =
-        HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
-    VisitPointers(heap, start_slot, start_slot, end_slot);
+    VisitPointers(heap,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
 
     if (!flush_code_candidate) {
       VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
@@ -1503,12 +1473,11 @@ class MarkCompactMarkingVisitor
       }
     }
 
-    start_slot =
+    VisitPointers(
+        heap,
         HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize);
-    end_slot =
-        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
-    VisitPointers(heap, start_slot, start_slot, end_slot);
+                             JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
   }
 
 
@@ -1524,40 +1493,17 @@ class MarkCompactMarkingVisitor
                                         SharedFunctionInfo::kCodeOffset));
     }
 
-    Object** start_slot =
+    VisitPointers(
+        heap,
         HeapObject::RawField(object,
-                             SharedFunctionInfo::kOptimizedCodeMapOffset);
-    Object** end_slot =
-        HeapObject::RawField(object, SharedFunctionInfo::kSize);
-
-    VisitPointers(heap, start_slot, start_slot, end_slot);
+                             SharedFunctionInfo::kOptimizedCodeMapOffset),
+        HeapObject::RawField(object, SharedFunctionInfo::kSize));
   }
 
   static VisitorDispatchTable<Callback> non_count_table_;
 };
 
 
-void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap,
-                                                    FixedArray* array,
-                                                    int length) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(array->address());
-
-  ASSERT(chunk->owner()->identity() == LO_SPACE);
-
-  Object** start_slot = array->data_start();
-  int from =
-      chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0;
-  int to = Min(from + kScanningChunk, length);
-  VisitPointers(heap, start_slot, start_slot + from, start_slot + to);
-
-  if (to == length) {
-    chunk->SetCompletelyScanned();
-  } else {
-    chunk->SetPartiallyScannedProgress(to);
-  }
-}
-
-
 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
     FixedArrayBase* fixed_array,
     FixedArraySubInstanceType fast_type,
@@ -1699,9 +1645,6 @@ void MarkCompactMarkingVisitor::Initialize() {
   table_.Register(kVisitJSRegExp,
                   &VisitRegExpAndFlushCode);
 
-  table_.Register(kVisitFixedArray,
-                  &VisitFixedArray);
-
   if (FLAG_track_gc_object_stats) {
     // Copy the visitor table to make call-through possible.
     non_count_table_.CopyFrom(&table_);
@@ -1725,9 +1668,8 @@ class MarkingVisitor : public ObjectVisitor {
     MarkCompactMarkingVisitor::VisitPointer(heap_, p);
   }
 
-  void VisitPointers(Object** start_slot, Object** end_slot) {
-    MarkCompactMarkingVisitor::VisitPointers(
-        heap_, start_slot, start_slot, end_slot);
+  void VisitPointers(Object** start, Object** end) {
+    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
   }
 
  private:
@@ -1754,8 +1696,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
       : collector_(collector) {}
 
-  void VisitPointers(Object** start_slot, Object** end_slot) {
-    for (Object** p = start_slot; p < end_slot; p++) VisitPointer(p);
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
   }
 
   void VisitPointer(Object** slot) {
@@ -1866,8 +1808,8 @@ class RootMarkingVisitor : public ObjectVisitor {
     MarkObjectByPointer(p);
   }
 
-  void VisitPointers(Object** start_slot, Object** end_slot) {
-    for (Object** p = start_slot; p < end_slot; p++) MarkObjectByPointer(p);
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
   }
 
  private:
@@ -1903,9 +1845,9 @@ class SymbolTableCleaner : public ObjectVisitor {
   explicit SymbolTableCleaner(Heap* heap)
     : heap_(heap), pointers_removed_(0) { }
 
-  virtual void VisitPointers(Object** start_slot, Object** end_slot) {
-    // Visit all HeapObject pointers in [start_slot, end_slot).
-    for (Object** p = start_slot; p < end_slot; p++) {
+  virtual void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
       Object* o = *p;
       if (o->IsHeapObject() &&
           !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
@@ -2186,7 +2128,6 @@ void MarkCompactCollector::EmptyMarkingDeque() {
 
       MarkCompactMarkingVisitor::IterateBody(map, object);
     }
-    ProcessLargePostponedArrays(heap(), &marking_deque_);
 
     // Process encountered weak maps, mark objects only reachable by those
     // weak maps and repeat until fix-point is reached.
@@ -2195,29 +2136,12 @@ void MarkCompactCollector::EmptyMarkingDeque() {
 }
 
 
-void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap,
-                                                       MarkingDeque* deque) {
-  ASSERT(deque->IsEmpty());
-  LargeObjectIterator it(heap->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    if (!obj->IsFixedArray()) continue;
-    MemoryChunk* p = MemoryChunk::FromAddress(obj->address());
-    if (p->IsPartiallyScanned()) {
-      deque->PushBlack(obj);
-    }
-  }
-}
-
-
 // Sweep the heap for overflowed objects, clear their overflow bits, and
 // push them on the marking stack.  Stop early if the marking stack fills
 // before sweeping completes.  If sweeping completes, there are no remaining
 // overflowed objects in the heap so the overflow flag on the markings stack
 // is cleared.
 void MarkCompactCollector::RefillMarkingDeque() {
-  if (FLAG_trace_gc) {
-    PrintPID("Marking queue overflowed\n");
-  }
   ASSERT(marking_deque_.overflowed());
 
   SemiSpaceIterator new_it(heap()->new_space());
@@ -2708,8 +2632,8 @@ class PointersUpdatingVisitor: public ObjectVisitor {
     UpdatePointer(p);
   }
 
-  void VisitPointers(Object** start_slot, Object** end_slot) {
-    for (Object** p = start_slot; p < end_slot; p++) UpdatePointer(p);
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
   }
 
   void VisitEmbeddedPointer(RelocInfo* rinfo) {
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 965204e..deade29 100644
@@ -240,35 +240,6 @@ class MarkingDeque {
   int mask() { return mask_; }
   void set_top(int top) { top_ = top; }
 
-  int space_left() {
-    // If we already overflowed we may as well just say there is lots of
-    // space left.
-    if (overflowed_) return mask_ + 1;
-    if (IsEmpty()) return mask_ + 1;
-    if (IsFull()) return 0;
-    return (bottom_ - top_) & mask_;
-  }
-
-#ifdef DEBUG
-  const char* Status() {
-    if (overflowed_) return "Overflowed";
-    if (IsEmpty()) return "Empty";
-    if (IsFull()) return "Full";
-    int oct = (((top_ - bottom_) & mask_) * 8) / (mask_ + 1);
-    switch (oct) {
-      case 0: return "Almost empty";
-      case 1: return "1/8 full";
-      case 2: return "2/8 full";
-      case 3: return "3/8 full";
-      case 4: return "4/8 full";
-      case 5: return "5/8 full";
-      case 6: return "6/8 full";
-      case 7: return "7/8 full";
-    }
-    return "??";
-  }
-#endif
-
  private:
   HeapObject** array_;
   // array_[(top - 1) & mask_] is the top element in the deque.  The Deque is
@@ -596,10 +567,6 @@ class MarkCompactCollector {
 
   bool is_compacting() const { return compacting_; }
 
-  // Find the large objects that are not completely scanned, but have been
-  // postponed to later.
-  static void ProcessLargePostponedArrays(Heap* heap, MarkingDeque* deque);
-
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index ea5f1ca..5d33e2e 100644
@@ -262,11 +262,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap(
       map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
     MarkMapContents(heap, map_object);
   } else {
-    Object** start_slot =
-        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset);
-    Object** end_slot =
-        HeapObject::RawField(object, Map::kPointerFieldsEndOffset);
-    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
+    StaticVisitor::VisitPointers(heap,
+        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
   }
 }
 
@@ -288,12 +286,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
     Map* map, HeapObject* object) {
   int last_property_offset =
       JSRegExp::kSize + kPointerSize * map->inobject_properties();
-  Object** start_slot =
-      HeapObject::RawField(object, JSRegExp::kPropertiesOffset);
-  Object** end_slot =
-      HeapObject::RawField(object, last_property_offset);
-  StaticVisitor::VisitPointers(
-      map->GetHeap(), start_slot, start_slot, end_slot);
+  StaticVisitor::VisitPointers(map->GetHeap(),
+      HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+      HeapObject::RawField(object, last_property_offset));
 }
 
 
@@ -320,11 +315,9 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
   // Mark the pointer fields of the Map. Since the transitions array has
   // been marked already, it is fine that one of these fields contains a
   // pointer to it.
-  Object** start_slot =
-      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset);
-  Object** end_slot =
-      HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
+  StaticVisitor::VisitPointers(heap,
+      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
 }
 
 
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
index 407611b..7a3b3f4 100644
@@ -213,7 +213,7 @@ class BodyVisitorBase : public AllStatic {
                                                      start_offset);
     Object** end_slot = reinterpret_cast<Object**>(object->address() +
                                                    end_offset);
-    StaticVisitor::VisitPointers(heap, start_slot, start_slot, end_slot);
+    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
   }
 };
 
@@ -283,26 +283,21 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
     return table_.GetVisitor(map)(map, obj);
   }
 
-  static inline void VisitPointers(
-      Heap* heap, Object** anchor, Object** start, Object** end) {
+  static inline void VisitPointers(Heap* heap, Object** start, Object** end) {
     for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
   }
 
  private:
   static inline int VisitJSFunction(Map* map, HeapObject* object) {
     Heap* heap = map->GetHeap();
-    Object** start_slot =
-        HeapObject::RawField(object, JSFunction::kPropertiesOffset);
     VisitPointers(heap,
-                  start_slot,
-                  start_slot,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                   HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
 
     // Don't visit code entry. We are using this visitor only during scavenges.
 
     VisitPointers(
         heap,
-        start_slot,
         HeapObject::RawField(object,
                              JSFunction::kCodeEntryOffset + kPointerSize),
         HeapObject::RawField(object,
diff --git a/src/spaces.cc b/src/spaces.cc
index 62d8263..bc1d7b0 100644
@@ -2679,10 +2679,12 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
 
   HeapObject* object = page->GetObject();
 
-  // Make the object consistent so the large object space can be traversed.
+#ifdef DEBUG
+  // Make the object consistent so the heap can be vefified in OldSpaceStep.
   reinterpret_cast<Object**>(object->address())[0] =
       heap()->fixed_array_map();
   reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+#endif
 
   heap()->incremental_marking()->OldSpaceStep(object_size);
   return object;
diff --git a/src/spaces.h b/src/spaces.h
index 97bcaa5..effe06b 100644
@@ -400,15 +400,6 @@ class MemoryChunk {
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,
 
-    // Used for large objects only.  Indicates that the object has been
-    // partially scanned by the incremental mark-sweep GC.  Objects that have
-    // been partially scanned are marked black so that the write barrier
-    // triggers for them, and they are counted as live bytes.  If the mutator
-    // writes to them they may be turned grey and subtracted from the live byte
-    // list.  They move back to the marking deque either by an iteration over
-    // the large object space or in the write barrier.
-    IS_PARTIALLY_SCANNED,
-
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -429,25 +420,6 @@ class MemoryChunk {
       (1 << IN_FROM_SPACE) |
       (1 << IN_TO_SPACE);
 
-  static const int kIsPartiallyScannedMask = 1 << IS_PARTIALLY_SCANNED;
-
-  void SetPartiallyScannedProgress(int progress) {
-    SetFlag(IS_PARTIALLY_SCANNED);
-    partially_scanned_progress_ = progress;
-  }
-
-  bool IsPartiallyScanned() {
-    return IsFlagSet(IS_PARTIALLY_SCANNED);
-  }
-
-  void SetCompletelyScanned() {
-    ClearFlag(IS_PARTIALLY_SCANNED);
-  }
-
-  int PartiallyScannedProgress() {
-    ASSERT(IsPartiallyScanned());
-    return partially_scanned_progress_;
-  }
 
   void SetFlag(int flag) {
     flags_ |= static_cast<uintptr_t>(1) << flag;
@@ -534,14 +506,8 @@ class MemoryChunk {
 
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
-  static const size_t kPartiallyScannedProgress =
-      kWriteBarrierCounterOffset + kPointerSize;
 
-  // Actually the partially_scanned_progress_ member is only an int, but on
-  // 64 bit the size of MemoryChunk gets rounded up to a 64 bit size so we
-  // have to have the header start kPointerSize after the
-  // partially_scanned_progress_ member.
-  static const size_t kHeaderSize = kPartiallyScannedProgress + kPointerSize;
+  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
 
   static const int kBodyOffset =
     CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
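
The comment removed just above explains why kHeaderSize advanced past partially_scanned_progress_ by a full pointer even though that field is only an int: on 64-bit targets the MemoryChunk size is rounded up, so the header must stay pointer-aligned. A small worked restatement with hypothetical offsets (the 8-byte pointer size is an assumption):

  #include <cstddef>

  constexpr std::size_t kPointerSize = 8;                // assumed 64-bit build
  constexpr std::size_t kWriteBarrierCounterOffset = 0;  // stand-in base offset

  // Layout before the revert: the int-sized progress field still consumes a
  // full pointer-sized slot in the header.
  constexpr std::size_t kPartiallyScannedProgress =
      kWriteBarrierCounterOffset + kPointerSize;
  constexpr std::size_t kHeaderSizeBefore =
      kPartiallyScannedProgress + kPointerSize;

  // Layout after the revert: the header ends right after the counter.
  constexpr std::size_t kHeaderSizeAfter =
      kWriteBarrierCounterOffset + kPointerSize;

  static_assert(kHeaderSizeBefore - kHeaderSizeAfter == kPointerSize,
                "the revert shrinks the MemoryChunk header by one pointer slot");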
@@ -678,7 +644,6 @@ class MemoryChunk {
   SlotsBuffer* slots_buffer_;
   SkipList* skip_list_;
   intptr_t write_barrier_counter_;
-  int partially_scanned_progress_;
 
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,