Fix free list node ending up on evacuation candidate.
author     mstarzinger@chromium.org <mstarzinger@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Tue, 11 Oct 2011 16:50:58 +0000 (16:50 +0000)
committer  mstarzinger@chromium.org <mstarzinger@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Tue, 11 Oct 2011 16:50:58 +0000 (16:50 +0000)
This is a temporary fix that avoids compaction when incremental marking
is restarted during an old-space-step. Restarting with compaction enabled
could turn the page that holds the chosen free list node into an
evacuation candidate, and it could also cause several other
inconsistencies if it happens during a scavenge.
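
For illustration only, a minimal, self-contained sketch of the hazard described
above (not V8 code): an allocator picks a free-list node, then runs an
old-space step that may restart marking; if that step is allowed to start
compaction, the node's page can become an evacuation candidate while the
allocator still intends to hand the node out. All names in the sketch (Page,
FreeListNode, Allocate) are invented for this example.

// Toy model of the bug this change guards against; names and types here are
// invented for illustration and do not come from the V8 sources.
#include <cassert>
#include <functional>

struct Page {
  bool evacuation_candidate = false;
};

struct FreeListNode {
  Page* page;
};

// Simplified stand-in for FreeList::Allocate: the node is already chosen
// before the incremental-marking step runs.
FreeListNode* Allocate(FreeListNode* node,
                       const std::function<void(Page*)>& old_space_step) {
  // Before this fix, the step could finish sweeping, restart marking with
  // compaction, and flag node->page as an evacuation candidate.
  old_space_step(node->page);

  // Mirrors the ASSERT added to FreeList::Allocate in this change.
  assert(!node->page->evacuation_candidate);
  return node;
}

int main() {
  Page page;
  FreeListNode node{&page};
  // With PREVENT_COMPACTION semantics the step leaves the page untouched.
  Allocate(&node, [](Page*) { /* marking restarted without compaction */ });
  return 0;
}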

R=vegorov@chromium.org

Review URL: http://codereview.chromium.org/8228010

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9585 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/incremental-marking.cc
src/incremental-marking.h
src/spaces.cc

diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 7aab992..60a21ee 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -411,7 +411,7 @@ void IncrementalMarking::Start() {
 
   if (heap_->old_pointer_space()->IsSweepingComplete() &&
       heap_->old_data_space()->IsSweepingComplete()) {
-    StartMarking();
+    StartMarking(ALLOW_COMPACTION);
   } else {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Start sweeping.\n");
@@ -436,12 +436,12 @@ static void MarkObjectGreyDoNotEnqueue(Object* obj) {
 }
 
 
-void IncrementalMarking::StartMarking() {
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start marking\n");
   }
 
-  is_compacting_ = !FLAG_never_compact &&
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
       heap_->mark_compact_collector()->StartCompaction();
 
   state_ = MARKING;
@@ -705,7 +705,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
   if (state_ == SWEEPING) {
     if (heap_->old_pointer_space()->AdvanceSweeper(bytes_to_process) &&
         heap_->old_data_space()->AdvanceSweeper(bytes_to_process)) {
-      StartMarking();
+      StartMarking(PREVENT_COMPACTION);
     }
   } else if (state_ == MARKING) {
     Map* filler_map = heap_->one_pointer_filler_map();
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
index 30dbbc1..d1627bc 100644
--- a/src/incremental-marking.h
+++ b/src/incremental-marking.h
@@ -206,7 +206,9 @@ class IncrementalMarking {
 
   void ResetStepCounters();
 
-  void StartMarking();
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void StartMarking(CompactionFlag flag);
 
   void ActivateIncrementalWriteBarrier(PagedSpace* space);
   static void ActivateIncrementalWriteBarrier(NewSpace* space);
diff --git a/src/spaces.cc b/src/spaces.cc
index 2aaca5b..61b3181 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1798,6 +1798,11 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   owner_->heap()->incremental_marking()->OldSpaceStep(
       size_in_bytes - old_linear_size);
 
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
   const int kThreshold = IncrementalMarking::kAllocatedThreshold;
 
   // Memory in the linear allocation area is counted as allocated.  We may free
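
The comment and ASSERT added above document the invariant right at the
allocation site. As a hedged, self-contained sketch (not the actual V8
implementation of MarkCompactCollector::IsOnEvacuationCandidate), the check
presumably resolves the page that holds the node by masking the low bits of
its address and then tests an evacuation-candidate flag on that page. The page
size, ToyPage layout, and helper names below are assumptions made for this
example only.

#include <cassert>
#include <cstdint>
#include <new>

constexpr std::size_t kToyPageSize = 4096;  // illustrative page size

struct ToyPage {
  bool evacuation_candidate = false;

  static ToyPage* FromAddress(const void* addr) {
    // Pages are aligned to kToyPageSize, so masking any interior address
    // recovers the page header at the start of the page.
    auto bits = reinterpret_cast<std::uintptr_t>(addr);
    return reinterpret_cast<ToyPage*>(bits & ~(kToyPageSize - 1));
  }
};

int main() {
  alignas(kToyPageSize) static unsigned char block[kToyPageSize];
  ToyPage* page = new (block) ToyPage();  // page header at the page start
  void* object_on_page = block + 128;     // an "object" somewhere on that page
  // Mirrors ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)).
  assert(!ToyPage::FromAddress(object_on_page)->evacuation_candidate);
  (void)page;
  return 0;
}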