Do not dereference handles during relocation.
author     yangguo@chromium.org <yangguo@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Fri, 26 Apr 2013 07:35:07 +0000 (07:35 +0000)
committer  yangguo@chromium.org <yangguo@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Fri, 26 Apr 2013 07:35:07 +0000 (07:35 +0000)
R=hpayer@chromium.org
BUG=

Review URL: https://chromiumcodereview.appspot.com/13982023

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@14445 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/handles-inl.h
src/heap-inl.h
src/heap.cc
src/heap.h
src/hydrogen.cc
src/mark-compact.cc
src/optimizing-compiler-thread.cc
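
The change below enforces a simple invariant: while the heap is relocating objects (scavenge or mark-compact evacuation), the concurrent optimizer thread must not dereference handles, because the raw pointers behind them can be moving. Both sides synchronize on a new Heap::RelocationLock, which is only taken when FLAG_parallel_recompilation is on. What follows is a minimal, self-contained sketch of that discipline; the Heap class, the flag, and the thread identification here are illustrative stand-ins, not the actual V8 code.

// Minimal model of the locking discipline introduced by this patch.
// Everything here is an illustrative stand-in for the real V8 classes.
#include <mutex>

namespace sketch {

static bool parallel_recompilation = true;  // stand-in for the V8 flag

class Heap {
 public:
  // RAII guard mirroring Heap::RelocationLock: the GC holds it while moving
  // objects, the optimizer thread holds it while dereferencing handles, and
  // it degrades to a no-op when parallel recompilation is disabled.
  class RelocationLock {
   public:
    RelocationLock(Heap* heap, bool on_optimizer_thread) : heap_(heap) {
      if (parallel_recompilation) {
        heap_->relocation_mutex_.lock();
        heap_->locked_by_optimizer_thread_ = on_optimizer_thread;
      }
    }
    ~RelocationLock() {
      if (parallel_recompilation) {
        heap_->locked_by_optimizer_thread_ = false;
        heap_->relocation_mutex_.unlock();
      }
    }

   private:
    Heap* heap_;
  };

  // Debug check corresponding to the new clause in
  // Handle<T>::IsDereferenceAllowed(): the optimizer thread may only
  // dereference a handle while it holds the relocation lock.
  bool IsDereferenceAllowed(bool on_optimizer_thread) const {
    return !on_optimizer_thread || locked_by_optimizer_thread_;
  }

 private:
  std::mutex relocation_mutex_;
  bool locked_by_optimizer_thread_ = false;
};

}  // namespace sketch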

diff --git a/src/handles-inl.h b/src/handles-inl.h
index f12a811..5a3e9ed 100644
@@ -91,6 +91,10 @@ bool Handle<T>::IsDereferenceAllowed(bool allow_deferred) const {
       handle < roots_array_start + Heap::kStrongRootListLength) {
     return true;
   }
+  if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
+      !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
+    return false;
+  }
   switch (isolate->HandleDereferenceGuardState()) {
     case HandleDereferenceGuard::ALLOW:
       return true;
diff --git a/src/heap-inl.h b/src/heap-inl.h
index ab1fdb4..f937426 100644
@@ -211,6 +211,7 @@ MaybeObject* Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
 MaybeObject* Heap::AllocateRaw(int size_in_bytes,
                                AllocationSpace space,
                                AllocationSpace retry_space) {
+  SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   ASSERT(space != NEW_SPACE ||
          retry_space == OLD_POINTER_SPACE ||
diff --git a/src/heap.cc b/src/heap.cc
index 7976cf8..f2f0b9b 100644
@@ -1300,6 +1300,8 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 
 
 void Heap::Scavenge() {
+  RelocationLock relocation_lock(this);
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
 #endif
@@ -6635,6 +6637,11 @@ bool Heap::SetUp() {
 
   store_buffer()->SetUp();
 
+  if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+#ifdef DEBUG
+  relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif  // DEBUG
+
   return true;
 }
 
@@ -6737,6 +6744,8 @@ void Heap::TearDown() {
   incremental_marking()->TearDown();
 
   isolate_->memory_allocator()->TearDown();
+
+  delete relocation_mutex_;
 }
 
 
@@ -7866,4 +7875,15 @@ void Heap::CheckpointObjectStats() {
   ClearObjectStats();
 }
 
+
+Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
+  if (FLAG_parallel_recompilation) {
+    heap_->relocation_mutex_->Lock();
+#ifdef DEBUG
+    heap_->relocation_mutex_locked_by_optimizer_thread_ =
+        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
+#endif  // DEBUG
+  }
+}
+
 } }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
index 28c24a5..b6527e8 100644
@@ -1858,6 +1858,31 @@ class Heap {
 
   void CheckpointObjectStats();
 
+  // We don't use a ScopedLock here since we want to lock the heap
+  // only when FLAG_parallel_recompilation is true.
+  class RelocationLock {
+   public:
+    explicit RelocationLock(Heap* heap);
+
+    ~RelocationLock() {
+      if (FLAG_parallel_recompilation) {
+#ifdef DEBUG
+        heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif  // DEBUG
+        heap_->relocation_mutex_->Unlock();
+      }
+    }
+
+#ifdef DEBUG
+    static bool IsLockedByOptimizerThread(Heap* heap) {
+      return heap->relocation_mutex_locked_by_optimizer_thread_;
+    }
+#endif  // DEBUG
+
+   private:
+    Heap* heap_;
+  };
+
  private:
   Heap();
 
@@ -2332,6 +2357,11 @@ class Heap {
 
   MemoryChunk* chunks_queued_for_free_;
 
+  Mutex* relocation_mutex_;
+#ifdef DEBUG
+  bool relocation_mutex_locked_by_optimizer_thread_;
+#endif  // DEBUG
+
   friend class Factory;
   friend class GCTracer;
   friend class DisallowAllocationFailure;
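
The comment in the RelocationLock class above spells out the design choice: a plain ScopedLock is not used because the mutex should only be touched when FLAG_parallel_recompilation is enabled (it is only created in Heap::SetUp under that flag). A minimal sketch of that conditional-lock shape, with std::mutex and a plain bool standing in for the platform mutex and the V8 flag:

#include <mutex>

// Scoped guard that only takes the mutex when the feature is enabled, so
// the common single-threaded configuration pays nothing. std::mutex and
// the bool are stand-ins for OS::Mutex and FLAG_parallel_recompilation.
class ConditionalLock {
 public:
  ConditionalLock(std::mutex* mutex, bool enabled)
      : mutex_(enabled ? mutex : nullptr) {
    if (mutex_ != nullptr) mutex_->lock();
  }
  ~ConditionalLock() {
    if (mutex_ != nullptr) mutex_->unlock();
  }

  // Scoped locks should not be copied.
  ConditionalLock(const ConditionalLock&) = delete;
  ConditionalLock& operator=(const ConditionalLock&) = delete;

 private:
  std::mutex* mutex_;  // null when the feature flag is off
};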
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 023d4df..57854b8 100644
@@ -511,6 +511,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
 
 
 void HGraph::Verify(bool do_full_verify) const {
+  Heap::RelocationLock relocation_lock(isolate()->heap());
   ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");
   for (int i = 0; i < blocks_.length(); i++) {
     HBasicBlock* block = blocks_.at(i);
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 5685ab5..62dee48 100644
@@ -3125,6 +3125,8 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
 
 
 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  Heap::RelocationLock relocation_lock(heap());
+
   bool code_slots_filtering_required;
   { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
     code_slots_filtering_required = MarkInvalidatedCode();
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index b982b94..1e2e0a8 100644
@@ -88,7 +88,9 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR.  Simply continue.
   // Mark it for installing before queuing so that we can be sure of the write
   // order: marking first and (after being queued) installing code second.
-  optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+  { Heap::RelocationLock relocation_lock(isolate_->heap());
+    optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+  }
   output_queue_.Enqueue(optimizing_compiler);
 }