handle < roots_array_start + Heap::kStrongRootListLength) {
return true;
}
+ if (isolate->optimizing_compiler_thread()->IsOptimizerThread() &&
+ !Heap::RelocationLock::IsLockedByOptimizerThread(isolate->heap())) {
+ return false;
+ }
switch (isolate->HandleDereferenceGuardState()) {
case HandleDereferenceGuard::ALLOW:
return true;
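With parallel recompilation enabled, the optimizer thread may now dereference handles only while it holds the heap's RelocationLock (introduced further down); without the lock, the guard rejects the dereference outright. A minimal sketch of the pattern this enforces on the optimizer thread, where function_handle is a hypothetical Handle<JSFunction>:

  // Sketch only: on the optimizer thread, dereference handles inside a
  // RelocationLock scope so a concurrent scavenge cannot move the object.
  {
    Heap::RelocationLock relocation_lock(isolate->heap());
    JSFunction* raw_function = *function_handle;  // legal under the lock
    // ... use raw_function only while the lock is held ...
  }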
MaybeObject* Heap::AllocateRaw(int size_in_bytes,
AllocationSpace space,
AllocationSpace retry_space) {
+ SLOW_ASSERT(!isolate_->optimizing_compiler_thread()->IsOptimizerThread());
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
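The new SLOW_ASSERT records the invariant that makes the locking scheme sound: the optimizer thread never allocates in the managed heap, so it cannot trigger a GC itself. The only relocation hazard it faces is a collection started from the main thread, which is precisely what the RelocationLock serializes against.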
void Heap::Scavenge() {
+ RelocationLock relocation_lock(this);
+
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
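Since the lock is a stack scope taken at the top of Scavenge, the optimizer thread is excluded for the whole scavenge, i.e. for exactly the window in which new-space objects can move.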
store_buffer()->SetUp();
+ if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
+#ifdef DEBUG
+ relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif // DEBUG
+
return true;
}
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
+
+ delete relocation_mutex_;
}
ClearObjectStats();
}
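TearDown deletes the mutex unconditionally even though SetUp creates it only when FLAG_parallel_recompilation is set. That is safe only if relocation_mutex_ is initialized to NULL in the Heap constructor (not shown in this excerpt), so that the delete is a no-op in the non-parallel case.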
+
+Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
+ if (FLAG_parallel_recompilation) {
+ heap_->relocation_mutex_->Lock();
+#ifdef DEBUG
+ heap_->relocation_mutex_locked_by_optimizer_thread_ =
+ heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
+#endif // DEBUG
+ }
+}
+
} } // namespace v8::internal
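A note on the DEBUG bookkeeping: relocation_mutex_locked_by_optimizer_thread_ is written only under the mutex, and it is true exactly while the optimizer thread itself holds the lock (a lock or unlock on the main thread stores false). The guard check in the first hunk therefore reads a meaningful value on the one thread that consults it.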
void CheckpointObjectStats();
+ // We don't use a ScopedLock here since we want to lock the heap
+ // only when FLAG_parallel_recompilation is true.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap);
+
+ ~RelocationLock() {
+ if (FLAG_parallel_recompilation) {
+#ifdef DEBUG
+ heap_->relocation_mutex_locked_by_optimizer_thread_ = false;
+#endif // DEBUG
+ heap_->relocation_mutex_->Unlock();
+ }
+ }
+
+#ifdef DEBUG
+ static bool IsLockedByOptimizerThread(Heap* heap) {
+ return heap->relocation_mutex_locked_by_optimizer_thread_;
+ }
+#endif // DEBUG
+
+ private:
+ Heap* heap_;
+ };
+
private:
Heap();
MemoryChunk* chunks_queued_for_free_;
+ Mutex* relocation_mutex_;
+#ifdef DEBUG
+ bool relocation_mutex_locked_by_optimizer_thread_;
+#endif // DEBUG
+
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
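The header comment above explains the shape of the class: a guard that degenerates to a flag test when parallel recompilation is off. The same idiom, extracted into a self-contained sketch (illustrative only, not V8 code; Mutex stands in for V8's platform mutex with Lock/Unlock):

  // A scoped lock that is a no-op unless enabled. The enabled flag is
  // latched at construction, so construction and destruction always agree.
  class ConditionalScopedLock {
   public:
    ConditionalScopedLock(Mutex* mutex, bool enabled)
        : mutex_(mutex), enabled_(enabled) {
      if (enabled_) mutex_->Lock();
    }
    ~ConditionalScopedLock() {
      if (enabled_) mutex_->Unlock();
    }
   private:
    Mutex* mutex_;
    bool enabled_;
  };

RelocationLock instead re-tests FLAG_parallel_recompilation in both constructor and destructor, which is equivalent as long as the flag does not change while the lock is held (V8 flags are normally fixed at startup).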
void HGraph::Verify(bool do_full_verify) const {
+ // Hold the relocation lock for the entire verification pass; an unnamed
+ // temporary would be destroyed, and the mutex released, immediately.
+ Heap::RelocationLock relocation_lock(isolate()->heap());
ALLOW_HANDLE_DEREF(isolate(), "debug mode verification");
for (int i = 0; i < blocks_.length(); i++) {
HBasicBlock* block = blocks_.at(i);
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ Heap::RelocationLock relocation_lock(heap());
+
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
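Mark-compact evacuation is the other phase that moves objects, so it takes the same lock; together with the scope in Scavenge, both relocation points are now mutually exclusive with the optimizer thread's handle dereferences.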
// The function may have already been optimized by OSR. Simply continue.
// Mark it for installing before queuing so that we can be sure of the write
// order: marking first and (after being queued) installing code second.
- optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+ { Heap::RelocationLock relocation_lock(isolate_->heap());
+ optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
+ }
output_queue_.Enqueue(optimizing_compiler);
}
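This hunk is where the new guard bites: MarkForInstallingRecompiledCode dereferences the closure handle on the optimizer thread, which is now legal only under the RelocationLock, and the lock also keeps a concurrent scavenge from relocating the closure mid-write. The enqueue happens after the scope closes, preserving the mark-first, queue-second order called out in the comment.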