From fdc1745e335206d78f13623d5a5468e2754ce549 Mon Sep 17 00:00:00 2001
From: ulan
Date: Mon, 23 Mar 2015 06:24:07 -0700
Subject: [PATCH] Respect old space allocation limit in PagedSpace::AllocateRaw.

BUG=v8:3976
LOG=NO

Review URL: https://codereview.chromium.org/1025643002

Cr-Commit-Position: refs/heads/master@{#27364}
---
 src/heap/heap-inl.h | 6 ------
 src/heap/heap.cc    | 4 ++++
 src/heap/spaces.cc  | 2 +-
 3 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index 7e869f2..f2c593e 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -698,18 +698,12 @@ void Heap::CompletelyClearInstanceofCache() {
 
 
 AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
     : heap_(isolate->heap()), daf_(isolate) {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
   heap_->always_allocate_scope_depth_++;
 }
 
 
 AlwaysAllocateScope::~AlwaysAllocateScope() {
   heap_->always_allocate_scope_depth_--;
-  DCHECK(heap_->always_allocate_scope_depth_ == 0);
 }
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 483b091..3f65253 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -1516,6 +1516,10 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 
 void Heap::Scavenge() {
   RelocationLock relocation_lock(this);
+  // There are soft limits in the allocation code, designed to trigger a mark
+  // sweep collection by failing allocations. There is no sense in trying to
+  // trigger one during scavenge: scavenges allocation should always succeed.
+  AlwaysAllocateScope scope(isolate());
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index f3c2eb0..827f3ab 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2618,7 +2618,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     // If sweeper threads are active, wait for them at that point and steal
     // elements form their free-lists.
     HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-    if (object != NULL) return object;
+    return object;
   }
 
   // Try to expand the space and allocate in the new next page.
-- 
2.7.4
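
Note on how the three hunks cooperate: SlowAllocateRaw now propagates a NULL
result from WaitForSweeperThreadsAndRetryAllocation instead of falling through
to page expansion, so hitting the old space limit surfaces as a failed
allocation that can trigger a mark-sweep. Scavenge wraps itself in an
AlwaysAllocateScope so its promotions are exempt from that soft failure, and
the DCHECKs in heap-inl.h are dropped because the scavenge-wide scope can now
legitimately nest with scopes taken further down the stack. Below is a minimal
standalone sketch of this mechanism; SoftLimitHeap, kSoftLimit, and the other
names are invented for illustration and are not V8 APIs.

// sketch.cc -- illustrative only, not V8 code. Models a soft allocation
// limit plus the RAII always-allocate bypass this patch relies on.
#include <cstddef>
#include <cstdio>
#include <new>

class SoftLimitHeap {
 public:
  // RAII guard: while at least one scope is alive, allocations never fail
  // for soft-limit reasons (mirrors v8's AlwaysAllocateScope, made nestable
  // here by the depth counter, as in the heap-inl.h hunk).
  class AlwaysAllocateScope {
   public:
    explicit AlwaysAllocateScope(SoftLimitHeap* heap) : heap_(heap) {
      heap_->always_allocate_depth_++;
    }
    ~AlwaysAllocateScope() { heap_->always_allocate_depth_--; }

   private:
    SoftLimitHeap* heap_;
  };

  // Returns nullptr once the soft limit is exceeded and no scope is active.
  // The caller reacts to nullptr by scheduling a mark-sweep collection,
  // which is what propagating NULL in the spaces.cc hunk enables.
  void* AllocateRaw(std::size_t size) {
    if (allocated_ + size > kSoftLimit && always_allocate_depth_ == 0) {
      return nullptr;  // soft failure: lets the caller trigger a full GC
    }
    allocated_ += size;
    return ::operator new(size);  // leaked below; a sketch, not a real heap
  }

  // During scavenge, promotion into old space must not fail, so the soft
  // limit is bypassed for the whole collection (the heap.cc hunk).
  void Scavenge() {
    AlwaysAllocateScope scope(this);
    void* promoted = AllocateRaw(64);  // e.g. evacuating a survivor object
    std::printf("promotion %s\n", promoted ? "succeeded" : "failed");
  }

 private:
  static const std::size_t kSoftLimit = 1024;
  int always_allocate_depth_ = 0;
  std::size_t allocated_ = 0;
};

int main() {
  SoftLimitHeap heap;
  while (heap.AllocateRaw(256) != nullptr) {
  }                 // drive the heap past its soft limit
  heap.Scavenge();  // still succeeds: the scope bypasses the limit
  return 0;
}

Before the spaces.cc change, a NULL from the sweeper retry fell through to
"Try to expand the space and allocate in the new next page", so old space
could keep growing past its limit without a full collection ever firing.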