From c808a6449ec2d01abc40cc805b9558a82802970a Mon Sep 17 00:00:00 2001
From: "erik.corry@gmail.com" <erik.corry@gmail.com>
Date: Fri, 14 Oct 2011 10:52:30 +0000
Subject: [PATCH] Avoid extra GCs when deserializing during incremental marking.

Review URL: http://codereview.chromium.org/8276030

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@9626 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/spaces.cc | 19 ++++++++++++++++---
 1 file changed, 16 insertions(+), 3 deletions(-)

diff --git a/src/spaces.cc b/src/spaces.cc
index 61b3181..861bc27 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -1909,11 +1909,24 @@ intptr_t FreeList::SumFreeLists() {
 
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
-  // space than the minimum NewSpace size.
+  // space than the minimum NewSpace size. The limit can be set lower than
+  // the end of new space either because there is more space on the next page
+  // or because we have lowered the limit in order to get periodic incremental
+  // marking. The most reliable way to ensure that there is linear space is
+  // to do the allocation, then rewind the limit.
   ASSERT(bytes <= InitialCapacity());
-  Address limit = allocation_info_.limit;
+  MaybeObject* maybe = AllocateRawInternal(bytes);
+  Object* object = NULL;
+  if (!maybe->ToObject(&object)) return false;
+  HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
-  return limit - top >= bytes;
+  if ((top - bytes) == allocation->address()) {
+    allocation_info_.top = allocation->address();
+    return true;
+  }
+  // There may be a borderline case here where the allocation succeeded, but
+  // the limit and top have moved on to a new page. In that case we try again.
+  return ReserveSpace(bytes);
 }
 
 
-- 
2.7.4
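
Note: the hunk above replaces the old "limit - top >= bytes" check with an allocate-then-rewind sequence, because the allocation limit may sit below the real end of new space when it has been lowered to trigger periodic incremental-marking steps. The following is a minimal, self-contained sketch of that pattern, assuming a hypothetical bump allocator; the names (BumpSpace, AllocateRaw, soft_limit_, hard_limit_) are invented for illustration and are not V8's actual types or API. The retry branch mirrors the patch's safety net for the case where the slow path moves top onto a fresh page, which this sketch does not model.

// Editorial sketch (not V8 code): allocate-then-rewind space reservation.
#include <cstddef>
#include <cstdio>

class BumpSpace {
 public:
  BumpSpace(char* start, size_t size)
      : top_(start),
        soft_limit_(start + size / 2),   // Lowered limit, e.g. to force
        hard_limit_(start + size) {}     // periodic incremental-marking steps.

  // Fast-path bump allocation against the soft limit, with a slow path that
  // raises the soft limit when the region still has room.
  char* AllocateRaw(size_t bytes) {
    if (bytes > static_cast<size_t>(soft_limit_ - top_)) {
      if (bytes > static_cast<size_t>(hard_limit_ - top_)) return nullptr;
      soft_limit_ = hard_limit_;  // The lowered limit was not the real end.
    }
    char* result = top_;
    top_ += bytes;
    return result;
  }

  // Reserve linear space without consuming it. Comparing
  // "soft_limit_ - top_ >= bytes" can underestimate the usable space, so do
  // the allocation for real, then rewind top if the block landed directly
  // below the new top. Otherwise retry (e.g. top moved to a new page).
  bool ReserveSpace(size_t bytes) {
    char* allocation = AllocateRaw(bytes);
    if (allocation == nullptr) return false;
    if (top_ - bytes == allocation) {
      top_ = allocation;  // Rewind: nothing was really consumed.
      return true;
    }
    return ReserveSpace(bytes);
  }

 private:
  char* top_;
  char* soft_limit_;
  char* hard_limit_;
};

int main() {
  static char arena[1024];
  BumpSpace space(arena, sizeof(arena));
  // 800 bytes exceeds the lowered soft limit (512) but fits in the region,
  // so a plain "limit - top >= bytes" check would have failed spuriously.
  std::printf("reserve 800 bytes: %s\n",
              space.ReserveSpace(800) ? "ok" : "fail");
  std::printf("reserve 2000 bytes: %s\n",
              space.ReserveSpace(2000) ? "ok" : "fail");
}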