Upstream version 9.37.195.0

diff --git a/src/v8/src/spaces.cc b/src/v8/src/spaces.cc
index a80341b..69a0145 100644
--- a/src/v8/src/spaces.cc
+++ b/src/v8/src/spaces.cc
@@ -1,36 +1,14 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "macro-assembler.h"
-#include "mark-compact.h"
-#include "msan.h"
-#include "platform.h"
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/full-codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mark-compact.h"
+#include "src/msan.h"
+#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -133,9 +111,21 @@ CodeRange::CodeRange(Isolate* isolate)
 }
 
 
-bool CodeRange::SetUp(const size_t requested) {
+bool CodeRange::SetUp(size_t requested) {
   ASSERT(code_range_ == NULL);
 
+  if (requested == 0) {
+    // When a target requires the code range feature, we put all code objects
+    // in a kMaximalCodeRangeSize range of virtual address space, so that
+    // they can call each other with near calls.
+    if (kRequiresCodeRange) {
+      requested = kMaximalCodeRangeSize;
+    } else {
+      return true;
+    }
+  }
+
+  ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
   code_range_ = new VirtualMemory(requested);
   CHECK(code_range_ != NULL);
   if (!code_range_->IsReserved()) {
@@ -146,7 +136,8 @@ bool CodeRange::SetUp(const size_t requested) {
 
   // We are sure that we have mapped a block of requested addresses.
   ASSERT(code_range_->size() == requested);
-  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  LOG(isolate_,
+      NewEvent("CodeRange", code_range_->address(), requested));
   Address base = reinterpret_cast<Address>(code_range_->address());
   Address aligned_base =
       RoundUp(reinterpret_cast<Address>(code_range_->address()),
@@ -167,12 +158,12 @@ int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
 }
 
 
-void CodeRange::GetNextAllocationBlock(size_t requested) {
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
   for (current_allocation_block_index_++;
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return;  // Found a large enough allocation block.
+      return true;  // Found a large enough allocation block.
     }
   }
 
@@ -199,12 +190,12 @@ void CodeRange::GetNextAllocationBlock(size_t requested) {
        current_allocation_block_index_ < allocation_list_.length();
        current_allocation_block_index_++) {
     if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return;  // Found a large enough allocation block.
+      return true;  // Found a large enough allocation block.
     }
   }
-
+  current_allocation_block_index_ = 0;
   // Code range is full or too fragmented.
-  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
+  return false;
 }
 
 
@@ -214,9 +205,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   ASSERT(commit_size <= requested_size);
   ASSERT(current_allocation_block_index_ < allocation_list_.length());
   if (requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.  This function call may
-    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
-    GetNextAllocationBlock(requested_size);
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return NULL;
   }
   // Commit the requested memory at the start of the current allocation block.
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
@@ -239,7 +229,8 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
-    GetNextAllocationBlock(0);  // This block is used up, get the next one.
+    // This block is used up, get the next one.
+    if (!GetNextAllocationBlock(0)) return NULL;
   }
   return current.start;
 }
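
GetNextAllocationBlock now reports exhaustion instead of calling
V8::FatalProcessOutOfMemory, so AllocateRawMemory propagates failure by
returning NULL and leaves the reaction to the caller. A minimal sketch of
the resulting caller-side contract (RequestCodeChunk is a hypothetical
helper, not part of this patch):

    // Hypothetical caller: NULL means the code range is full or too
    // fragmented, so the caller can fall back (e.g. request a GC or
    // allocate from the OS) instead of aborting the process.
    Address RequestCodeChunk(CodeRange* range, size_t requested,
                             size_t commit_size) {
      size_t allocated = 0;
      Address base = range->AllocateRawMemory(requested, commit_size,
                                              &allocated);
      if (base == NULL) return NULL;  // out of code-range memory
      return base;
    }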
@@ -333,9 +324,12 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
     size_executable_ -= size;
   }
   // Code which is part of the code-range does not have its own VirtualMemory.
-  ASSERT(!isolate_->code_range()->contains(
-      static_cast<Address>(reservation->address())));
-  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  ASSERT(isolate_->code_range() == NULL ||
+         !isolate_->code_range()->contains(
+             static_cast<Address>(reservation->address())));
+  ASSERT(executable == NOT_EXECUTABLE ||
+         isolate_->code_range() == NULL ||
+         !isolate_->code_range()->valid());
   reservation->Release();
 }
 
@@ -353,11 +347,14 @@ void MemoryAllocator::FreeMemory(Address base,
     ASSERT(size_executable_ >= size);
     size_executable_ -= size;
   }
-  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
+  if (isolate_->code_range() != NULL &&
+      isolate_->code_range()->contains(static_cast<Address>(base))) {
     ASSERT(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
-    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+    ASSERT(executable == NOT_EXECUTABLE ||
+           isolate_->code_range() == NULL ||
+           !isolate_->code_range()->valid());
     bool result = VirtualMemory::ReleaseRegion(base, size);
     USE(result);
     ASSERT(result);
@@ -483,7 +480,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
-  chunk->parallel_sweeping_ = 0;
+  chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
   chunk->available_in_large_free_list_ = 0;
@@ -533,7 +530,8 @@ bool MemoryChunk::CommitArea(size_t requested) {
       }
     } else {
       CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      ASSERT(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }
 
@@ -549,7 +547,8 @@ bool MemoryChunk::CommitArea(size_t requested) {
       if (!reservation_.Uncommit(start, length)) return false;
     } else {
       CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
+      ASSERT(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
       if (!code_range->UncommitRawMemory(start, length)) return false;
     }
   }
@@ -560,33 +559,22 @@ bool MemoryChunk::CommitArea(size_t requested) {
 
 
 void MemoryChunk::InsertAfter(MemoryChunk* other) {
-  next_chunk_ = other->next_chunk_;
-  prev_chunk_ = other;
-
-  // This memory barrier is needed since concurrent sweeper threads may iterate
-  // over the list of pages while a new page is inserted.
-  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
-  // expanded concurrently
-  MemoryBarrier();
+  MemoryChunk* other_next = other->next_chunk();
 
-  // The following two write operations can take effect in arbitrary order
-  // since pages are always iterated by the sweeper threads in LIFO order, i.e,
-  // the inserted page becomes visible for the sweeper threads after
-  // other->next_chunk_ = this;
-  other->next_chunk_->prev_chunk_ = this;
-  other->next_chunk_ = this;
+  set_next_chunk(other_next);
+  set_prev_chunk(other);
+  other_next->set_prev_chunk(this);
+  other->set_next_chunk(this);
 }
 
 
 void MemoryChunk::Unlink() {
-  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
-    heap_->decrement_scan_on_scavenge_pages();
-    ClearFlag(SCAN_ON_SCAVENGE);
-  }
-  next_chunk_->prev_chunk_ = prev_chunk_;
-  prev_chunk_->next_chunk_ = next_chunk_;
-  prev_chunk_ = NULL;
-  next_chunk_ = NULL;
+  MemoryChunk* next_element = next_chunk();
+  MemoryChunk* prev_element = prev_chunk();
+  next_element->set_prev_chunk(prev_element);
+  prev_element->set_next_chunk(next_element);
+  set_prev_chunk(NULL);
+  set_next_chunk(NULL);
 }
 
 
@@ -650,7 +638,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                  OS::CommitPageSize());
     // Allocate executable memory either from code range or from the
     // OS.
-    if (isolate_->code_range()->exists()) {
+    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
       base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                        commit_size,
                                                        &chunk_size);
@@ -718,7 +706,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                                 executable,
                                                 owner);
   result->set_reserved_memory(&reservation);
-  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
+  MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
   return result;
 }
 
@@ -948,8 +936,8 @@ PagedSpace::PagedSpace(Heap* heap,
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)),
-      unswept_free_bytes_(0) {
+      unswept_free_bytes_(0),
+      end_of_unswept_pages_(NULL) {
   if (id == CODE_SPACE) {
     area_size_ = heap->isolate()->memory_allocator()->
         CodePageAreaSize();
@@ -1000,11 +988,11 @@ size_t PagedSpace::CommittedPhysicalMemory() {
 }
 
 
-MaybeObject* PagedSpace::FindObject(Address addr) {
+Object* PagedSpace::FindObject(Address addr) {
   // Note: this function can only be called on precisely swept spaces.
   ASSERT(!heap()->mark_compact_collector()->in_use());
 
-  if (!Contains(addr)) return Failure::Exception();
+  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
 
   Page* p = Page::FromAddress(addr);
   HeapObjectIterator it(p, NULL);
@@ -1015,7 +1003,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
   }
 
   UNREACHABLE();
-  return Failure::Exception();
+  return Smi::FromInt(0);
 }
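
With Failure::Exception() gone, FindObject signals "not found" with a Smi
zero, and callers test for a heap object instead of a failure (exactly what
the updated SLOW_ASSERT in LargeObjectSpace::Contains below does). A usage
sketch (LookupInSpace is a hypothetical helper):

    // Hypothetical helper: a Smi result means "not found"; a heap object
    // result is the object whose body contains addr.
    static HeapObject* LookupInSpace(PagedSpace* space, Address addr) {
      Object* result = space->FindObject(addr);
      return result->IsHeapObject() ? HeapObject::cast(result) : NULL;
    }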
 
 
@@ -1058,7 +1046,7 @@ intptr_t PagedSpace::SizeOfFirstPage() {
   int size = 0;
   switch (identity()) {
     case OLD_POINTER_SPACE:
-      size = 72 * kPointerSize * KB;
+      size = 96 * kPointerSize * KB;
       break;
     case OLD_DATA_SPACE:
       size = 192 * KB;
@@ -1072,16 +1060,20 @@ intptr_t PagedSpace::SizeOfFirstPage() {
     case PROPERTY_CELL_SPACE:
       size = 8 * kPointerSize * KB;
       break;
-    case CODE_SPACE:
-      if (heap()->isolate()->code_range()->exists()) {
+    case CODE_SPACE: {
+      CodeRange* code_range = heap()->isolate()->code_range();
+      if (code_range != NULL && code_range->valid()) {
         // When code range exists, code pages are allocated in a special way
         // (from the reserved code range). That part of the code is not yet
         // upgraded to handle small pages.
         size = AreaSize();
       } else {
-        size = 480 * KB;
+        size = RoundUp(
+            480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+            kPointerSize);
       }
       break;
+    }
     default:
       UNREACHABLE();
   }
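
The first code page is no longer a flat 480 KB: the budget is scaled by
FullCodeGenerator::kBootCodeSizeMultiplier, a per-architecture percentage,
which is why src/full-codegen.h is newly included at the top of the file.
A worked example with an illustrative multiplier of 120:

    // Illustrative only: the multiplier value differs per architecture.
    // With kBootCodeSizeMultiplier == 120 (percent):
    //   480 KB * 120 / 100 = 576 KB, already kPointerSize-aligned,
    //   so size = RoundUp(576 KB, kPointerSize) = 576 KB.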
@@ -1122,18 +1114,10 @@ void PagedSpace::IncreaseCapacity(int size) {
 }
 
 
-void PagedSpace::ReleasePage(Page* page, bool unlink) {
+void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());
 
-  // Adjust list of unswept pages if the page is the head of the list.
-  if (first_unswept_page_ == page) {
-    first_unswept_page_ = page->next_page();
-    if (first_unswept_page_ == anchor()) {
-      first_unswept_page_ = Page::FromAddress(NULL);
-    }
-  }
-
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
@@ -1142,19 +1126,19 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
     DecreaseUnsweptFreeBytes(page);
   }
 
-  // TODO(hpayer): This check is just used for debugging purpose and
-  // should be removed or turned into an assert after investigating the
-  // crash in concurrent sweeping.
-  CHECK(!free_list_.ContainsPageFreeListItems(page));
+  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+    heap()->decrement_scan_on_scavenge_pages();
+    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  ASSERT(!free_list_.ContainsPageFreeListItems(page));
 
   if (Page::FromAllocationTop(allocation_info_.top()) == page) {
     allocation_info_.set_top(NULL);
     allocation_info_.set_limit(NULL);
   }
 
-  if (unlink) {
-    page->Unlink();
-  }
+  page->Unlink();
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);
   } else {
@@ -1202,7 +1186,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
       VerifyObject(object);
 
       // The object itself should look OK.
-      object->Verify();
+      object->ObjectVerify();
 
       // All the interior pointers should be contained in the heap.
       int size = object->Size();
@@ -1427,7 +1411,7 @@ bool NewSpace::AddFreshPage() {
 }
 
 
-MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top();
   Address high = to_space_.page_high();
   if (allocation_info_.limit() < high) {
@@ -1449,7 +1433,7 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
     top_on_previous_step_ = to_space_.page_low();
     return AllocateRaw(size_in_bytes);
   } else {
-    return Failure::RetryAfterGC();
+    return AllocationResult::Retry();
   }
 }
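
NewSpace::SlowAllocateRaw now returns the AllocationResult value type
instead of MaybeObject*; AllocationResult::Retry() encodes "collect and
try again" without threading a Failure object through the heap. A hedged
sketch of consuming it (the To/IsRetry names follow the pattern used in
this V8 generation, but treat the exact signatures as assumptions):

    // Sketch: unpacking an AllocationResult at a call site.
    HeapObject* object = NULL;
    AllocationResult allocation = new_space->SlowAllocateRaw(size_in_bytes);
    if (allocation.To(&object)) {
      // Allocation succeeded; object is valid.
    } else {
      // allocation.IsRetry(): collect garbage, then retry the allocation.
    }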
 
@@ -1485,7 +1469,7 @@ void NewSpace::Verify() {
       CHECK(!object->IsCode());
 
       // The object itself should look OK.
-      object->Verify();
+      object->ObjectVerify();
 
       // All the interior pointers should be contained in the heap.
       VerifyPointersVisitor visitor;
@@ -2024,10 +2008,13 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
     // Can't use FreeSpace::cast because it fails during deserialization.
+    // We have to set the size first with a release store before we store
+    // the map because a concurrent store buffer scan on scavenge must not
+    // observe a map with an invalid size.
     FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->set_size(size_in_bytes);
+    this_as_free_space->nobarrier_set_size(size_in_bytes);
+    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
   } else if (size_in_bytes == kPointerSize) {
     set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
   } else if (size_in_bytes == 2 * kPointerSize) {
@@ -2071,31 +2058,34 @@ void FreeListNode::set_next(FreeListNode* next) {
   // stage.
   if (map() == GetHeap()->raw_unchecked_free_space_map()) {
     ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    Memory::Address_at(address() + kNextOffset) =
-        reinterpret_cast<Address>(next);
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+        reinterpret_cast<base::AtomicWord>(next));
   } else {
-    Memory::Address_at(address() + kPointerSize) =
-        reinterpret_cast<Address>(next);
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+        reinterpret_cast<base::AtomicWord>(next));
   }
 }
 
 
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
-  if (category->top_ != NULL) {
-    ASSERT(category->end_ != NULL);
+  if (category->top() != NULL) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
     // reverse order.
     LockGuard<Mutex> target_lock_guard(mutex());
     LockGuard<Mutex> source_lock_guard(category->mutex());
+    ASSERT(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
       end_ = category->end();
     } else {
-      category->end()->set_next(top_);
+      category->end()->set_next(top());
     }
-    top_ = category->top();
+    set_top(category->top());
+    base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
   }
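
Concatenate now reads and writes the category head through top()/set_top()
while holding both mutexes. The top_ field has become an atomic word; its
accessors presumably look like this (assumed shape, the real definitions
are in spaces.h):

    // Assumed accessors: NoBarrier atomics suffice here because the
    // mutexes order cross-thread Concatenate calls; the atomics only keep
    // lock-free readers from observing torn pointers.
    FreeListNode* FreeListCategory::top() const {
      return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
    }

    void FreeListCategory::set_top(FreeListNode* top) {
      base::NoBarrier_Store(&top_,
                            reinterpret_cast<base::AtomicWord>(top));
    }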
@@ -2104,15 +2094,16 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
 
 
 void FreeListCategory::Reset() {
-  top_ = NULL;
-  end_ = NULL;
-  available_ = 0;
+  set_top(NULL);
+  set_end(NULL);
+  set_available(0);
 }
 
 
 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
   int sum = 0;
-  FreeListNode** n = &top_;
+  FreeListNode* t = top();
+  FreeListNode** n = &t;
   while (*n != NULL) {
     if (Page::FromAddress((*n)->address()) == p) {
       FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
@@ -2122,8 +2113,9 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
       n = (*n)->next_address();
     }
   }
-  if (top_ == NULL) {
-    end_ = NULL;
+  set_top(t);
+  if (top() == NULL) {
+    set_end(NULL);
   }
   available_ -= sum;
   return sum;
@@ -2131,17 +2123,17 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
 
 
 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeListNode** n = &top_;
-  while (*n != NULL) {
-    if (Page::FromAddress((*n)->address()) == p) return true;
-    n = (*n)->next_address();
+  FreeListNode* node = top();
+  while (node != NULL) {
+    if (Page::FromAddress(node->address()) == p) return true;
+    node = node->next();
   }
   return false;
 }
 
 
 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
-  FreeListNode* node = top_;
+  FreeListNode* node = top();
 
   if (node == NULL) return NULL;
 
@@ -2180,8 +2172,8 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
 
 
 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
-  node->set_next(top_);
-  top_ = node;
+  node->set_next(top());
+  set_top(node);
   if (end_ == NULL) {
     end_ = node;
   }
@@ -2190,7 +2182,7 @@ void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
 
 
 void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeListNode* n = top_;
+  FreeListNode* n = top();
   while (n != NULL) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
     if (*map_location == NULL) {
@@ -2299,7 +2291,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   }
 
   int huge_list_available = huge_list_.available();
-  for (FreeListNode** cur = huge_list_.GetTopAddress();
+  FreeListNode* top_node = huge_list_.top();
+  for (FreeListNode** cur = &top_node;
        *cur != NULL;
        cur = (*cur)->next_address()) {
     FreeListNode* cur_node = *cur;
@@ -2333,6 +2326,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
     }
   }
 
+  huge_list_.set_top(top_node);
   if (huge_list_.top() == NULL) {
     huge_list_.set_end(NULL);
   }
@@ -2486,11 +2480,11 @@ void FreeList::RepairLists(Heap* heap) {
 #ifdef DEBUG
 intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
-  FreeListNode* cur = top_;
+  FreeListNode* cur = top();
   while (cur != NULL) {
     ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
     FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->Size();
+    sum += cur_as_free_space->nobarrier_size();
     cur = cur->next();
   }
   return sum;
@@ -2502,7 +2496,7 @@ static const int kVeryLongFreeList = 500;
 
 int FreeListCategory::FreeListLength() {
   int length = 0;
-  FreeListNode* cur = top_;
+  FreeListNode* cur = top();
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2542,24 +2536,8 @@ void PagedSpace::PrepareForMarkCompact() {
   // on the first allocation after the sweep.
   EmptyAllocationInfo();
 
-  // Stop lazy sweeping and clear marking bits for unswept pages.
-  if (first_unswept_page_ != NULL) {
-    Page* p = first_unswept_page_;
-    do {
-      // Do not use ShouldBeSweptLazily predicate here.
-      // New evacuation candidates were selected but they still have
-      // to be swept before collection starts.
-      if (!p->WasSwept()) {
-        Bitmap::Clear(p);
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-      }
-      p = p->next_page();
-    } while (p != anchor());
-  }
-  first_unswept_page_ = Page::FromAddress(NULL);
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
   unswept_free_bytes_ = 0;
 
   // Clear the free list before a full GC---it will be rebuilt afterward.
@@ -2568,7 +2546,8 @@ void PagedSpace::PrepareForMarkCompact() {
 
 
 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+  ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
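
The assertion now keys off the collector's concurrent-sweeping state
rather than the removed lazy-sweeping machinery; the size formula itself
is unchanged. Spelled out with illustrative numbers:

    // Illustrative accounting: 4096 KB committed (Size()), 300 KB still
    // free on unswept pages, 100 KB of unused linear allocation area
    // (limit() - top()):
    //   SizeOfObjects() = 4096 KB - 300 KB - 100 KB = 3696 KB live.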
 
@@ -2582,39 +2561,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
 }
 
 
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsLazySweepingComplete()) return true;
-
-  intptr_t freed_bytes = 0;
-  Page* p = first_unswept_page_;
-  do {
-    Page* next_page = p->next_page();
-    if (ShouldBeSweptLazily(p)) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      DecreaseUnsweptFreeBytes(p);
-      freed_bytes +=
-          MarkCompactCollector::
-              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
-                  this, NULL, p);
-    }
-    p = next_page;
-  } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
-  if (p == anchor()) {
-    first_unswept_page_ = Page::FromAddress(NULL);
-  } else {
-    first_unswept_page_ = p;
-  }
-
-  heap()->FreeQueuedChunks();
-
-  return IsLazySweepingComplete();
-}
-
-
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
 
@@ -2631,35 +2577,29 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
 
 
-bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
   MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->AreSweeperThreadsActivated()) {
-    if (collector->IsConcurrentSweepingInProgress()) {
-      if (collector->RefillFreeLists(this) < size_in_bytes) {
-        if (!collector->sequential_sweeping()) {
-          collector->WaitUntilSweepingCompleted();
-          return true;
-        }
-      }
-      return false;
-    }
-    return true;
-  } else {
-    return AdvanceSweeper(size_in_bytes);
+
+  // If sweeper threads are still running, wait for them.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
   }
+  return NULL;
 }
 
 
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
-  // If there are unswept pages advance lazy sweeper a bounded number of times
-  // until we find a size_in_bytes contiguous piece of memory
-  const int kMaxSweepingTries = 5;
-  bool sweeping_complete = false;
-
-  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
+  // If sweeper threads are active, try to re-fill the free-lists.
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->RefillFreeList(this);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2669,9 +2609,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Free list allocation failed and there is no next page.  Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
+  if (!heap()->always_allocate()
+      && heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free-lists.
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
   }
 
   // Try to expand the space and allocate in the new next page.
@@ -2680,18 +2623,10 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
 
-  // Last ditch, sweep all the remaining pages to try to find space.  This may
-  // cause a pause.
-  if (!IsLazySweepingComplete()) {
-    EnsureSweeperProgress(kMaxInt);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
-  // Finally, fail.
-  return NULL;
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists. Allocation may still fail, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
 }
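
Taken together, the rewritten slow path replaces the bounded lazy-sweeping
retries with a fixed escalation ladder. A condensed paraphrase of the
control flow above (a reading aid, not a drop-in function):

    // 1. If concurrent sweeping is in progress: RefillFreeList(this),
    //    then retry free_list_.Allocate(size_in_bytes).
    // 2. If the old-generation allocation limit is reached: wait for the
    //    sweeper threads to finish and retry the free-list allocation.
    // 3. Otherwise try Expand() and allocate from the fresh page.
    // 4. Last resort: wait for the sweepers once more; NULL from here
    //    means a full GC is genuinely required.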
 
 
@@ -2934,22 +2869,22 @@ void LargeObjectSpace::TearDown() {
 }
 
 
-MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
-                                           Executability executable) {
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
+                                               Executability executable) {
   // Check if we want to force a GC before growing the old space further.
   // If so, fail the allocation.
   if (!heap()->always_allocate() &&
       heap()->OldGenerationAllocationLimitReached()) {
-    return Failure::RetryAfterGC(identity());
+    return AllocationResult::Retry(identity());
   }
 
   if (Size() + object_size > max_capacity_) {
-    return Failure::RetryAfterGC(identity());
+    return AllocationResult::Retry(identity());
   }
 
   LargePage* page = heap()->isolate()->memory_allocator()->
       AllocateLargePage(object_size, this, executable);
-  if (page == NULL) return Failure::RetryAfterGC(identity());
+  if (page == NULL) return AllocationResult::Retry(identity());
   ASSERT(page->area_size() >= object_size);
 
   size_ += static_cast<int>(page->size());
@@ -3002,12 +2937,12 @@ size_t LargeObjectSpace::CommittedPhysicalMemory() {
 
 
 // GC support
-MaybeObject* LargeObjectSpace::FindObject(Address a) {
+Object* LargeObjectSpace::FindObject(Address a) {
   LargePage* page = FindPage(a);
   if (page != NULL) {
     return page->GetObject();
   }
-  return Failure::Exception();
+  return Smi::FromInt(0);  // Signaling not found.
 }
 
 
@@ -3088,7 +3023,7 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
 
   bool owned = (chunk->owner() == this);
 
-  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
+  SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject());
 
   return owned;
 }
@@ -3121,7 +3056,7 @@ void LargeObjectSpace::Verify() {
            object->IsFixedDoubleArray() || object->IsByteArray());
 
     // The object itself should look OK.
-    object->Verify();
+    object->ObjectVerify();
 
     // Byte arrays and strings don't have interior pointers.
     if (object->IsCode()) {