Revert 10542 (boot time memory reduction) due to map alignment issues on 64 bit.

author     erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 30 Jan 2012 10:20:13 +0000 (10:20 +0000)
committer  erik.corry@gmail.com <erik.corry@gmail.com@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
           Mon, 30 Jan 2012 10:20:13 +0000 (10:20 +0000)

Review URL: https://chromiumcodereview.appspot.com/9295047

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@10544 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

14 files changed:
src/deoptimizer.cc
src/heap.cc
src/incremental-marking.cc
src/mark-compact.cc
src/serialize.cc
src/snapshot.h
src/spaces-inl.h
src/spaces.cc
src/spaces.h
src/store-buffer.cc
src/utils.h
test/cctest/test-heap.cc
test/cctest/test-mark-compact.cc
test/cctest/test-spaces.cc

src/deoptimizer.cc
index 762fd98..56ff454 100644 (file)
@@ -1150,7 +1150,6 @@ MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
 
   MemoryChunk* chunk =
       Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
-                                                            desc.instr_size,
                                                             EXECUTABLE,
                                                             NULL);
   if (chunk == NULL) {
src/heap.cc
index 59dc5d8..ff978cf 100644 (file)
@@ -582,14 +582,10 @@ void Heap::ReserveSpace(
   PagedSpace* map_space = Heap::map_space();
   PagedSpace* cell_space = Heap::cell_space();
   LargeObjectSpace* lo_space = Heap::lo_space();
-  bool one_old_space_gc_has_been_performed = false;
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
-  bool old_space_gc_performed;
-
   while (gc_performed && counter++ < kThreshold) {
-    old_space_gc_performed = false;
     gc_performed = false;
     if (!new_space->ReserveSpace(new_space_size)) {
       Heap::CollectGarbage(NEW_SPACE);
@@ -598,27 +594,22 @@ void Heap::ReserveSpace(
     if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
       Heap::CollectGarbage(OLD_POINTER_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(old_data_space->ReserveSpace(data_space_size))) {
       Heap::CollectGarbage(OLD_DATA_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(code_space->ReserveSpace(code_space_size))) {
       Heap::CollectGarbage(CODE_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(map_space->ReserveSpace(map_space_size))) {
       Heap::CollectGarbage(MAP_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     if (!(cell_space->ReserveSpace(cell_space_size))) {
       Heap::CollectGarbage(CELL_SPACE);
       gc_performed = true;
-      old_space_gc_performed = true;
     }
     // We add a slack-factor of 2 in order to have space for a series of
     // large-object allocations that are only just larger than the page size.
@@ -628,22 +619,15 @@ void Heap::ReserveSpace(
     // allocation in the other spaces.
     large_object_size += cell_space_size + map_space_size + code_space_size +
         data_space_size + pointer_space_size;
-
-    // If we already did one GC in order to make space in old space, there is
-    // no sense in doing another one.  We will attempt to force through the
-    // large object space allocation, which comes directly from the OS,
-    // regardless of any soft limit.
-    if (!one_old_space_gc_has_been_performed &&
-        !(lo_space->ReserveSpace(large_object_size))) {
+    if (!(lo_space->ReserveSpace(large_object_size))) {
       Heap::CollectGarbage(LO_SPACE);
       gc_performed = true;
     }
-    if (old_space_gc_performed) one_old_space_gc_has_been_performed = true;
   }
 
   if (gc_performed) {
     // Failed to reserve the space after several attempts.
-    V8::FatalProcessOutOfMemory("Heap.:ReserveSpace");
+    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
   }
 }
 
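Note on the heap.cc hunk above: the revert returns Heap::ReserveSpace to a plain retry loop in which every space that fails to reserve its quota triggers a collection of that space, and the whole round is repeated up to a fixed threshold before giving up. A minimal standalone sketch of that control flow (all types and names below are invented for illustration; only the loop shape mirrors the restored code):

    #include <cstdio>
    #include <cstdlib>
    #include <functional>
    #include <vector>

    struct SpaceStub {
      std::function<bool(int)> reserve;   // Tries to reserve `bytes`; false on failure.
      std::function<void()> collect;      // Collects garbage in this space.
    };

    // Retries reservation rounds; aborts if space still cannot be found.
    void ReserveSpaceSketch(std::vector<SpaceStub>& spaces, int bytes_per_space) {
      static const int kThreshold = 20;   // Same retry bound as the restored code.
      bool gc_performed = true;
      int counter = 0;
      while (gc_performed && counter++ < kThreshold) {
        gc_performed = false;
        for (SpaceStub& space : spaces) {
          if (!space.reserve(bytes_per_space)) {
            space.collect();              // Make room in this space, then retry the round.
            gc_performed = true;
          }
        }
      }
      if (gc_performed) {
        std::fprintf(stderr, "ReserveSpaceSketch: out of memory\n");
        std::abort();
      }
    }
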
src/incremental-marking.cc
index 05f60fa..6248524 100644 (file)
@@ -287,7 +287,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
 
     // It's difficult to filter out slots recorded for large objects.
     if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > Page::kPageSize &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
         is_compacting) {
       chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
     }
src/mark-compact.cc
index 3287b3b..8cd9d02 100644 (file)
@@ -2919,8 +2919,7 @@ static void SweepPrecisely(PagedSpace* space,
     for ( ; live_objects != 0; live_objects--) {
       Address free_end = object_address + offsets[live_index++] * kPointerSize;
       if (free_end != free_start) {
-        space->AddToFreeLists(free_start,
-                              static_cast<int>(free_end - free_start));
+        space->Free(free_start, static_cast<int>(free_end - free_start));
       }
       HeapObject* live_object = HeapObject::FromAddress(free_end);
       ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -2946,8 +2945,7 @@ static void SweepPrecisely(PagedSpace* space,
     cells[cell_index] = 0;
   }
   if (free_start != p->ObjectAreaEnd()) {
-    space->AddToFreeLists(free_start,
-                          static_cast<int>(p->ObjectAreaEnd() - free_start));
+    space->Free(free_start, static_cast<int>(p->ObjectAreaEnd() - free_start));
   }
   p->ResetLiveBytes();
 }
@@ -3240,9 +3238,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->AddToFreeLists(
-        p->ObjectAreaStart(),
-        static_cast<int>(p->ObjectAreaEnd() - p->ObjectAreaStart()));
+    space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
     p->set_scan_on_scavenge(false);
     slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
     p->ClearEvacuationCandidate();
@@ -3559,8 +3555,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->ObjectAreaStart();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->AddToFreeLists(
-        p->ObjectAreaStart(), static_cast<int>(size)));
+    freed_bytes += static_cast<int>(space->Free(p->ObjectAreaStart(),
+                                                static_cast<int>(size)));
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3569,8 +3565,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->ObjectAreaStart();
-  freed_bytes += space->AddToFreeLists(p->ObjectAreaStart(),
-                                       static_cast<int>(size));
+  freed_bytes += space->Free(p->ObjectAreaStart(),
+                             static_cast<int>(size));
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3599,8 +3595,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->AddToFreeLists(
-              free_start, static_cast<int>(free_end - free_start));
+          freed_bytes += space->Free(free_start,
+                                     static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3614,8 +3610,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->AddToFreeLists(
-        free_start, static_cast<int>(block_address - free_start));
+    freed_bytes += space->Free(free_start,
+                               static_cast<int>(block_address - free_start));
   }
 
   p->ResetLiveBytes();
src/serialize.cc
index 820439c..d9fc2b7 100644 (file)
@@ -612,7 +612,6 @@ Address Deserializer::Allocate(int space_index, Space* space, int size) {
     pages_[LO_SPACE].Add(address);
   }
   last_object_address_ = address;
-  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
   return address;
 }
 
@@ -623,12 +622,7 @@ HeapObject* Deserializer::GetAddressFromEnd(int space) {
   int offset = source_->GetInt();
   ASSERT(!SpaceIsLarge(space));
   offset <<= kObjectAlignmentBits;
-  Address address = high_water_[space] - offset;
-  // This assert will fail if kMinimumSpaceSizes is too small for a space,
-  // because we rely on the fact that all allocation is linear when the VM
-  // is very young.
-  ASSERT(address >= Page::FromAddress(address)->ObjectAreaStart());
-  return HeapObject::FromAddress(address);
+  return HeapObject::FromAddress(high_water_[space] - offset);
 }
 
 
src/snapshot.h
index fbb6c8a..4f01a2d 100644 (file)
@@ -26,7 +26,6 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "isolate.h"
-#include "spaces.h"
 
 #ifndef V8_SNAPSHOT_H_
 #define V8_SNAPSHOT_H_
@@ -87,7 +86,6 @@ class Snapshot {
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_SNAPSHOT_H_
src/spaces-inl.h
index 89ed3a1..d0cddeb 100644 (file)
@@ -164,12 +164,12 @@ Page* Page::Initialize(Heap* heap,
                        Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(chunk->size() <= kPageSize);
+  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
   ASSERT(chunk->owner() == owner);
-  int object_bytes =
-      static_cast<int>(page->ObjectAreaEnd() - page->ObjectAreaStart());
-  owner->IncreaseCapacity(object_bytes);
-  owner->AddToFreeLists(page->ObjectAreaStart(), object_bytes);
+  owner->IncreaseCapacity(Page::kObjectAreaSize);
+  owner->Free(page->ObjectAreaStart(),
+              static_cast<int>(page->ObjectAreaEnd() -
+                               page->ObjectAreaStart()));
 
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
 
@@ -257,7 +257,6 @@ HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
   if (new_top > allocation_info_.limit) return NULL;
 
   allocation_info_.top = new_top;
-  ASSERT(new_top >= Page::FromAllocationTop(new_top)->ObjectAreaStart());
   return HeapObject::FromAddress(current_top);
 }
 
src/spaces.cc
index 481721d..05c5876 100644 (file)
@@ -31,7 +31,6 @@
 #include "macro-assembler.h"
 #include "mark-compact.h"
 #include "platform.h"
-#include "snapshot.h"
 
 namespace v8 {
 namespace internal {
@@ -264,7 +263,7 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
-      memory_allocator_reserved_(0),
+      size_(0),
       size_executable_(0) {
 }
 
@@ -274,7 +273,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);
 
-  memory_allocator_reserved_ = 0;
+  size_ = 0;
   size_executable_ = 0;
 
   return true;
@@ -283,7 +282,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
 
 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  CHECK_EQ(memory_allocator_reserved_, 0);
+  ASSERT(size_ == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;
@@ -296,8 +295,8 @@ void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
-  ASSERT(memory_allocator_reserved_ >= size);
-  memory_allocator_reserved_ -= size;
+  ASSERT(size_ >= size);
+  size_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
@@ -317,8 +316,8 @@ void MemoryAllocator::FreeMemory(Address base,
                                  size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
-  ASSERT(memory_allocator_reserved_ >= size);
-  memory_allocator_reserved_ -= size;
+  ASSERT(size_ >= size);
+  size_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
@@ -344,7 +343,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
   VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  memory_allocator_reserved_ += reservation.size();
+  size_ += reservation.size();
   Address base = RoundUp(static_cast<Address>(reservation.address()),
                          alignment);
   controller->TakeControl(&reservation);
@@ -353,14 +352,11 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size,
 
 
 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
-                                               size_t reserved_size,
                                                size_t alignment,
                                                Executability executable,
                                                VirtualMemory* controller) {
-  ASSERT(RoundUp(reserved_size, OS::CommitPageSize()) >=
-         RoundUp(size, OS::CommitPageSize()));
   VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(reserved_size, alignment, &reservation);
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
   if (!reservation.Commit(base,
                           size,
@@ -379,53 +375,6 @@ void Page::InitializeAsAnchor(PagedSpace* owner) {
 }
 
 
-void Page::CommitMore(intptr_t space_needed) {
-  intptr_t reserved_page_size = reservation_.IsReserved() ?
-      reservation_.size() :
-      Page::kPageSize;
-  ASSERT(size() + space_needed <= reserved_page_size);
-  // At increase the page size by at least 64k (this also rounds to OS page
-  // size).
-  int expand = Min(reserved_page_size - size(),
-                   RoundUp(size() + space_needed, Page::kGrowthUnit) - size());
-  ASSERT(expand <= kPageSize - size());
-  ASSERT(expand <= reserved_page_size - size());
-  Executability executable =
-      IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-  Address old_end = ObjectAreaEnd();
-  if (!VirtualMemory::CommitRegion(old_end, expand, executable)) return;
-
-  set_size(size() + expand);
-
-  PagedSpace* paged_space = reinterpret_cast<PagedSpace*>(owner());
-  paged_space->heap()->isolate()->memory_allocator()->AllocationBookkeeping(
-      paged_space,
-      old_end,
-      0,  // No new memory was reserved.
-      expand,  // New memory committed.
-      executable);
-  paged_space->IncreaseCapacity(expand);
-
-  // In spaces with alignment requirements (e.g. map space) we have to align
-  // the expanded area with the correct object alignment.
-  uintptr_t object_area_size = old_end - ObjectAreaStart();
-  uintptr_t aligned_object_area_size =
-      object_area_size - object_area_size % paged_space->ObjectAlignment();
-  if (aligned_object_area_size != object_area_size) {
-    aligned_object_area_size += paged_space->ObjectAlignment();
-  }
-  Address new_area =
-      reinterpret_cast<Address>(ObjectAreaStart() + aligned_object_area_size);
-  // In spaces with alignment requirements, this will waste the space for one
-  // object per doubling of the page size until the next GC.
-  paged_space->AddToFreeLists(old_end, new_area - old_end);
-
-  expand -= (new_area - old_end);
-
-  paged_space->AddToFreeLists(new_area, expand);
-}
-
-
 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                        Address start,
                                        SemiSpace* semi_space) {
@@ -511,15 +460,9 @@ void MemoryChunk::Unlink() {
 
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
-                                            intptr_t committed_body_size,
                                             Executability executable,
                                             Space* owner) {
-  ASSERT(body_size >= committed_body_size);
-  size_t chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + body_size,
-                              OS::CommitPageSize());
-  intptr_t committed_chunk_size =
-      committed_body_size + MemoryChunk::kObjectStartOffset;
-  committed_chunk_size = RoundUp(committed_chunk_size, OS::CommitPageSize());
+  size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
   VirtualMemory reservation;
@@ -539,21 +482,20 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      // The AllocateAlignedMemory method will update the memory allocator
-      // memory used, but we are not using that if we have a code range, so
-      // we update it here.
-      memory_allocator_reserved_ += chunk_size;
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
     } else {
-      base = AllocateAlignedMemory(committed_chunk_size,
-                                   chunk_size,
+      base = AllocateAlignedMemory(chunk_size,
                                    MemoryChunk::kAlignment,
                                    executable,
                                    &reservation);
       if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
     }
   } else {
-    base = AllocateAlignedMemory(committed_chunk_size,
-                                 chunk_size,
+    base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
                                  &reservation);
@@ -561,53 +503,31 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
     if (base == NULL) return NULL;
   }
 
-  AllocationBookkeeping(
-      owner, base, chunk_size, committed_chunk_size, executable);
-
-  MemoryChunk* result = MemoryChunk::Initialize(heap,
-                                                base,
-                                                committed_chunk_size,
-                                                executable,
-                                                owner);
-  result->set_reserved_memory(&reservation);
-  return result;
-}
-
-
-void MemoryAllocator::AllocationBookkeeping(Space* owner,
-                                            Address base,
-                                            intptr_t reserved_chunk_size,
-                                            intptr_t committed_chunk_size,
-                                            Executability executable) {
-  if (executable == EXECUTABLE) {
-    // Update executable memory size.
-    size_executable_ += reserved_chunk_size;
-  }
-
 #ifdef DEBUG
-  ZapBlock(base, committed_chunk_size);
+  ZapBlock(base, chunk_size);
 #endif
   isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(committed_chunk_size));
+      Increment(static_cast<int>(chunk_size));
 
-  LOG(isolate_, NewEvent("MemoryChunk", base, committed_chunk_size));
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(
-        space, kAllocationActionAllocate, committed_chunk_size);
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
+
+  MemoryChunk* result = MemoryChunk::Initialize(heap,
+                                                base,
+                                                chunk_size,
+                                                executable,
+                                                owner);
+  result->set_reserved_memory(&reservation);
+  return result;
 }
 
 
-Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
-                                    PagedSpace* owner,
+Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
-  ASSERT(committed_object_area_size <= Page::kObjectAreaSize);
-
-  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize,
-                                     committed_object_area_size,
-                                     executable,
-                                     owner);
+  MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
 
   if (chunk == NULL) return NULL;
 
@@ -618,8 +538,7 @@ Page* MemoryAllocator::AllocatePage(intptr_t committed_object_area_size,
 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                               Executability executable,
                                               Space* owner) {
-  MemoryChunk* chunk =
-      AllocateChunk(object_size, object_size, executable, owner);
+  MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
   if (chunk == NULL) return NULL;
   return LargePage::Initialize(isolate_->heap(), chunk);
 }
@@ -640,12 +559,8 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   if (reservation->IsReserved()) {
     FreeMemory(reservation, chunk->executable());
   } else {
-    // When we do not have a reservation that is because this allocation
-    // is part of the huge reserved chunk of memory reserved for code on
-    // x64.  In that case the size was rounded up to the page size on
-    // allocation so we do the same now when freeing.
     FreeMemory(chunk->address(),
-               RoundUp(chunk->size(), Page::kPageSize),
+               chunk->size(),
                chunk->executable());
   }
 }
@@ -725,12 +640,11 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  float pct =
-      static_cast<float>(capacity_ - memory_allocator_reserved_) / capacity_;
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX "d"
              ", used: %" V8_PTR_PREFIX "d"
              ", available: %%%d\n\n",
-         capacity_, memory_allocator_reserved_, static_cast<int>(pct*100));
+         capacity_, size_, static_cast<int>(pct*100));
 }
 #endif
 
@@ -809,6 +723,7 @@ MaybeObject* PagedSpace::FindObject(Address addr) {
 
 bool PagedSpace::CanExpand() {
   ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
 
   if (Capacity() == max_capacity_) return false;
 
@@ -820,43 +735,11 @@ bool PagedSpace::CanExpand() {
   return true;
 }
 
-bool PagedSpace::Expand(intptr_t size_in_bytes) {
+bool PagedSpace::Expand() {
   if (!CanExpand()) return false;
 
-  Page* last_page = anchor_.prev_page();
-  if (last_page != &anchor_) {
-    // We have run out of linear allocation space.  This may be  because the
-    // most recently allocated page (stored last in the list) is a small one,
-    // that starts on a page aligned boundary, but has not a full kPageSize of
-    // committed memory.  Let's commit more memory for the page.
-    intptr_t reserved_page_size = last_page->reserved_memory()->IsReserved() ?
-        last_page->reserved_memory()->size() :
-        Page::kPageSize;
-    if (last_page->size() < reserved_page_size &&
-        (reserved_page_size - last_page->size()) >= size_in_bytes &&
-        !last_page->IsEvacuationCandidate() &&
-        last_page->WasSwept()) {
-      last_page->CommitMore(size_in_bytes);
-      return true;
-    }
-  }
-
-  // We initially only commit a part of the page, but the deserialization
-  // of the initial snapshot makes the assumption that it can deserialize
-  // into linear memory of a certain size per space, so some of the spaces
-  // need to have a little more committed memory.
-  int initial =
-      Max(OS::CommitPageSize(), static_cast<intptr_t>(Page::kGrowthUnit));
-
-  ASSERT(Page::kPageSize - initial < Page::kObjectAreaSize);
-
-  intptr_t expansion_size =
-      Max(initial,
-          RoundUpToPowerOf2(MemoryChunk::kObjectStartOffset + size_in_bytes)) -
-      MemoryChunk::kObjectStartOffset;
-
   Page* p = heap()->isolate()->memory_allocator()->
-      AllocatePage(expansion_size, this, executable());
+      AllocatePage(this, executable());
   if (p == NULL) return false;
 
   ASSERT(Capacity() <= max_capacity_);
@@ -901,8 +784,6 @@ void PagedSpace::ReleasePage(Page* page) {
     allocation_info_.top = allocation_info_.limit = NULL;
   }
 
-  intptr_t size = page->ObjectAreaEnd() - page->ObjectAreaStart();
-
   page->Unlink();
   if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
     heap()->isolate()->memory_allocator()->Free(page);
@@ -911,7 +792,8 @@ void PagedSpace::ReleasePage(Page* page) {
   }
 
   ASSERT(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(size);
+  ASSERT(Capacity() % Page::kObjectAreaSize == 0);
+  accounting_stats_.ShrinkSpace(Page::kObjectAreaSize);
 }
 
 
@@ -1789,7 +1671,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
   // is big enough to be a FreeSpace with at least one extra word (the next
   // pointer), we set its map to be the free space map and its size to an
   // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (e.g. one or two words), to hold both a size
+  // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
   if (size_in_bytes > FreeSpace::kHeaderSize) {
@@ -1893,102 +1775,69 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-FreeListNode* FreeList::PickNodeFromList(FreeListNode** list,
-                                         int* node_size,
-                                         int minimum_size) {
+FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
   FreeListNode* node = *list;
 
   if (node == NULL) return NULL;
 
-  ASSERT(node->map() == node->GetHeap()->raw_unchecked_free_space_map());
-
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
     available_ -= node->Size();
     node = node->next();
   }
 
-  if (node == NULL) {
+  if (node != NULL) {
+    *node_size = node->Size();
+    *list = node->next();
+  } else {
     *list = NULL;
-    return NULL;
   }
 
-  // Gets the size without checking the map.  When we are booting we have
-  // a FreeListNode before we have created its map.
-  intptr_t size = reinterpret_cast<FreeSpace*>(node)->Size();
-
-  // We don't search the list for one that fits, preferring to look in the
-  // list of larger nodes, but we do check the first in the list, because
-  // if we had to expand the space or page we may have placed an entry that
-  // was just long enough at the head of one of the lists.
-  if (size < minimum_size) return NULL;
-
-  *node_size = size;
-  available_ -= size;
-  *list = node->next();
-
   return node;
 }
 
 
-FreeListNode* FreeList::FindAbuttingNode(
-  int size_in_bytes, int* node_size, Address limit, FreeListNode** list_head) {
-  FreeListNode* first_node = *list_head;
-  if (first_node != NULL &&
-      first_node->address() == limit &&
-      reinterpret_cast<FreeSpace*>(first_node)->Size() >= size_in_bytes &&
-      !Page::FromAddress(first_node->address())->IsEvacuationCandidate()) {
-    FreeListNode* answer = first_node;
-    int size = reinterpret_cast<FreeSpace*>(first_node)->Size();
-    available_ -= size;
-    *node_size = size;
-    *list_head = first_node->next();
-    ASSERT(IsVeryLong() || available_ == SumFreeLists());
-    return answer;
-  }
-  return NULL;
-}
-
-
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes,
-                                    int* node_size,
-                                    Address limit) {
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeListNode* node = NULL;
 
-  if (limit != NULL) {
-    // We may have a memory area at the head of the free list, which abuts the
-    // old linear allocation area.  This happens if the linear allocation area
-    // has been shortened to allow an incremental marking step to be performed.
-    // In that case we prefer to return the free memory area that is contiguous
-    // with the old linear allocation area.
-    node = FindAbuttingNode(size_in_bytes, node_size, limit, &large_list_);
-    if (node != NULL) return node;
-    node = FindAbuttingNode(size_in_bytes, node_size, limit, &huge_list_);
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = PickNodeFromList(&small_list_, node_size);
     if (node != NULL) return node;
   }
 
-  node = PickNodeFromList(&small_list_, node_size, size_in_bytes);
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
-  if (node != NULL) return node;
-
-  node = PickNodeFromList(&medium_list_, node_size, size_in_bytes);
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
-  if (node != NULL) return node;
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = PickNodeFromList(&medium_list_, node_size);
+    if (node != NULL) return node;
+  }
 
-  node = PickNodeFromList(&large_list_, node_size, size_in_bytes);
-  ASSERT(IsVeryLong() || available_ == SumFreeLists());
-  if (node != NULL) return node;
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = PickNodeFromList(&large_list_, node_size);
+    if (node != NULL) return node;
+  }
 
-  // The tricky third clause in this for statement is due to the fact that
-  // PickNodeFromList can cut pages out of the list if they are unavailable for
-  // new allocation (e.g. if they are on a page that has been scheduled for
-  // evacuation).
   for (FreeListNode** cur = &huge_list_;
        *cur != NULL;
-       cur = (*cur) == NULL ? cur : (*cur)->next_address()) {
-    node = PickNodeFromList(cur, node_size, size_in_bytes);
-    ASSERT(IsVeryLong() || available_ == SumFreeLists());
-    if (node != NULL) return node;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) break;
+
+    ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *node_size = size;
+      *cur = node->next();
+      break;
+    }
   }
 
   return node;
@@ -2007,23 +1856,10 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
 
   int new_node_size = 0;
-  FreeListNode* new_node =
-      FindNodeFor(size_in_bytes, &new_node_size, owner_->limit());
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) return NULL;
 
-  if (new_node->address() == owner_->limit()) {
-    // The new freelist node we were given is an extension of the one we had
-    // last.  This is a common thing to happen when we extend a small page by
-    // committing more memory.  In this case we just add the new node to the
-    // linear allocation area and recurse.
-    owner_->Allocate(new_node_size);
-    owner_->SetTop(owner_->top(), new_node->address() + new_node_size);
-    MaybeObject* allocation = owner_->AllocateRaw(size_in_bytes);
-    Object* answer;
-    if (!allocation->ToObject(&answer)) return NULL;
-    return HeapObject::cast(answer);
-  }
-
+  available_ -= new_node_size;
   ASSERT(IsVeryLong() || available_ == SumFreeLists());
 
   int bytes_left = new_node_size - size_in_bytes;
@@ -2033,9 +1869,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  if (old_linear_size != 0) {
-    owner_->AddToFreeLists(owner_->top(), old_linear_size);
-  }
+  owner_->Free(owner_->top(), old_linear_size);
 
 #ifdef DEBUG
   for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
@@ -2064,8 +1898,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     // We don't want to give too large linear areas to the allocator while
     // incremental marking is going on, because we won't check again whether
     // we want to do another increment until the linear area is used up.
-    owner_->AddToFreeLists(new_node->address() + size_in_bytes + linear_size,
-                           new_node_size - size_in_bytes - linear_size);
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + size_in_bytes + linear_size);
   } else if (bytes_left > 0) {
@@ -2074,7 +1908,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
     owner_->SetTop(new_node->address() + size_in_bytes,
                    new_node->address() + new_node_size);
   } else {
-    ASSERT(bytes_left == 0);
     // TODO(gc) Try not freeing linear allocation region when bytes_left
     // are zero.
     owner_->SetTop(NULL, NULL);
@@ -2207,9 +2040,7 @@ bool NewSpace::ReserveSpace(int bytes) {
   HeapObject* allocation = HeapObject::cast(object);
   Address top = allocation_info_.top;
   if ((top - bytes) == allocation->address()) {
-    Address new_top = allocation->address();
-    ASSERT(new_top >= Page::FromAddress(new_top - 1)->ObjectAreaStart());
-    allocation_info_.top = new_top;
+    allocation_info_.top = allocation->address();
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but
@@ -2224,7 +2055,7 @@ void PagedSpace::PrepareForMarkCompact() {
   // Mark the old linear allocation area with a free space map so it can be
   // skipped when scanning the heap.
   int old_linear_size = static_cast<int>(limit() - top());
-  AddToFreeLists(top(), old_linear_size);
+  Free(top(), old_linear_size);
   SetTop(NULL, NULL);
 
   // Stop lazy sweeping and clear marking bits for unswept pages.
@@ -2267,13 +2098,10 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) {
   // Mark the old linear allocation area with a free space so it can be
   // skipped when scanning the heap.  This also puts it back in the free list
   // if it is big enough.
-  AddToFreeLists(top(), old_linear_size);
+  Free(top(), old_linear_size);
 
   SetTop(new_area->address(), new_area->address() + size_in_bytes);
-  // The AddToFreeLists call above will reduce the size of the space in the
-  // allocation stats.  We don't need to add this linear area to the size
-  // with an Allocate(size_in_bytes) call here, because the
-  // free_list_.Allocate() call above already accounted for this memory.
+  Allocate(size_in_bytes);
   return true;
 }
 
@@ -2354,7 +2182,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   }
 
   // Try to expand the space and allocate in the new next page.
-  if (Expand(size_in_bytes)) {
+  if (Expand()) {
     return free_list_.Allocate(size_in_bytes);
   }
 
@@ -2715,7 +2543,6 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       heap()->mark_compact_collector()->ReportDeleteIfNeeded(
           object, heap()->isolate());
       size_ -= static_cast<int>(page->size());
-      ASSERT(size_ >= 0);
       objects_size_ -= object->Size();
       page_count_--;
 
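The spaces.cc changes also restore the earlier FreeList::FindNodeFor behaviour: small, medium and large requests are served from the head of an exact size-class list, and only the huge list is scanned first-fit. A simplified sketch of that segregated-fit pattern, with invented types and thresholds (not V8's actual allocation-max constants):

    #include <list>

    // A free block: start offset and size in bytes.
    struct FreeBlock {
      int start;
      int size;
    };

    struct SegregatedFreeList {
      // Size-class ceilings; illustrative values only.
      static const int kSmallMax = 256;
      static const int kMediumMax = 2048;
      static const int kLargeMax = 16384;

      std::list<FreeBlock> small_, medium_, large_, huge_;

      // Returns true and fills *out if a block of at least `size` bytes was found.
      bool FindNodeFor(int size, FreeBlock* out) {
        // Exact size classes: take the head of the first class that can serve
        // the request, falling through to the next larger class on failure.
        if (size <= kSmallMax && Pop(&small_, size, out)) return true;
        if (size <= kMediumMax && Pop(&medium_, size, out)) return true;
        if (size <= kLargeMax && Pop(&large_, size, out)) return true;
        // Huge list: first-fit scan, unlink the first block that is big enough.
        for (auto it = huge_.begin(); it != huge_.end(); ++it) {
          if (it->size >= size) {
            *out = *it;
            huge_.erase(it);
            return true;
          }
        }
        return false;
      }

      static bool Pop(std::list<FreeBlock>* list, int size, FreeBlock* out) {
        if (list->empty() || list->front().size < size) return false;
        *out = list->front();
        list->pop_front();
        return true;
      }
    };
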
src/spaces.h
index 9864585..1a30078 100644 (file)
@@ -505,9 +505,11 @@ class MemoryChunk {
   static const int kObjectStartOffset = kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
-  intptr_t size() const { return size_; }
+  size_t size() const { return size_; }
 
-  void set_size(size_t size) { size_ = size; }
+  void set_size(size_t size) {
+    size_ = size;
+  }
 
   Executability executable() {
     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
@@ -659,7 +661,7 @@ class Page : public MemoryChunk {
   Address ObjectAreaStart() { return address() + kObjectStartOffset; }
 
   // Returns the end address (exclusive) of the object area in this page.
-  Address ObjectAreaEnd() { return address() + size(); }
+  Address ObjectAreaEnd() { return address() + Page::kPageSize; }
 
   // Checks whether an address is page aligned.
   static bool IsAlignedToPageSize(Address a) {
@@ -678,17 +680,11 @@ class Page : public MemoryChunk {
     return address() + offset;
   }
 
-  // Expand the committed area for pages that are small.
-  void CommitMore(intptr_t space_needed);
-
   // ---------------------------------------------------------------------
 
   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
 
-  // For a 1Mbyte page grow 64k at a time.
-  static const int kGrowthUnit = 1 << (kPageSizeBits - 4);
-
   // Page size mask.
   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
 
@@ -853,10 +849,12 @@ class CodeRange {
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
     FreeBlock(void* start_arg, size_t size_arg)
         : start(static_cast<Address>(start_arg)), size(size_arg) {
       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
+      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
     }
 
     Address start;
@@ -952,9 +950,7 @@ class MemoryAllocator {
 
   void TearDown();
 
-  Page* AllocatePage(intptr_t object_area_size,
-                     PagedSpace* owner,
-                     Executability executable);
+  Page* AllocatePage(PagedSpace* owner, Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size,
                                       Executability executable,
@@ -963,14 +959,10 @@ class MemoryAllocator {
   void Free(MemoryChunk* chunk);
 
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    return capacity_ < memory_allocator_reserved_ ?
-           0 :
-           capacity_ - memory_allocator_reserved_;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
 
   // Returns allocated spaces in bytes.
-  intptr_t Size() { return memory_allocator_reserved_; }
+  intptr_t Size() { return size_; }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
@@ -992,7 +984,6 @@ class MemoryAllocator {
 #endif
 
   MemoryChunk* AllocateChunk(intptr_t body_size,
-                             intptr_t committed_body_size,
                              Executability executable,
                              Space* space);
 
@@ -1000,7 +991,6 @@ class MemoryAllocator {
                                size_t alignment,
                                VirtualMemory* controller);
   Address AllocateAlignedMemory(size_t requested,
-                                size_t committed,
                                 size_t alignment,
                                 Executability executable,
                                 VirtualMemory* controller);
@@ -1020,12 +1010,6 @@ class MemoryAllocator {
   // and false otherwise.
   bool UncommitBlock(Address start, size_t size);
 
-  void AllocationBookkeeping(Space* owner,
-                             Address base,
-                             intptr_t reserved_size,
-                             intptr_t committed_size,
-                             Executability executable);
-
   // Zaps a contiguous block of memory [start..(start+size)[ thus
   // filling it up with a recognizable non-NULL bit pattern.
   void ZapBlock(Address start, size_t size);
@@ -1053,7 +1037,7 @@ class MemoryAllocator {
   size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  size_t memory_allocator_reserved_;
+  size_t size_;
   // Allocated executable space size in bytes.
   size_t size_executable_;
 
@@ -1398,15 +1382,9 @@ class FreeList BASE_EMBEDDED {
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
 
-  FreeListNode* PickNodeFromList(FreeListNode** list,
-                                 int* node_size,
-                                 int minimum_size);
+  FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size);
 
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size, Address limit);
-  FreeListNode* FindAbuttingNode(int size_in_bytes,
-                                 int* node_size,
-                                 Address limit,
-                                 FreeListNode** list_head);
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
@@ -1506,8 +1484,6 @@ class PagedSpace : public Space {
   // free bytes that were not found at all due to lazy sweeping.
   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
 
-  virtual int ObjectAlignment() { return kObjectAlignment; }
-
   // Returns the allocation pointer in this space.
   Address top() { return allocation_info_.top; }
   Address limit() { return allocation_info_.limit; }
@@ -1522,7 +1498,7 @@ class PagedSpace : public Space {
   // the free list or accounted as waste.
   // If add_to_freelist is false then just accounting stats are updated and
   // no attempt to add area to free list is made.
-  int AddToFreeLists(Address start, int size_in_bytes) {
+  int Free(Address start, int size_in_bytes) {
     int wasted = free_list_.Free(start, size_in_bytes);
     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
     return size_in_bytes - wasted;
@@ -1530,7 +1506,6 @@ class PagedSpace : public Space {
 
   // Set space allocation info.
   void SetTop(Address top, Address limit) {
-    ASSERT(top == NULL || top >= Page::FromAddress(top - 1)->ObjectAreaStart());
     ASSERT(top == limit ||
            Page::FromAddress(top) == Page::FromAddress(limit - 1));
     allocation_info_.top = top;
@@ -1597,14 +1572,12 @@ class PagedSpace : public Space {
 
   void IncreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ +=
-        (p->ObjectAreaEnd() - p->ObjectAreaStart()) - p->LiveBytes();
+    unswept_free_bytes_ += (Page::kObjectAreaSize - p->LiveBytes());
   }
 
   void DecreaseUnsweptFreeBytes(Page* p) {
     ASSERT(ShouldBeSweptLazily(p));
-    unswept_free_bytes_ -=
-        (p->ObjectAreaEnd() - p->ObjectAreaStart() - p->LiveBytes());
+    unswept_free_bytes_ -= (Page::kObjectAreaSize - p->LiveBytes());
   }
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
@@ -1613,7 +1586,6 @@ class PagedSpace : public Space {
     return !first_unswept_page_->is_valid();
   }
 
-  inline bool HasAPage() { return anchor_.next_page() != &anchor_; }
   Page* FirstPage() { return anchor_.next_page(); }
   Page* LastPage() { return anchor_.prev_page(); }
 
@@ -1624,17 +1596,15 @@ class PagedSpace : public Space {
     FreeList::SizeStats sizes;
     free_list_.CountFreeListItems(p, &sizes);
 
-    intptr_t object_area_size = p->ObjectAreaEnd() - p->ObjectAreaStart();
-
     intptr_t ratio;
     intptr_t ratio_threshold;
     if (identity() == CODE_SPACE) {
       ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-          object_area_size;
+          Page::kObjectAreaSize;
       ratio_threshold = 10;
     } else {
       ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-          object_area_size;
+          Page::kObjectAreaSize;
       ratio_threshold = 15;
     }
 
@@ -1644,20 +1614,20 @@ class PagedSpace : public Space {
              identity(),
              static_cast<int>(sizes.small_size_),
              static_cast<double>(sizes.small_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.medium_size_),
              static_cast<double>(sizes.medium_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.large_size_),
              static_cast<double>(sizes.large_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              static_cast<int>(sizes.huge_size_),
              static_cast<double>(sizes.huge_size_ * 100) /
-                 object_area_size,
+                 Page::kObjectAreaSize,
              (ratio > ratio_threshold) ? "[fragmented]" : "");
     }
 
-    if (FLAG_always_compact && sizes.Total() != object_area_size) {
+    if (FLAG_always_compact && sizes.Total() != Page::kObjectAreaSize) {
       return 1;
     }
     if (ratio <= ratio_threshold) return 0;  // Not fragmented.
@@ -1688,6 +1658,12 @@ class PagedSpace : public Space {
   // Normal allocation information.
   AllocationInfo allocation_info_;
 
+  // Bytes of each page that cannot be allocated.  Possibly non-zero
+  // for pages in spaces with only fixed-size objects.  Always zero
+  // for pages in spaces with variable sized objects (those pages are
+  // padded with free-list nodes).
+  int page_extra_;
+
   bool was_swept_conservatively_;
 
   // The first page to be swept when the lazy sweeper advances. Is set
@@ -1699,11 +1675,10 @@ class PagedSpace : public Space {
   // done conservatively.
   intptr_t unswept_free_bytes_;
 
-  // Expands the space by allocating a page. Returns false if it cannot
-  // allocate a page from OS, or if the hard heap size limit has been hit.  The
-  // new page will have at least enough committed space to satisfy the object
-  // size indicated by the allocation_size argument;
-  bool Expand(intptr_t allocation_size);
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate requested number of pages from OS, or if the hard heap
+  // size limit has been hit.
+  bool Expand();
 
   // Generic fast case allocation function that tries linear allocation at the
   // address denoted by top in allocation_info_.
@@ -1858,8 +1833,7 @@ class SemiSpace : public Space {
       anchor_(this),
       current_page_(NULL) { }
 
-  // Sets up the semispace using the given chunk.  After this, call Commit()
-  // to make the semispace usable.
+  // Sets up the semispace using the given chunk.
   void SetUp(Address start, int initial_capacity, int maximum_capacity);
 
   // Tear down the space.  Heap memory was not allocated by the space, so it
@@ -2364,7 +2338,14 @@ class OldSpace : public PagedSpace {
            intptr_t max_capacity,
            AllocationSpace id,
            Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) { }
+      : PagedSpace(heap, max_capacity, id, executable) {
+    page_extra_ = 0;
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
+  }
 
  public:
   TRACK_MEMORY("OldSpace")
@@ -2391,11 +2372,16 @@ class FixedSpace : public PagedSpace {
              const char* name)
       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
         object_size_in_bytes_(object_size_in_bytes),
-        name_(name) { }
+        name_(name) {
+    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
+  }
 
-  int object_size_in_bytes() { return object_size_in_bytes_; }
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
+  }
 
-  virtual int ObjectAlignment() { return object_size_in_bytes_; }
+  int object_size_in_bytes() { return object_size_in_bytes_; }
 
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact();
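The page_extra_ field reinstated above is what keeps fixed-size-object spaces (for example map space) self-aligned: the tail of each page that cannot hold one more whole object is excluded from allocation. A tiny worked example with made-up sizes (the real kObjectAreaSize and object sizes depend on the build configuration):

    #include <cstdio>

    int main() {
      // Illustrative numbers only; the real values are configuration dependent.
      const int kObjectAreaSize = 1 << 20;   // Usable bytes per page.
      const int object_size_in_bytes = 88;   // A hypothetical fixed object size.

      // Bytes at the end of every page that cannot hold a whole object.
      int page_extra = kObjectAreaSize % object_size_in_bytes;

      // Allocation on such a page stops page_extra bytes before ObjectAreaEnd(),
      // so only whole objects are ever handed out from the page.
      std::printf("page_extra = %d, objects per page = %d\n",
                  page_extra, kObjectAreaSize / object_size_in_bytes);
      return 0;
    }

With these illustrative numbers the last 56 bytes of each page are simply never handed out.
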
src/store-buffer.cc
index f85ec27..9022b3b 100644 (file)
@@ -496,6 +496,7 @@ void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
   Address map_aligned_end   = MapEndAlign(end);
 
   ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
 
   FindPointersToNewSpaceInMaps(map_aligned_start,
                                map_aligned_end,
@@ -523,57 +524,52 @@ void StoreBuffer::FindPointersToNewSpaceOnPage(
     RegionCallback region_callback,
     ObjectSlotCallback slot_callback) {
   Address visitable_start = page->ObjectAreaStart();
+  Address end_of_page = page->ObjectAreaEnd();
 
   Address visitable_end = visitable_start;
 
   Object* free_space_map = heap_->free_space_map();
   Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
 
-  while (true) {  // While the page grows (doesn't normally happen).
-    Address end_of_page = page->ObjectAreaEnd();
-    while (visitable_end < end_of_page) {
-      Object* o = *reinterpret_cast<Object**>(visitable_end);
-      // Skip fillers but not things that look like fillers in the special
-      // garbage section which can contain anything.
-      if (o == free_space_map ||
-          o == two_pointer_filler_map ||
-          (visitable_end == space->top() && visitable_end != space->limit())) {
-        if (visitable_start != visitable_end) {
-          // After calling this the special garbage section may have moved.
-          (this->*region_callback)(visitable_start,
-                                   visitable_end,
-                                   slot_callback);
-          if (visitable_end >= space->top() && visitable_end < space->limit()) {
-            visitable_end = space->limit();
-            visitable_start = visitable_end;
-            continue;
-          }
-        }
-        if (visitable_end == space->top() && visitable_end != space->limit()) {
-          visitable_start = visitable_end = space->limit();
-        } else {
-          // At this point we are either at the start of a filler or we are at
-          // the point where the space->top() used to be before the
-          // visit_pointer_region call above.  Either way we can skip the
-          // object at the current spot:  We don't promise to visit objects
-          // allocated during heap traversal, and if space->top() moved then it
-          // must be because an object was allocated at this point.
-          visitable_start =
-              visitable_end + HeapObject::FromAddress(visitable_end)->Size();
-          visitable_end = visitable_start;
+  while (visitable_end < end_of_page) {
+    Object* o = *reinterpret_cast<Object**>(visitable_end);
+    // Skip fillers but not things that look like fillers in the special
+    // garbage section which can contain anything.
+    if (o == free_space_map ||
+        o == two_pointer_filler_map ||
+        (visitable_end == space->top() && visitable_end != space->limit())) {
+      if (visitable_start != visitable_end) {
+        // After calling this the special garbage section may have moved.
+        (this->*region_callback)(visitable_start,
+                                 visitable_end,
+                                 slot_callback);
+        if (visitable_end >= space->top() && visitable_end < space->limit()) {
+          visitable_end = space->limit();
+          visitable_start = visitable_end;
+          continue;
         }
+      }
+      if (visitable_end == space->top() && visitable_end != space->limit()) {
+        visitable_start = visitable_end = space->limit();
       } else {
-        ASSERT(o != free_space_map);
-        ASSERT(o != two_pointer_filler_map);
-        ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
-        visitable_end += kPointerSize;
+        // At this point we are either at the start of a filler or we are at
+        // the point where the space->top() used to be before the
+        // visit_pointer_region call above.  Either way we can skip the
+        // object at the current spot:  We don't promise to visit objects
+        // allocated during heap traversal, and if space->top() moved then it
+        // must be because an object was allocated at this point.
+        visitable_start =
+            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
+        visitable_end = visitable_start;
       }
+    } else {
+      ASSERT(o != free_space_map);
+      ASSERT(o != two_pointer_filler_map);
+      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
+      visitable_end += kPointerSize;
     }
-    ASSERT(visitable_end >= end_of_page);
-    // If the page did not grow we are done.
-    if (end_of_page == page->ObjectAreaEnd()) break;
   }
-  ASSERT(visitable_end == page->ObjectAreaEnd());
+  ASSERT(visitable_end == end_of_page);
   if (visitable_start != visitable_end) {
     (this->*region_callback)(visitable_start,
                              visitable_end,
src/utils.h
index abcbefa..1d40c98 100644 (file)
@@ -153,9 +153,11 @@ int HandleObjectPointerCompare(const Handle<T>* a, const Handle<T>* b) {
 }
 
 
-template<typename int_type>
-inline int RoundUpToPowerOf2(int_type x_argument) {
-  uintptr_t x = static_cast<uintptr_t>(x_argument);
+// Returns the smallest power of two which is >= x. If you pass in a
+// number that is already a power of two, it is returned as is.
+// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
+// figure 3-3, page 48, where the function is called clp2.
+inline uint32_t RoundUpToPowerOf2(uint32_t x) {
   ASSERT(x <= 0x80000000u);
   x = x - 1;
   x = x | (x >> 1);
@@ -163,7 +165,7 @@ inline int RoundUpToPowerOf2(int_type x_argument) {
   x = x | (x >> 4);
   x = x | (x >> 8);
   x = x | (x >> 16);
-  return static_cast<int_type>(x + 1);
+  return x + 1;
 }
 
 
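The restored RoundUpToPowerOf2 in utils.h is the clp2 bit trick from Hacker's Delight; a quick standalone check of the same technique (test values chosen arbitrarily):

    #include <cassert>
    #include <cstdint>

    // Same technique as the restored function: smear the highest set bit of
    // (x - 1) into all lower positions, then add one.
    inline uint32_t RoundUpToPowerOf2(uint32_t x) {
      assert(x <= 0x80000000u);
      x = x - 1;
      x = x | (x >> 1);
      x = x | (x >> 2);
      x = x | (x >> 4);
      x = x | (x >> 8);
      x = x | (x >> 16);
      return x + 1;
    }

    int main() {
      assert(RoundUpToPowerOf2(1) == 1);      // Powers of two are returned as-is.
      assert(RoundUpToPowerOf2(16) == 16);
      assert(RoundUpToPowerOf2(17) == 32);    // Everything else rounds up.
      assert(RoundUpToPowerOf2(1000) == 1024);
      return 0;
    }
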
test/cctest/test-heap.cc
index c0a2981..6de509c 100644 (file)
@@ -1236,14 +1236,17 @@ TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
        obj = iterator.next()) {
     size_of_objects_2 += obj->Size();
   }
-  // Delta must be within 1% of the larger result.
+  // Delta must be within 5% of the larger result.
+  // TODO(gc): Tighten this up by distinguishing between byte
+  // arrays that are real and those that merely mark free space
+  // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
            "Iterator: %" V8_PTR_PREFIX "d, "
            "delta: %" V8_PTR_PREFIX "d\n",
            size_of_objects_1, size_of_objects_2, delta);
-    CHECK_GT(size_of_objects_1 / 100, delta);
+    CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
     PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
test/cctest/test-mark-compact.cc
index f1b883f..9de069d 100644 (file)
@@ -534,15 +534,15 @@ TEST(BootUpMemoryUse) {
     intptr_t booted_memory = MemoryInUse();
     if (sizeof(initial_memory) == 8) {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 2984.
+        CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6444.
       } else {
-        CHECK_LE(booted_memory - initial_memory, 3050 * 1024);  // 3008.
+        CHECK_LE(booted_memory - initial_memory, 6777 * 1024);  // 6596.
       }
     } else {
       if (v8::internal::Snapshot::IsEnabled()) {
-        CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1940.
+        CHECK_LE(booted_memory - initial_memory, 6500 * 1024);  // 6356.
       } else {
-        CHECK_LE(booted_memory - initial_memory, 2000 * 1024);  // 1948
+        CHECK_LE(booted_memory - initial_memory, 6654 * 1024);  // 6424
       }
     }
   }
test/cctest/test-spaces.cc
index 6eb1ddd..6e495bc 100644 (file)
@@ -140,8 +140,8 @@ TEST(MemoryAllocator) {
                        heap->MaxReserved(),
                        OLD_POINTER_SPACE,
                        NOT_EXECUTABLE);
-  Page* first_page = memory_allocator->AllocatePage(
-      Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
+  Page* first_page =
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
 
   first_page->InsertAfter(faked_space.anchor()->prev_page());
   CHECK(first_page->is_valid());
@@ -154,8 +154,7 @@ TEST(MemoryAllocator) {
 
   // Again, we should get n or n - 1 pages.
   Page* other =
-      memory_allocator->AllocatePage(
-          Page::kObjectAreaSize, &faked_space, NOT_EXECUTABLE);
+      memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE);
   CHECK(other->is_valid());
   total_pages++;
   other->InsertAfter(first_page);