Reland "Only use FreeSpace objects in the free list"
author    yangguo <yangguo@chromium.org>
Tue, 27 Jan 2015 14:56:51 +0000 (06:56 -0800)
committer Commit bot <commit-bot@chromium.org>
Tue, 27 Jan 2015 14:57:00 +0000 (14:57 +0000)
Review URL: https://codereview.chromium.org/882633002

Cr-Commit-Position: refs/heads/master@{#26296}

16 files changed:
src/allocation-tracker.cc
src/api.cc
src/full-codegen.h
src/heap/heap.cc
src/heap/heap.h
src/heap/spaces-inl.h
src/heap/spaces.cc
src/heap/spaces.h
src/isolate.h
src/objects-inl.h
src/objects.h
src/serialize.cc
test/cctest/cctest.h
test/cctest/test-heap.cc
test/cctest/test-serialize.cc
test/cctest/test-spaces.cc

diff --git a/src/allocation-tracker.cc b/src/allocation-tracker.cc
index 7534ffb..1ad86b8 100644 (file)
@@ -227,9 +227,7 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
 
   // Mark the new block as FreeSpace to make sure the heap is iterable
   // while we are capturing stack trace.
-  FreeListNode::FromAddress(addr)->set_size(heap, size);
-  DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
-  DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+  heap->CreateFillerObjectAt(addr, size);
 
   Isolate* isolate = heap->isolate();
   int length = 0;
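
The replacement works because a filler keeps the heap iterable: heap iteration
advances through a page by each object's reported size, so every hole needs a
map from which Size() can be computed. A minimal sketch of such a walk,
assuming the usual Page accessors (not part of this patch):

    // Hypothetical page walk; filler maps make Size() valid for free
    // blocks, so the cursor always lands on an object boundary.
    for (Address cursor = page->area_start(); cursor < page->area_end();) {
      HeapObject* object = HeapObject::FromAddress(cursor);
      cursor += object->Size();
    }
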
diff --git a/src/api.cc b/src/api.cc
index 0b0ab03..8ddd351 100644 (file)
@@ -27,6 +27,7 @@
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
+#include "src/full-codegen.h"
 #include "src/global-handles.h"
 #include "src/heap-profiler.h"
 #include "src/heap-snapshot-generator-inl.h"
@@ -220,6 +221,54 @@ bool RunExtraCode(Isolate* isolate, char* utf8_source) {
 }
 
 
+void CheckDefaultReservationSizes(const i::StartupSerializer& startup_ser,
+                                  const i::PartialSerializer& context_ser) {
+#ifdef DEBUG
+  i::List<i::SerializedData::Reservation> startup_reservations;
+  i::List<i::SerializedData::Reservation> context_reservations;
+  startup_ser.EncodeReservations(&startup_reservations);
+  context_ser.EncodeReservations(&context_reservations);
+  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
+    // Exactly one chunk per space.
+    CHECK(startup_reservations[space].is_last());
+    CHECK(context_reservations[space].is_last());
+    uint32_t sum = startup_reservations[space].chunk_size() +
+                   context_reservations[space].chunk_size();
+    uint32_t limit = 0;
+    const int constant_pool_delta = i::FLAG_enable_ool_constant_pool ? 48 : 0;
+    switch (space) {
+      case i::NEW_SPACE:
+        limit = 3 * i::kPointerSize;
+        break;
+      case i::OLD_POINTER_SPACE:
+        limit = (128 + constant_pool_delta) * i::kPointerSize * i::KB;
+        break;
+      case i::OLD_DATA_SPACE:
+        limit = 192 * i::KB;
+        break;
+      case i::MAP_SPACE:
+        limit = 16 * i::kPointerSize * i::KB;
+        break;
+      case i::CELL_SPACE:
+        limit = 16 * i::kPointerSize * i::KB;
+        break;
+      case i::PROPERTY_CELL_SPACE:
+        limit = 8 * i::kPointerSize * i::KB;
+        break;
+      case i::CODE_SPACE:
+        limit = RoundUp((480 - constant_pool_delta) * i::KB *
+                            i::FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+                        i::kPointerSize);
+        break;
+      default:
+        break;
+    }
+    CHECK_LE(sum, limit);
+  }
+#endif  // DEBUG
+}
+
+
 StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
   Isolate::CreateParams params;
   params.enable_serializer = true;
@@ -266,6 +315,8 @@ StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
       i::SnapshotData sd(snapshot_sink, ser);
       i::SnapshotData csd(context_sink, context_ser);
 
+      if (custom_source == NULL) CheckDefaultReservationSizes(ser, context_ser);
+
       result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData(),
                                                metadata);
     }
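
As a worked example of the limits being checked, assume an x64 build
(i::kPointerSize == 8) without an out-of-line constant pool, so
constant_pool_delta == 0:

    NEW_SPACE:            3 * 8             =   24 bytes
    OLD_POINTER_SPACE:    128 * 8 * KB      = 1024 KB
    OLD_DATA_SPACE:                           192 KB
    MAP_SPACE:            16 * 8 * KB       =  128 KB
    CELL_SPACE:           16 * 8 * KB       =  128 KB
    PROPERTY_CELL_SPACE:  8 * 8 * KB        =   64 KB
    CODE_SPACE:           RoundUp(480 KB * kBootCodeSizeMultiplier / 100, 8)
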
diff --git a/src/full-codegen.h b/src/full-codegen.h
index f544b7f..cc24bb8 100644 (file)
@@ -124,7 +124,7 @@ class FullCodeGenerator: public AstVisitor {
   static const int kBootCodeSizeMultiplier = 120;
 #elif V8_TARGET_ARCH_MIPS64
   static const int kCodeSizeMultiplier = 149;
-  static const int kBootCodeSizeMultiplier = 120;
+  static const int kBootCodeSizeMultiplier = 170;
 #else
 #error Unsupported target architecture.
 #endif
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index bc55f9e..147d5dd 100644 (file)
@@ -495,11 +495,11 @@ void Heap::ClearAllICsByKind(Code::Kind kind) {
 }
 
 
-void Heap::RepairFreeListsAfterBoot() {
+void Heap::RepairFreeListsAfterDeserialization() {
   PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
-    space->RepairFreeListsAfterBoot();
+    space->RepairFreeListsAfterDeserialization();
   }
 }
 
@@ -952,14 +952,15 @@ bool Heap::ReserveSpace(Reservation* reservations) {
           } else {
             allocation = paged_space(space)->AllocateRaw(size);
           }
-          FreeListNode* node;
-          if (allocation.To(&node)) {
+          HeapObject* free_space;
+          if (allocation.To(&free_space)) {
-            // Mark with a free list node, in case we have a GC before
-            // deserializing.
+            // Mark with a filler object, in case we have a GC before
+            // deserializing.
-            node->set_size(this, size);
+            Address free_space_address = free_space->address();
+            CreateFillerObjectAt(free_space_address, size);
             DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
-            chunk.start = node->address();
-            chunk.end = node->address() + size;
+            chunk.start = free_space_address;
+            chunk.end = free_space_address + size;
           } else {
             perform_gc = true;
             break;
@@ -3392,13 +3393,18 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
 void Heap::CreateFillerObjectAt(Address addr, int size) {
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
+  // At this point we may be deserializing the heap from a snapshot, in
+  // which case the maps have not been created yet and are still NULL.
   if (size == kPointerSize) {
-    filler->set_map_no_write_barrier(one_pointer_filler_map());
+    filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
+    DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_no_write_barrier(two_pointer_filler_map());
+    filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
+    DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
   } else {
-    filler->set_map_no_write_barrier(free_space_map());
-    FreeSpace::cast(filler)->set_size(size);
+    filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
+    DCHECK(filler->map() == NULL || filler->map() == free_space_map());
+    FreeSpace::cast(filler)->nobarrier_set_size(size);
   }
 }
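
CreateFillerObjectAt() now serves every site that previously called
FreeListNode::set_size(). A sketch of the resulting caller pattern, mirroring
the test changes further down (everything other than CreateFillerObjectAt is
a placeholder):

    AllocationResult allocation = space->AllocateRaw(size);
    HeapObject* object = NULL;
    if (allocation.To(&object)) {
      // Make the raw block iterable immediately, even before its real
      // contents (or, during deserialization, the real maps) exist.
      heap->CreateFillerObjectAt(object->address(), size);
    }
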
 
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 2ae4165..4ccbd04 100644 (file)
@@ -692,8 +692,8 @@ class Heap {
   // Iterates the whole code space to clear all ICs of the given kind.
   void ClearAllICsByKind(Code::Kind kind);
 
-  // For use during bootup.
-  void RepairFreeListsAfterBoot();
+  // FreeSpace objects have a null map after deserialization. Update the map.
+  void RepairFreeListsAfterDeserialization();
 
   template <typename T>
   static inline bool IsOneByte(T t, int chars);
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index 9cb292e..cfa2325 100644 (file)
@@ -305,14 +305,6 @@ intptr_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  Map* map = object->map();
-  Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map() ||
-         map == heap->raw_unchecked_one_pointer_filler_map() ||
-         map == heap->raw_unchecked_two_pointer_filler_map();
-}
 }
 }  // namespace v8::internal
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 1a2ff27..1a0d0bb 100644 (file)
@@ -1028,10 +1028,6 @@ bool PagedSpace::Expand() {
 
   intptr_t size = AreaSize();
 
-  if (anchor_.next_page() == &anchor_) {
-    size = SizeOfFirstPage();
-  }
-
   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;
@@ -1044,50 +1040,6 @@ bool PagedSpace::Expand() {
 }
 
 
-intptr_t PagedSpace::SizeOfFirstPage() {
-  // If the snapshot contains a custom script, all size guarantees are off.
-  if (Snapshot::EmbedsScript()) return AreaSize();
-  // If using an ool constant pool then transfer the constant pool allowance
-  // from the code space to the old pointer space.
-  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
-  int size = 0;
-  switch (identity()) {
-    case OLD_POINTER_SPACE:
-      size = (128 + constant_pool_delta) * kPointerSize * KB;
-      break;
-    case OLD_DATA_SPACE:
-      size = 192 * KB;
-      break;
-    case MAP_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case CELL_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case PROPERTY_CELL_SPACE:
-      size = 8 * kPointerSize * KB;
-      break;
-    case CODE_SPACE: {
-      CodeRange* code_range = heap()->isolate()->code_range();
-      if (code_range != NULL && code_range->valid()) {
-        // When code range exists, code pages are allocated in a special way
-        // (from the reserved code range). That part of the code is not yet
-        // upgraded to handle small pages.
-        size = AreaSize();
-      } else {
-        size = RoundUp((480 - constant_pool_delta) * KB *
-                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-                       kPointerSize);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  return Min(size, AreaSize());
-}
-
-
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
@@ -2082,79 +2034,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
-  DCHECK(size_in_bytes > 0);
-  DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
-  // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a FreeSpace with at least one extra word (the next
-  // pointer), we set its map to be the free space map and its size to an
-  // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (eg, one or two words), to hold both a size
-  // field and a next pointer, we give it a filler map that gives it the
-  // correct size.
-  if (size_in_bytes > FreeSpace::kHeaderSize) {
-    // Can't use FreeSpace::cast because it fails during deserialization.
-    // We have to set the size first with a release store before we store
-    // the map because a concurrent store buffer scan on scavenge must not
-    // observe a map with an invalid size.
-    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->nobarrier_set_size(size_in_bytes);
-    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
-  } else if (size_in_bytes == kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
-  } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
-  } else {
-    UNREACHABLE();
-  }
-  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
-  // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kNextOffset));
-  } else {
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kPointerSize));
-  }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
-  } else {
-    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
-  }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
-  DCHECK(IsFreeListNode(this));
-  // While we are booting the VM the free space map will actually be null.  So
-  // we have to make sure that we don't try to use it for anything at that
-  // stage.
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
-        reinterpret_cast<base::AtomicWord>(next));
-  } else {
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
-        reinterpret_cast<base::AtomicWord>(next));
-  }
-}
-
-
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
@@ -2188,11 +2067,11 @@ void FreeListCategory::Reset() {
 
 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
   int sum = 0;
-  FreeListNode* t = top();
-  FreeListNode** n = &t;
+  FreeSpace* t = top();
+  FreeSpace** n = &t;
   while (*n != NULL) {
     if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      FreeSpace* free_space = *n;
       sum += free_space->Size();
       *n = (*n)->next();
     } else {
@@ -2209,7 +2088,7 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
 
 
 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeListNode* node = top();
+  FreeSpace* node = top();
   while (node != NULL) {
     if (Page::FromAddress(node->address()) == p) return true;
     node = node->next();
@@ -2218,20 +2097,20 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
-  FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeSpace* node = top();
 
   if (node == NULL) return NULL;
 
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+    available_ -= node->Size();
     node = node->next();
   }
 
   if (node != NULL) {
     set_top(node->next());
-    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+    *node_size = node->Size();
     available_ -= *node_size;
   } else {
     set_top(NULL);
@@ -2245,9 +2124,9 @@ FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                                 int* node_size) {
-  FreeListNode* node = PickNodeFromList(node_size);
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                              int* node_size) {
+  FreeSpace* node = PickNodeFromList(node_size);
   if (node != NULL && *node_size < size_in_bytes) {
     Free(node, *node_size);
     *node_size = 0;
@@ -2257,18 +2136,19 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
 }
 
 
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
-  node->set_next(top());
-  set_top(node);
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+  DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
+  free_space->set_next(top());
+  set_top(free_space);
   if (end_ == NULL) {
-    end_ = node;
+    end_ = free_space;
   }
   available_ += size_in_bytes;
 }
 
 
 void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeListNode* n = top();
+  FreeSpace* n = top();
   while (n != NULL) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
     if (*map_location == NULL) {
@@ -2307,8 +2187,8 @@ void FreeList::Reset() {
 int FreeList::Free(Address start, int size_in_bytes) {
   if (size_in_bytes == 0) return 0;
 
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, size_in_bytes);
+  heap_->CreateFillerObjectAt(start, size_in_bytes);
+
   Page* page = Page::FromAddress(start);
 
   // Early return to drop too-small blocks on the floor.
@@ -2317,19 +2197,20 @@ int FreeList::Free(Address start, int size_in_bytes) {
     return size_in_bytes;
   }
 
+  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(node, size_in_bytes);
+    small_list_.Free(free_space, size_in_bytes);
     page->add_available_in_small_free_list(size_in_bytes);
   } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(node, size_in_bytes);
+    medium_list_.Free(free_space, size_in_bytes);
     page->add_available_in_medium_free_list(size_in_bytes);
   } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(node, size_in_bytes);
+    large_list_.Free(free_space, size_in_bytes);
     page->add_available_in_large_free_list(size_in_bytes);
   } else {
-    huge_list_.Free(node, size_in_bytes);
+    huge_list_.Free(free_space, size_in_bytes);
     page->add_available_in_huge_free_list(size_in_bytes);
   }
 
@@ -2338,8 +2219,8 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeListNode* node = NULL;
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeSpace* node = NULL;
   Page* page = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
@@ -2376,13 +2257,13 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   }
 
   int huge_list_available = huge_list_.available();
-  FreeListNode* top_node = huge_list_.top();
-  for (FreeListNode** cur = &top_node; *cur != NULL;
+  FreeSpace* top_node = huge_list_.top();
+  for (FreeSpace** cur = &top_node; *cur != NULL;
        cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
+    FreeSpace* cur_node = *cur;
     while (cur_node != NULL &&
            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      int size = cur_node->Size();
       huge_list_available -= size;
       page = Page::FromAddress(cur_node->address());
       page->add_available_in_huge_free_list(-size);
@@ -2395,9 +2276,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
       break;
     }
 
-    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
+    int size = cur_node->Size();
     if (size >= size_in_bytes) {
       // Large enough node found.  Unlink it from the list.
       node = *cur;
@@ -2470,7 +2349,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                                                       old_linear_size);
 
   int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
     owner_->SetTopAndLimit(NULL, NULL);
     return NULL;
@@ -2564,11 +2443,10 @@ void FreeList::RepairLists(Heap* heap) {
 #ifdef DEBUG
 intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->nobarrier_size();
+    sum += cur->nobarrier_size();
     cur = cur->next();
   }
   return sum;
@@ -2580,7 +2458,7 @@ static const int kVeryLongFreeList = 500;
 
 int FreeListCategory::FreeListLength() {
   int length = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2641,7 +2519,19 @@ intptr_t PagedSpace::SizeOfObjects() {
 // on the heap.  If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+  free_list_.RepairLists(heap());
+  // Each page may have a small free space that is not tracked by a free list.
+  // Update the maps for those free space objects.
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    Page* page = iterator.next();
+    int size = static_cast<int>(page->non_available_small_blocks());
+    if (size == 0) continue;
+    Address address = page->OffsetToAddress(Page::kPageSize - size);
+    heap()->CreateFillerObjectAt(address, size);
+  }
+}
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
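
The per-page loop covers the small tail block that Free() drops on the floor
(see the early return above): its size is recorded in
non_available_small_blocks(), but no free list points at it, so
RepairLists() alone would miss it. As a worked example with hypothetical
numbers, if Page::kPageSize were 1 MB and non_available_small_blocks()
returned 24, the hole would be the final 24 bytes of the page, at
page->OffsetToAddress(Page::kPageSize - 24), and CreateFillerObjectAt()
would give it a valid filler map again.
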
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index dcd3364..e21876b 100644 (file)
@@ -1411,45 +1411,6 @@ class AllocationStats BASE_EMBEDDED {
 
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode : public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it does not check nor require that the first word at the address is a map
-  // pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline FreeListNode* next();
-  inline FreeListNode** next_address();
-  inline void set_next(FreeListNode* next);
-
-  inline void Zap();
-
-  static inline FreeListNode* cast(Object* object) {
-    return reinterpret_cast<FreeListNode*>(object);
-  }
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
 
 // The free list category holds a pointer to the top element and a pointer to
 // the end element of the linked list of free memory blocks.
@@ -1461,27 +1422,26 @@ class FreeListCategory {
 
   void Reset();
 
-  void Free(FreeListNode* node, int size_in_bytes);
+  void Free(FreeSpace* node, int size_in_bytes);
 
-  FreeListNode* PickNodeFromList(int* node_size);
-  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+  FreeSpace* PickNodeFromList(int* node_size);
+  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
 
   intptr_t EvictFreeListItemsInList(Page* p);
   bool ContainsPageFreeListItemsInList(Page* p);
 
   void RepairFreeList(Heap* heap);
 
-  FreeListNode* top() const {
-    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+  FreeSpace* top() const {
+    return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
   }
 
-  void set_top(FreeListNode* top) {
+  void set_top(FreeSpace* top) {
     base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
   }
 
-  FreeListNode** GetEndAddress() { return &end_; }
-  FreeListNode* end() const { return end_; }
-  void set_end(FreeListNode* end) { end_ = end; }
+  FreeSpace* end() const { return end_; }
+  void set_end(FreeSpace* end) { end_ = end; }
 
   int* GetAvailableAddress() { return &available_; }
   int available() const { return available_; }
@@ -1497,9 +1457,9 @@ class FreeListCategory {
 #endif
 
  private:
-  // top_ points to the top FreeListNode* in the free list category.
+  // top_ points to the top FreeSpace* in the free list category.
   base::AtomicWord top_;
-  FreeListNode* end_;
+  FreeSpace* end_;
   base::Mutex mutex_;
 
   // Total available bytes in all blocks of this free list category.
@@ -1596,17 +1556,18 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }
 
+  static const int kSmallListMin = 0x20 * kPointerSize;
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
 
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
 
-  static const int kSmallListMin = 0x20 * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
   static const int kLargeListMax = 0x3fff * kPointerSize;
@@ -1702,7 +1663,7 @@ class PagedSpace : public Space {
 
   // During boot the free_space_map is created, and afterwards we may need
   // to write it into the free list nodes that were already created.
-  void RepairFreeListsAfterBoot();
+  void RepairFreeListsAfterDeserialization();
 
   // Prepares for a mark-compact GC.
   void PrepareForMarkCompact();
@@ -1909,8 +1870,6 @@ class PagedSpace : public Space {
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
-  intptr_t SizeOfFirstPage();
-
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
diff --git a/src/isolate.h b/src/isolate.h
index cac1d91..a737dae 100644 (file)
@@ -1362,7 +1362,6 @@ class Isolate {
 
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
-  friend class IsolateInitializer;
   friend class OptimizingCompilerThread;
   friend class SweeperThread;
   friend class ThreadManager;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 5ee9191..402010e 100644 (file)
@@ -3334,7 +3334,6 @@ CAST_ACCESSOR(FixedArrayBase)
 CAST_ACCESSOR(FixedDoubleArray)
 CAST_ACCESSOR(FixedTypedArrayBase)
 CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(GlobalObject)
 CAST_ACCESSOR(HeapObject)
 CAST_ACCESSOR(JSArray)
@@ -3443,6 +3442,39 @@ SMI_ACCESSORS(String, length, kLengthOffset)
 SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
 
 
+FreeSpace* FreeSpace::next() {
+  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+         (!GetHeap()->deserialization_complete() && map() == NULL));
+  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+  return reinterpret_cast<FreeSpace*>(
+      Memory::Address_at(address() + kNextOffset));
+}
+
+
+FreeSpace** FreeSpace::next_address() {
+  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+         (!GetHeap()->deserialization_complete() && map() == NULL));
+  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+  return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
+}
+
+
+void FreeSpace::set_next(FreeSpace* next) {
+  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+         (!GetHeap()->deserialization_complete() && map() == NULL));
+  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
+  base::NoBarrier_Store(
+      reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+      reinterpret_cast<base::AtomicWord>(next));
+}
+
+
+FreeSpace* FreeSpace::cast(HeapObject* o) {
+  SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
+  return reinterpret_cast<FreeSpace*>(o);
+}
+
+
 uint32_t Name::hash_field() {
   return READ_UINT32_FIELD(this, kHashFieldOffset);
 }
diff --git a/src/objects.h b/src/objects.h
index faab173..fdba599 100644 (file)
@@ -4448,8 +4448,11 @@ class ByteArray: public FixedArrayBase {
 };
 
 
-// FreeSpace represents fixed sized areas of the heap that are not currently in
-// use.  Used by the heap and GC.
+// FreeSpace are fixed-size free memory blocks used by the heap and GC.
+// They look like heap objects (are heap object tagged and have a map) so that
+// the heap remains iterable.  They have a size and a next pointer.
+// The next pointer is the raw address of the next FreeSpace object (or NULL)
+// in the free list.
 class FreeSpace: public HeapObject {
  public:
   // [size]: size of the free space including the header.
@@ -4461,7 +4464,12 @@ class FreeSpace: public HeapObject {
 
   inline int Size() { return size(); }
 
-  DECLARE_CAST(FreeSpace)
+  // Accessors for the next field.
+  inline FreeSpace* next();
+  inline FreeSpace** next_address();
+  inline void set_next(FreeSpace* next);
+
+  inline static FreeSpace* cast(HeapObject* obj);
 
   // Dispatched behavior.
   DECLARE_PRINTER(FreeSpace)
@@ -4470,9 +4478,7 @@ class FreeSpace: public HeapObject {
   // Layout description.
   // Size is smi tagged when it is stored.
   static const int kSizeOffset = HeapObject::kHeaderSize;
-  static const int kHeaderSize = kSizeOffset + kPointerSize;
-
-  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
+  static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
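
The resulting object layout on a 64-bit target (kPointerSize == 8), derived
from the offsets above:

    +0   map   (the HeapObject map word)
    +8   size  (smi-tagged, kSizeOffset == HeapObject::kHeaderSize)
    +16  next  (raw address, kNextOffset)

So the smallest block that can carry a next pointer and live on a free list
is three words, matching FreeList::kMinBlockSize == 3 * kPointerSize;
one- and two-word holes receive the one/two-pointer filler maps from
Heap::CreateFillerObjectAt() instead.
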
diff --git a/src/serialize.cc b/src/serialize.cc
index f140316..0c9df8e 100644 (file)
@@ -664,7 +664,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
   DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
   isolate_->heap()->IterateSmiRoots(this);
   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  isolate_->heap()->RepairFreeListsAfterBoot();
+  isolate_->heap()->RepairFreeListsAfterDeserialization();
   isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
 
   isolate_->heap()->set_native_contexts_list(
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index e3e7607..81a0dd9 100644 (file)
@@ -486,27 +486,39 @@ static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
   v8::internal::AllocationResult allocation =
       space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
   if (allocation.IsRetry()) return false;
-  v8::internal::FreeListNode* node =
-      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
-  node->set_size(space->heap(), v8::internal::Page::kMaxRegularHeapObjectSize);
+  v8::internal::HeapObject* free_space = NULL;
+  CHECK(allocation.To(&free_space));
+  space->heap()->CreateFillerObjectAt(
+      free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
   return true;
 }
 
 
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
-  int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
+// Helper function that fills new space up to the last extra_bytes bytes.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+                                        int extra_bytes) {
+  int space_remaining = static_cast<int>(*space->allocation_limit_address() -
                                          *space->allocation_top_address());
-  if (new_linear_size > 0) {
-    // Fill up the current page.
-    v8::internal::AllocationResult allocation =
-        space->AllocateRaw(new_linear_size);
-    v8::internal::FreeListNode* node =
-        v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
-    node->set_size(space->heap(), new_linear_size);
+  CHECK(space_remaining >= extra_bytes);
+  int new_linear_size = space_remaining - extra_bytes;
+  if (new_linear_size == 0) return;
+  v8::internal::AllocationResult allocation =
+      space->AllocateRaw(new_linear_size);
+  v8::internal::HeapObject* free_space = NULL;
+  CHECK(allocation.To(&free_space));
+  space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
+}
+
+
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+  AllocateAllButNBytes(space, 0);
+}
+
+
+static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
+  FillCurrentPage(space);
+  while (FillUpOnePage(space)) {
   }
-  // Fill up all remaining pages.
-  while (FillUpOnePage(space))
-    ;
 }
 
 
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index ec17310..8295839 100644 (file)
@@ -3872,21 +3872,6 @@ TEST(Regress169209) {
 }
 
 
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
-                                        int extra_bytes) {
-  int space_remaining = static_cast<int>(
-      *space->allocation_limit_address() - *space->allocation_top_address());
-  CHECK(space_remaining >= extra_bytes);
-  int new_linear_size = space_remaining - extra_bytes;
-  v8::internal::AllocationResult allocation =
-      space->AllocateRaw(new_linear_size);
-  v8::internal::FreeListNode* node =
-      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
-  node->set_size(space->heap(), new_linear_size);
-}
-
-
 TEST(Regress169928) {
   i::FLAG_allow_natives_syntax = true;
   i::FLAG_crankshaft = false;
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 4c6e623..354bc7c 100644 (file)
@@ -156,6 +156,23 @@ static void Serialize(v8::Isolate* isolate) {
 }
 
 
+Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
+                                      Vector<const uint8_t> body,
+                                      Vector<const uint8_t> tail, int repeats) {
+  int source_length = head.length() + body.length() * repeats + tail.length();
+  uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+  CopyChars(source, head.start(), head.length());
+  for (int i = 0; i < repeats; i++) {
+    CopyChars(source + head.length() + i * body.length(), body.start(),
+              body.length());
+  }
+  CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+            tail.length());
+  return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
+                               source_length);
+}
+
+
 // Test that the whole heap can be serialized.
 UNINITIALIZED_TEST(Serialize) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
@@ -546,7 +563,6 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
     params.enable_serializer = true;
     v8::Isolate* v8_isolate = v8::Isolate::New(params);
     Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
-    Heap* heap = isolate->heap();
     {
       v8::Isolate::Scope isolate_scope(v8_isolate);
 
@@ -569,6 +585,16 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
             "var r = Math.random() + Math.cos(0);"
             "var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
             "var s = parseInt('12345');");
+
+        Vector<const uint8_t> source = ConstructSource(
+            STATIC_CHAR_VECTOR("function g() { return [,"),
+            STATIC_CHAR_VECTOR("1,"),
+            STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
+        v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
+            v8_isolate, source.start(), v8::String::kNormalString,
+            source.length());
+        CompileRun(source_str);
+        source.Dispose();
       }
       // Make sure all builtin scripts are cached.
       {
@@ -579,7 +605,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
       }
       // If we don't do this then we end up with a stray root pointing at the
       // context even after we have disposed of env.
-      heap->CollectAllGarbage(Heap::kNoGCFlags);
+      isolate->heap()->CollectAllAvailableGarbage("snapshotting");
 
       int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
       Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -667,6 +693,10 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
         CHECK_EQ(5, f);
         v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
         CHECK(s->Equals(v8_str("12345")));
+        int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
+        CHECK_EQ(100001, a);
+        int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
+        CHECK_EQ(100002, b);
       }
     }
     v8_isolate->Dispose();
@@ -819,23 +849,6 @@ TEST(SerializeToplevelInternalizedString) {
 }
 
 
-Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
-                                      Vector<const uint8_t> body,
-                                      Vector<const uint8_t> tail, int repeats) {
-  int source_length = head.length() + body.length() * repeats + tail.length();
-  uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
-  CopyChars(source, head.start(), head.length());
-  for (int i = 0; i < repeats; i++) {
-    CopyChars(source + head.length() + i * body.length(), body.start(),
-              body.length());
-  }
-  CopyChars(source + head.length() + repeats * body.length(), tail.start(),
-            tail.length());
-  return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
-                               source_length);
-}
-
-
 TEST(SerializeToplevelLargeCodeObject) {
   FLAG_serialize_toplevel = true;
   LocalContext context;
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index a84b867..331ea02 100644 (file)
@@ -459,18 +459,6 @@ TEST(SizeOfFirstPageIsLargeEnough) {
 }
 
 
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
-  int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
-                                         *space->allocation_top_address());
-  if (new_linear_size == 0) return;
-  v8::internal::AllocationResult allocation =
-      space->AllocateRaw(new_linear_size);
-  v8::internal::FreeListNode* node =
-      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
-  node->set_size(space->heap(), new_linear_size);
-}
-
-
 UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
   FLAG_target_semi_space_size = 2;
   if (FLAG_optimize_for_size) return;
@@ -502,9 +490,9 @@ UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
 
       // Turn the allocation into a proper object so isolate teardown won't
       // crash.
-      v8::internal::FreeListNode* node =
-          v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
-      node->set_size(new_space->heap(), 80);
+      HeapObject* free_space = NULL;
+      CHECK(allocation.To(&free_space));
+      new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
     }
   }
   isolate->Dispose();