Revert of Only use FreeSpace objects in the free list. (patchset #3 id:40001 of https...
author yangguo <yangguo@chromium.org>
Tue, 27 Jan 2015 09:41:10 +0000 (01:41 -0800)
committer Commit bot <commit-bot@chromium.org>
Tue, 27 Jan 2015 09:41:21 +0000 (09:41 +0000)
Reason for revert:
Test failures

Original issue's description:
> Only use FreeSpace objects in the free list.
>
> This solves an issue with the custom startup snapshot, in cases where
> deserializing the isolate requires more than one page per space.
>
> R=hpayer@chromium.org
>
> Committed: https://crrev.com/66964395108f03220cb6f45ddc73c5965e2c76a9
> Cr-Commit-Position: refs/heads/master@{#26285}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/882443004

Cr-Commit-Position: refs/heads/master@{#26287}

16 files changed:
src/allocation-tracker.cc
src/api.cc
src/full-codegen.h
src/heap/heap.cc
src/heap/heap.h
src/heap/spaces-inl.h
src/heap/spaces.cc
src/heap/spaces.h
src/isolate.h
src/objects-inl.h
src/objects.h
src/serialize.cc
test/cctest/cctest.h
test/cctest/test-heap.cc
test/cctest/test-serialize.cc
test/cctest/test-spaces.cc

index 1ad86b8..7534ffb 100644 (file)
@@ -227,7 +227,9 @@ void AllocationTracker::AllocationEvent(Address addr, int size) {
 
   // Mark the new block as FreeSpace to make sure the heap is iterable
   // while we are capturing stack trace.
-  heap->CreateFillerObjectAt(addr, size);
+  FreeListNode::FromAddress(addr)->set_size(heap, size);
+  DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
+  DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
 
   Isolate* isolate = heap->isolate();
   int length = 0;
index 8ddd351..0b0ab03 100644 (file)
@@ -27,7 +27,6 @@
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
-#include "src/full-codegen.h"
 #include "src/global-handles.h"
 #include "src/heap-profiler.h"
 #include "src/heap-snapshot-generator-inl.h"
@@ -221,54 +220,6 @@ bool RunExtraCode(Isolate* isolate, char* utf8_source) {
 }
 
 
-void CheckDefaultReservationSizes(const i::StartupSerializer& startup_ser,
-                                  const i::PartialSerializer& context_ser) {
-#ifdef DEBUG
-  i::List<i::SerializedData::Reservation> startup_reservations;
-  i::List<i::SerializedData::Reservation> context_reservations;
-  startup_ser.EncodeReservations(&startup_reservations);
-  context_ser.EncodeReservations(&context_reservations);
-  for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
-    // Exactly one chunk per space.
-    CHECK(startup_reservations[space].is_last());
-    CHECK(startup_reservations[space].is_last());
-    uint32_t sum = startup_reservations[space].chunk_size() +
-                   context_reservations[space].chunk_size();
-    uint32_t limit = 0;
-    const int constant_pool_delta = i::FLAG_enable_ool_constant_pool ? 48 : 0;
-    switch (space) {
-      case i::NEW_SPACE:
-        limit = 3 * i::kPointerSize;
-        break;
-      case i::OLD_POINTER_SPACE:
-        limit = (128 + constant_pool_delta) * i::kPointerSize * i::KB;
-        break;
-      case i::OLD_DATA_SPACE:
-        limit = 192 * i::KB;
-        break;
-      case i::MAP_SPACE:
-        limit = 16 * i::kPointerSize * i::KB;
-        break;
-      case i::CELL_SPACE:
-        limit = 16 * i::kPointerSize * i::KB;
-        break;
-      case i::PROPERTY_CELL_SPACE:
-        limit = 8 * i::kPointerSize * i::KB;
-        break;
-      case i::CODE_SPACE:
-        limit = RoundUp((480 - constant_pool_delta) * i::KB *
-                            i::FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-                        i::kPointerSize);
-        break;
-      default:
-        break;
-    }
-    CHECK_LE(sum, limit);
-  }
-#endif  // DEBUG
-}
-
-
 StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
   Isolate::CreateParams params;
   params.enable_serializer = true;
@@ -315,8 +266,6 @@ StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
       i::SnapshotData sd(snapshot_sink, ser);
       i::SnapshotData csd(context_sink, context_ser);
 
-      if (custom_source == NULL) CheckDefaultReservationSizes(ser, context_ser);
-
       result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData(),
                                                metadata);
     }
index cc24bb8..f544b7f 100644 (file)
@@ -124,7 +124,7 @@ class FullCodeGenerator: public AstVisitor {
   static const int kBootCodeSizeMultiplier = 120;
 #elif V8_TARGET_ARCH_MIPS64
   static const int kCodeSizeMultiplier = 149;
-  static const int kBootCodeSizeMultiplier = 170;
+  static const int kBootCodeSizeMultiplier = 120;
 #else
 #error Unsupported target architecture.
 #endif
index 705fdd1..bc55f9e 100644 (file)
@@ -495,11 +495,11 @@ void Heap::ClearAllICsByKind(Code::Kind kind) {
 }
 
 
-void Heap::RepairFreeListsAfterDeserialization() {
+void Heap::RepairFreeListsAfterBoot() {
   PagedSpaces spaces(this);
   for (PagedSpace* space = spaces.next(); space != NULL;
        space = spaces.next()) {
-    space->RepairFreeListsAfterDeserialization();
+    space->RepairFreeListsAfterBoot();
   }
 }
 
@@ -952,15 +952,14 @@ bool Heap::ReserveSpace(Reservation* reservations) {
           } else {
             allocation = paged_space(space)->AllocateRaw(size);
           }
-          HeapObject* free_space;
-          if (allocation.To(&free_space)) {
+          FreeListNode* node;
+          if (allocation.To(&node)) {
             // Mark with a free list node, in case we have a GC before
             // deserializing.
-            Address free_space_address = free_space->address();
-            CreateFillerObjectAt(free_space_address, size);
+            node->set_size(this, size);
             DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
-            chunk.start = free_space_address;
-            chunk.end = free_space_address + size;
+            chunk.start = node->address();
+            chunk.end = node->address() + size;
           } else {
             perform_gc = true;
             break;
@@ -3393,17 +3392,12 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
 void Heap::CreateFillerObjectAt(Address addr, int size) {
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
-  // At this point, we may be deserializing the heap from a snapshot, and
-  // none of the maps have been created yet and are NULL.
   if (size == kPointerSize) {
-    filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
-    DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
   } else if (size == 2 * kPointerSize) {
-    filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
-    DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
   } else {
-    filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
-    DCHECK(filler->map() == NULL || filler->map() == free_space_map());
+    filler->set_map_no_write_barrier(free_space_map());
     FreeSpace::cast(filler)->set_size(size);
   }
 }
index 4ccbd04..2ae4165 100644 (file)
@@ -692,8 +692,8 @@ class Heap {
   // Iterates the whole code space to clear all ICs of the given kind.
   void ClearAllICsByKind(Code::Kind kind);
 
-  // FreeSpace objects have a null map after deserialization. Update the map.
-  void RepairFreeListsAfterDeserialization();
+  // For use during bootup.
+  void RepairFreeListsAfterBoot();
 
   template <typename T>
   static inline bool IsOneByte(T t, int chars);
index cfa2325..9cb292e 100644 (file)
@@ -305,6 +305,14 @@ intptr_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }
 
+
+bool FreeListNode::IsFreeListNode(HeapObject* object) {
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map() ||
+         map == heap->raw_unchecked_one_pointer_filler_map() ||
+         map == heap->raw_unchecked_two_pointer_filler_map();
+}
 }
 }  // namespace v8::internal
 
index 81199e7..1a2ff27 100644 (file)
@@ -1028,6 +1028,10 @@ bool PagedSpace::Expand() {
 
   intptr_t size = AreaSize();
 
+  if (anchor_.next_page() == &anchor_) {
+    size = SizeOfFirstPage();
+  }
+
   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;
@@ -1040,6 +1044,50 @@ bool PagedSpace::Expand() {
 }
 
 
+intptr_t PagedSpace::SizeOfFirstPage() {
+  // If the snapshot contains a custom script, all size guarantees are off.
+  if (Snapshot::EmbedsScript()) return AreaSize();
+  // If using an ool constant pool then transfer the constant pool allowance
+  // from the code space to the old pointer space.
+  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
+  int size = 0;
+  switch (identity()) {
+    case OLD_POINTER_SPACE:
+      size = (128 + constant_pool_delta) * kPointerSize * KB;
+      break;
+    case OLD_DATA_SPACE:
+      size = 192 * KB;
+      break;
+    case MAP_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case CELL_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case PROPERTY_CELL_SPACE:
+      size = 8 * kPointerSize * KB;
+      break;
+    case CODE_SPACE: {
+      CodeRange* code_range = heap()->isolate()->code_range();
+      if (code_range != NULL && code_range->valid()) {
+        // When code range exists, code pages are allocated in a special way
+        // (from the reserved code range). That part of the code is not yet
+        // upgraded to handle small pages.
+        size = AreaSize();
+      } else {
+        size = RoundUp((480 - constant_pool_delta) * KB *
+                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+                       kPointerSize);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return Min(size, AreaSize());
+}
+
+
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
@@ -2034,6 +2082,79 @@ size_t NewSpace::CommittedPhysicalMemory() {
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
+void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
+  DCHECK(size_in_bytes > 0);
+  DCHECK(IsAligned(size_in_bytes, kPointerSize));
+
+  // We write a map and possibly size information to the block.  If the block
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
+  // appropriate array length for the desired size from HeapObject::Size().
+  // If the block is too small (eg, one or two words), to hold both a size
+  // field and a next pointer, we give it a filler map that gives it the
+  // correct size.
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    // We have to set the size first with a release store before we store
+    // the map because a concurrent store buffer scan on scavenge must not
+    // observe a map with an invalid size.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->nobarrier_set_size(size_in_bytes);
+    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
+  } else if (size_in_bytes == kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
+  } else if (size_in_bytes == 2 * kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
+  } else {
+    UNREACHABLE();
+  }
+  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
+  // deserialization because the free space map is not done yet.
+}
+
+
+FreeListNode* FreeListNode::next() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
+  } else {
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
+  }
+}
+
+
+FreeListNode** FreeListNode::next_address() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
+  } else {
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
+  }
+}
+
+
+void FreeListNode::set_next(FreeListNode* next) {
+  DCHECK(IsFreeListNode(this));
+  // While we are booting the VM the free space map will actually be null.  So
+  // we have to make sure that we don't try to use it for anything at that
+  // stage.
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+        reinterpret_cast<base::AtomicWord>(next));
+  } else {
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+        reinterpret_cast<base::AtomicWord>(next));
+  }
+}
+
+
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
@@ -2067,11 +2188,11 @@ void FreeListCategory::Reset() {
 
 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
   int sum = 0;
-  FreeSpace* t = top();
-  FreeSpace** n = &t;
+  FreeListNode* t = top();
+  FreeListNode** n = &t;
   while (*n != NULL) {
     if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = *n;
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
       sum += free_space->Size();
       *n = (*n)->next();
     } else {
@@ -2088,7 +2209,7 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
 
 
 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeSpace* node = top();
+  FreeListNode* node = top();
   while (node != NULL) {
     if (Page::FromAddress(node->address()) == p) return true;
     node = node->next();
@@ -2097,20 +2218,20 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
 }
 
 
-FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
-  FreeSpace* node = top();
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeListNode* node = top();
 
   if (node == NULL) return NULL;
 
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= node->Size();
+    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
     node = node->next();
   }
 
   if (node != NULL) {
     set_top(node->next());
-    *node_size = node->Size();
+    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
     available_ -= *node_size;
   } else {
     set_top(NULL);
@@ -2124,9 +2245,9 @@ FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
 }
 
 
-FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                              int* node_size) {
-  FreeSpace* node = PickNodeFromList(node_size);
+FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                                 int* node_size) {
+  FreeListNode* node = PickNodeFromList(node_size);
   if (node != NULL && *node_size < size_in_bytes) {
     Free(node, *node_size);
     *node_size = 0;
@@ -2136,19 +2257,18 @@ FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
 }
 
 
-void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
-  DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
-  free_space->set_next(top());
-  set_top(free_space);
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+  node->set_next(top());
+  set_top(node);
   if (end_ == NULL) {
-    end_ = free_space;
+    end_ = node;
   }
   available_ += size_in_bytes;
 }
 
 
 void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeSpace* n = top();
+  FreeListNode* n = top();
   while (n != NULL) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
     if (*map_location == NULL) {
@@ -2187,8 +2307,8 @@ void FreeList::Reset() {
 int FreeList::Free(Address start, int size_in_bytes) {
   if (size_in_bytes == 0) return 0;
 
-  heap_->CreateFillerObjectAt(start, size_in_bytes);
-
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(heap_, size_in_bytes);
   Page* page = Page::FromAddress(start);
 
   // Early return to drop too-small blocks on the floor.
@@ -2197,20 +2317,19 @@ int FreeList::Free(Address start, int size_in_bytes) {
     return size_in_bytes;
   }
 
-  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(free_space, size_in_bytes);
+    small_list_.Free(node, size_in_bytes);
     page->add_available_in_small_free_list(size_in_bytes);
   } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(free_space, size_in_bytes);
+    medium_list_.Free(node, size_in_bytes);
     page->add_available_in_medium_free_list(size_in_bytes);
   } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(free_space, size_in_bytes);
+    large_list_.Free(node, size_in_bytes);
     page->add_available_in_large_free_list(size_in_bytes);
   } else {
-    huge_list_.Free(free_space, size_in_bytes);
+    huge_list_.Free(node, size_in_bytes);
     page->add_available_in_huge_free_list(size_in_bytes);
   }
 
@@ -2219,8 +2338,8 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeSpace* node = NULL;
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
   Page* page = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
@@ -2257,13 +2376,13 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   }
 
   int huge_list_available = huge_list_.available();
-  FreeSpace* top_node = huge_list_.top();
-  for (FreeSpace** cur = &top_node; *cur != NULL;
+  FreeListNode* top_node = huge_list_.top();
+  for (FreeListNode** cur = &top_node; *cur != NULL;
        cur = (*cur)->next_address()) {
-    FreeSpace* cur_node = *cur;
+    FreeListNode* cur_node = *cur;
     while (cur_node != NULL &&
            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      int size = cur_node->Size();
+      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
       huge_list_available -= size;
       page = Page::FromAddress(cur_node->address());
       page->add_available_in_huge_free_list(-size);
@@ -2276,7 +2395,9 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
       break;
     }
 
-    int size = cur_node->Size();
+    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
     if (size >= size_in_bytes) {
       // Large enough node found.  Unlink it from the list.
       node = *cur;
@@ -2349,7 +2470,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                                                       old_linear_size);
 
   int new_node_size = 0;
-  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
     owner_->SetTopAndLimit(NULL, NULL);
     return NULL;
@@ -2443,10 +2564,11 @@ void FreeList::RepairLists(Heap* heap) {
 #ifdef DEBUG
 intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
-  FreeSpace* cur = top();
+  FreeListNode* cur = top();
   while (cur != NULL) {
     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
-    sum += cur->nobarrier_size();
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->nobarrier_size();
     cur = cur->next();
   }
   return sum;
@@ -2458,7 +2580,7 @@ static const int kVeryLongFreeList = 500;
 
 int FreeListCategory::FreeListLength() {
   int length = 0;
-  FreeSpace* cur = top();
+  FreeListNode* cur = top();
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2519,9 +2641,7 @@ intptr_t PagedSpace::SizeOfObjects() {
 // on the heap.  If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.
-void PagedSpace::RepairFreeListsAfterDeserialization() {
-  free_list_.RepairLists(heap());
-}
+void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
index e21876b..dcd3364 100644 (file)
@@ -1411,6 +1411,45 @@ class AllocationStats BASE_EMBEDDED {
 
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode : public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it does not check nor require that the first word at the address is a map
+  // pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+  static inline FreeListNode* cast(Object* object) {
+    return reinterpret_cast<FreeListNode*>(object);
+  }
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
 
 // The free list category holds a pointer to the top element and a pointer to
 // the end element of the linked list of free memory blocks.
@@ -1422,26 +1461,27 @@ class FreeListCategory {
 
   void Reset();
 
-  void Free(FreeSpace* node, int size_in_bytes);
+  void Free(FreeListNode* node, int size_in_bytes);
 
-  FreeSpace* PickNodeFromList(int* node_size);
-  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
+  FreeListNode* PickNodeFromList(int* node_size);
+  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
 
   intptr_t EvictFreeListItemsInList(Page* p);
   bool ContainsPageFreeListItemsInList(Page* p);
 
   void RepairFreeList(Heap* heap);
 
-  FreeSpace* top() const {
-    return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
+  FreeListNode* top() const {
+    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
   }
 
-  void set_top(FreeSpace* top) {
+  void set_top(FreeListNode* top) {
     base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
   }
 
-  FreeSpace* end() const { return end_; }
-  void set_end(FreeSpace* end) { end_ = end; }
+  FreeListNode** GetEndAddress() { return &end_; }
+  FreeListNode* end() const { return end_; }
+  void set_end(FreeListNode* end) { end_ = end; }
 
   int* GetAvailableAddress() { return &available_; }
   int available() const { return available_; }
@@ -1457,9 +1497,9 @@ class FreeListCategory {
 #endif
 
  private:
-  // top_ points to the top FreeSpace* in the free list category.
+  // top_ points to the top FreeListNode* in the free list category.
   base::AtomicWord top_;
-  FreeSpace* end_;
+  FreeListNode* end_;
   base::Mutex mutex_;
 
   // Total available bytes in all blocks of this free list category.
@@ -1556,18 +1596,17 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }
 
-  static const int kSmallListMin = 0x20 * kPointerSize;
-
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
 
-  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
 
   PagedSpace* owner_;
   Heap* heap_;
 
+  static const int kSmallListMin = 0x20 * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
   static const int kLargeListMax = 0x3fff * kPointerSize;
@@ -1663,7 +1702,7 @@ class PagedSpace : public Space {
 
   // During boot the free_space_map is created, and afterwards we may need
   // to write it into the free list nodes that were already created.
-  void RepairFreeListsAfterDeserialization();
+  void RepairFreeListsAfterBoot();
 
   // Prepares for a mark-compact GC.
   void PrepareForMarkCompact();
@@ -1870,6 +1909,8 @@ class PagedSpace : public Space {
   // Maximum capacity of this space.
   intptr_t max_capacity_;
 
+  intptr_t SizeOfFirstPage();
+
   // Accounting information for this space.
   AllocationStats accounting_stats_;
 
index a737dae..cac1d91 100644 (file)
@@ -1362,6 +1362,7 @@ class Isolate {
 
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
+  friend class IsolateInitializer;
   friend class OptimizingCompilerThread;
   friend class SweeperThread;
   friend class ThreadManager;
index 402010e..5ee9191 100644 (file)
@@ -3334,6 +3334,7 @@ CAST_ACCESSOR(FixedArrayBase)
 CAST_ACCESSOR(FixedDoubleArray)
 CAST_ACCESSOR(FixedTypedArrayBase)
 CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(GlobalObject)
 CAST_ACCESSOR(HeapObject)
 CAST_ACCESSOR(JSArray)
@@ -3442,39 +3443,6 @@ SMI_ACCESSORS(String, length, kLengthOffset)
 SYNCHRONIZED_SMI_ACCESSORS(String, length, kLengthOffset)
 
 
-FreeSpace* FreeSpace::next() {
-  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
-         (!GetHeap()->deserialization_complete() && map() == NULL));
-  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
-  return reinterpret_cast<FreeSpace*>(
-      Memory::Address_at(address() + kNextOffset));
-}
-
-
-FreeSpace** FreeSpace::next_address() {
-  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
-         (!GetHeap()->deserialization_complete() && map() == NULL));
-  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
-  return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
-}
-
-
-void FreeSpace::set_next(FreeSpace* next) {
-  DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
-         (!GetHeap()->deserialization_complete() && map() == NULL));
-  DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
-  base::NoBarrier_Store(
-      reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
-      reinterpret_cast<base::AtomicWord>(next));
-}
-
-
-FreeSpace* FreeSpace::cast(HeapObject* o) {
-  SLOW_DCHECK(!o->GetHeap()->deserialization_complete() || o->IsFreeSpace());
-  return reinterpret_cast<FreeSpace*>(o);
-}
-
-
 uint32_t Name::hash_field() {
   return READ_UINT32_FIELD(this, kHashFieldOffset);
 }
index 80ab919..5175505 100644 (file)
@@ -4448,11 +4448,8 @@ class ByteArray: public FixedArrayBase {
 };
 
 
-// FreeSpace are fixed-size free memory blocks used by the heap and GC.
-// They look like heap objects (are heap object tagged and have a map) so that
-// the heap remains iterable.  They have a size and a next pointer.
-// The next pointer is the raw address of the next FreeSpace object (or NULL)
-// in the free list.
+// FreeSpace represents fixed sized areas of the heap that are not currently in
+// use.  Used by the heap and GC.
 class FreeSpace: public HeapObject {
  public:
   // [size]: size of the free space including the header.
@@ -4464,12 +4461,7 @@ class FreeSpace: public HeapObject {
 
   inline int Size() { return size(); }
 
-  // Accessors for the next field.
-  inline FreeSpace* next();
-  inline FreeSpace** next_address();
-  inline void set_next(FreeSpace* next);
-
-  inline static FreeSpace* cast(HeapObject* obj);
+  DECLARE_CAST(FreeSpace)
 
   // Dispatched behavior.
   DECLARE_PRINTER(FreeSpace)
@@ -4478,7 +4470,9 @@ class FreeSpace: public HeapObject {
   // Layout description.
   // Size is smi tagged when it is stored.
   static const int kSizeOffset = HeapObject::kHeaderSize;
-  static const int kNextOffset = POINTER_SIZE_ALIGN(kSizeOffset + kPointerSize);
+  static const int kHeaderSize = kSizeOffset + kPointerSize;
+
+  static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeSpace);
index 0c9df8e..f140316 100644 (file)
@@ -664,7 +664,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
   DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
   isolate_->heap()->IterateSmiRoots(this);
   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
-  isolate_->heap()->RepairFreeListsAfterDeserialization();
+  isolate_->heap()->RepairFreeListsAfterBoot();
   isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
 
   isolate_->heap()->set_native_contexts_list(
index 92ea2fb..e3e7607 100644 (file)
@@ -486,39 +486,27 @@ static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
   v8::internal::AllocationResult allocation =
       space->AllocateRaw(v8::internal::Page::kMaxRegularHeapObjectSize);
   if (allocation.IsRetry()) return false;
-  v8::internal::HeapObject* free_space;
-  CHECK(allocation.To(&free_space));
-  space->heap()->CreateFillerObjectAt(
-      free_space->address(), v8::internal::Page::kMaxRegularHeapObjectSize);
+  v8::internal::FreeListNode* node =
+      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
+  node->set_size(space->heap(), v8::internal::Page::kMaxRegularHeapObjectSize);
   return true;
 }
 
 
-// Helper function that simulates a fill new-space in the heap.
-static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
-                                        int extra_bytes) {
-  int space_remaining = static_cast<int>(*space->allocation_limit_address() -
-                                         *space->allocation_top_address());
-  CHECK(space_remaining >= extra_bytes);
-  int new_linear_size = space_remaining - extra_bytes;
-  if (new_linear_size == 0) return;
-  v8::internal::AllocationResult allocation =
-      space->AllocateRaw(new_linear_size);
-  v8::internal::HeapObject* free_space;
-  CHECK(allocation.To(&free_space));
-  space->heap()->CreateFillerObjectAt(free_space->address(), new_linear_size);
-}
-
-
-static inline void FillCurrentPage(v8::internal::NewSpace* space) {
-  AllocateAllButNBytes(space, 0);
-}
-
-
 static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
-  FillCurrentPage(space);
-  while (FillUpOnePage(space)) {
+  int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
+                                         *space->allocation_top_address());
+  if (new_linear_size > 0) {
+    // Fill up the current page.
+    v8::internal::AllocationResult allocation =
+        space->AllocateRaw(new_linear_size);
+    v8::internal::FreeListNode* node =
+        v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
+    node->set_size(space->heap(), new_linear_size);
   }
+  // Fill up all remaining pages.
+  while (FillUpOnePage(space))
+    ;
 }
 
 
index 8295839..ec17310 100644 (file)
@@ -3872,6 +3872,21 @@ TEST(Regress169209) {
 }
 
 
+// Helper function that simulates a fill new-space in the heap.
+static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
+                                        int extra_bytes) {
+  int space_remaining = static_cast<int>(
+      *space->allocation_limit_address() - *space->allocation_top_address());
+  CHECK(space_remaining >= extra_bytes);
+  int new_linear_size = space_remaining - extra_bytes;
+  v8::internal::AllocationResult allocation =
+      space->AllocateRaw(new_linear_size);
+  v8::internal::FreeListNode* node =
+      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
+  node->set_size(space->heap(), new_linear_size);
+}
+
+
 TEST(Regress169928) {
   i::FLAG_allow_natives_syntax = true;
   i::FLAG_crankshaft = false;
index 583bd4f..4c6e623 100644 (file)
@@ -156,23 +156,6 @@ static void Serialize(v8::Isolate* isolate) {
 }
 
 
-Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
-                                      Vector<const uint8_t> body,
-                                      Vector<const uint8_t> tail, int repeats) {
-  int source_length = head.length() + body.length() * repeats + tail.length();
-  uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
-  CopyChars(source, head.start(), head.length());
-  for (int i = 0; i < repeats; i++) {
-    CopyChars(source + head.length() + i * body.length(), body.start(),
-              body.length());
-  }
-  CopyChars(source + head.length() + repeats * body.length(), tail.start(),
-            tail.length());
-  return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
-                               source_length);
-}
-
-
 // Test that the whole heap can be serialized.
 UNINITIALIZED_TEST(Serialize) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
@@ -317,32 +300,6 @@ UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
 }
 
 
-// Test that the whole heap can be serialized.
-UNINITIALIZED_TEST(SerializeMultiplePages) {
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    v8::Isolate::CreateParams params;
-    params.enable_serializer = true;
-    v8::Isolate* isolate = v8::Isolate::New(params);
-    {
-      v8::Isolate::Scope isolate_scope(isolate);
-      v8::HandleScope handle_scope(isolate);
-      v8::Local<v8::Context> context = v8::Context::New(isolate);
-      v8::Context::Scope context_scope(context);
-      Vector<const uint8_t> source = ConstructSource(
-          STATIC_CHAR_VECTOR("var s='"), STATIC_CHAR_VECTOR("A"),
-          STATIC_CHAR_VECTOR("';"), Page::kMaxRegularHeapObjectSize - 100);
-      v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
-          isolate, source.start(), v8::String::kNormalString, source.length());
-      CompileRun(source_str);
-    }
-
-    Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
-    internal_isolate->heap()->CollectAllAvailableGarbage("serialize");
-    WriteToFile(internal_isolate, FLAG_testing_serialization_file);
-  }
-}
-
-
 UNINITIALIZED_TEST(PartialSerialization) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
     v8::Isolate::CreateParams params;
@@ -589,6 +546,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
     params.enable_serializer = true;
     v8::Isolate* v8_isolate = v8::Isolate::New(params);
     Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+    Heap* heap = isolate->heap();
     {
       v8::Isolate::Scope isolate_scope(v8_isolate);
 
@@ -611,16 +569,6 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
             "var r = Math.random() + Math.cos(0);"
             "var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
             "var s = parseInt('12345');");
-
-        Vector<const uint8_t> source = ConstructSource(
-            STATIC_CHAR_VECTOR("function g() { return [,"),
-            STATIC_CHAR_VECTOR("1,"),
-            STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
-        v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
-            v8_isolate, source.start(), v8::String::kNormalString,
-            source.length());
-        CompileRun(source_str);
-        source.Dispose();
       }
       // Make sure all builtin scripts are cached.
       {
@@ -631,7 +579,7 @@ UNINITIALIZED_TEST(CustomContextSerialization) {
       }
       // If we don't do this then we end up with a stray root pointing at the
       // context even after we have disposed of env.
-      isolate->heap()->CollectAllAvailableGarbage("snapshotting");
+      heap->CollectAllGarbage(Heap::kNoGCFlags);
 
       int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
       Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
@@ -719,10 +667,6 @@ UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
         CHECK_EQ(5, f);
         v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
         CHECK(s->Equals(v8_str("12345")));
-        int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
-        CHECK_EQ(100001, a);
-        int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
-        CHECK_EQ(100002, b);
       }
     }
     v8_isolate->Dispose();
@@ -875,6 +819,23 @@ TEST(SerializeToplevelInternalizedString) {
 }
 
 
+Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
+                                      Vector<const uint8_t> body,
+                                      Vector<const uint8_t> tail, int repeats) {
+  int source_length = head.length() + body.length() * repeats + tail.length();
+  uint8_t* source = NewArray<uint8_t>(static_cast<size_t>(source_length));
+  CopyChars(source, head.start(), head.length());
+  for (int i = 0; i < repeats; i++) {
+    CopyChars(source + head.length() + i * body.length(), body.start(),
+              body.length());
+  }
+  CopyChars(source + head.length() + repeats * body.length(), tail.start(),
+            tail.length());
+  return Vector<const uint8_t>(const_cast<const uint8_t*>(source),
+                               source_length);
+}
+
+
 TEST(SerializeToplevelLargeCodeObject) {
   FLAG_serialize_toplevel = true;
   LocalContext context;
index d6668d7..a84b867 100644 (file)
@@ -459,6 +459,18 @@ TEST(SizeOfFirstPageIsLargeEnough) {
 }
 
 
+static inline void FillCurrentPage(v8::internal::NewSpace* space) {
+  int new_linear_size = static_cast<int>(*space->allocation_limit_address() -
+                                         *space->allocation_top_address());
+  if (new_linear_size == 0) return;
+  v8::internal::AllocationResult allocation =
+      space->AllocateRaw(new_linear_size);
+  v8::internal::FreeListNode* node =
+      v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
+  node->set_size(space->heap(), new_linear_size);
+}
+
+
 UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
   FLAG_target_semi_space_size = 2;
   if (FLAG_optimize_for_size) return;
@@ -490,9 +502,9 @@ UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
 
       // Turn the allocation into a proper object so isolate teardown won't
       // crash.
-      HeapObject* free_space;
-      CHECK(allocation.To(&free_space));
-      new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
+      v8::internal::FreeListNode* node =
+          v8::internal::FreeListNode::cast(allocation.ToObjectChecked());
+      node->set_size(new_space->heap(), 80);
     }
   }
   isolate->Dispose();