Break deserializer reservations into chunks that fit onto a page.
Author:    yangguo@chromium.org <yangguo@chromium.org>
           Wed, 15 Oct 2014 14:04:53 +0000 (14:04 +0000)
Committer: yangguo@chromium.org <yangguo@chromium.org>
           Wed, 15 Oct 2014 14:04:53 +0000 (14:04 +0000)
R=mvstanton@chromium.org

Review URL: https://codereview.chromium.org/653033002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24639 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

src/compiler.cc
src/heap/heap.cc
src/heap/heap.h
src/list.h
src/mksnapshot.cc
src/serialize.cc
src/serialize.h
src/snapshot-common.cc
src/snapshot-external.cc
src/vector.h
test/cctest/test-serialize.cc

diff --git a/src/compiler.cc b/src/compiler.cc
index 244ed8b..4250529 100644
@@ -1177,7 +1177,12 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
         compile_options == ScriptCompiler::kConsumeCodeCache &&
         !isolate->debug()->is_loaded()) {
       HistogramTimerScope timer(isolate->counters()->compile_deserialize());
-      return CodeSerializer::Deserialize(isolate, *cached_data, source);
+      Handle<SharedFunctionInfo> result;
+      if (CodeSerializer::Deserialize(isolate, *cached_data, source)
+              .ToHandle(&result)) {
+        return result;
+      }
+      // Deserializer failed. Fall through to compile.
     } else {
       maybe_result = compilation_cache->LookupScript(
           source, script_name, line_offset, column_offset,
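
Illustration of the new contract (not part of the patch): CodeSerializer::Deserialize now returns a MaybeHandle<SharedFunctionInfo>, so every caller must be prepared for an empty result and fall back to a regular compile, just as the hunk above does. A minimal sketch; CompileScriptFromSource is a hypothetical fallback, not a real V8 function:

  Handle<SharedFunctionInfo> result;
  if (CodeSerializer::Deserialize(isolate, cached_data, source)
          .ToHandle(&result)) {
    return result;  // Code cache was usable and heap space could be reserved.
  }
  // Reservations could not be fulfilled (or the cache was rejected): compile.
  return CompileScriptFromSource(isolate, source);
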
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 65b3bed..7c1b2b2 100644
@@ -28,6 +28,7 @@
 #include "src/natives.h"
 #include "src/runtime-profiler.h"
 #include "src/scopeinfo.h"
+#include "src/serialize.h"
 #include "src/snapshot.h"
 #include "src/utils.h"
 #include "src/v8threads.h"
@@ -919,33 +920,41 @@ static bool AbortIncrementalMarkingAndCollectGarbage(
 }
 
 
-void Heap::ReserveSpace(int* sizes, Address* locations_out) {
+bool Heap::ReserveSpace(Reservation* reservations) {
   bool gc_performed = true;
   int counter = 0;
   static const int kThreshold = 20;
   while (gc_performed && counter++ < kThreshold) {
     gc_performed = false;
     for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
-      if (sizes[space] == 0) continue;
+      Reservation* reservation = &reservations[space];
+      DCHECK_LE(1, reservation->length());
+      if (reservation->at(0).size == 0) continue;
       bool perform_gc = false;
       if (space == LO_SPACE) {
-        perform_gc = !lo_space()->CanAllocateSize(sizes[space]);
+        DCHECK_EQ(1, reservation->length());
+        perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
       } else {
-        AllocationResult allocation;
-        if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRaw(sizes[space]);
-        } else {
-          allocation = paged_space(space)->AllocateRaw(sizes[space]);
-        }
-        FreeListNode* node;
-        if (allocation.To(&node)) {
-          // Mark with a free list node, in case we have a GC before
-          // deserializing.
-          node->set_size(this, sizes[space]);
-          DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
-          locations_out[space] = node->address();
-        } else {
-          perform_gc = true;
+        for (auto& chunk : *reservation) {
+          AllocationResult allocation;
+          int size = chunk.size;
+          if (space == NEW_SPACE) {
+            allocation = new_space()->AllocateRaw(size);
+          } else {
+            allocation = paged_space(space)->AllocateRaw(size);
+          }
+          FreeListNode* node;
+          if (allocation.To(&node)) {
+            // Mark with a free list node, in case we have a GC before
+            // deserializing.
+            node->set_size(this, size);
+            DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
+            chunk.start = node->address();
+            chunk.end = node->address() + size;
+          } else {
+            perform_gc = true;
+            break;
+          }
         }
       }
       if (perform_gc) {
@@ -963,10 +972,7 @@ void Heap::ReserveSpace(int* sizes, Address* locations_out) {
     }
   }
 
-  if (gc_performed) {
-    // Failed to reserve the space after several attempts.
-    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
-  }
+  return !gc_performed;
 }
 
 
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 7186bb6..842a2c1 100644
@@ -1009,7 +1009,16 @@ class Heap {
 
   // Support for partial snapshots.  After calling this we have a linear
   // space to write objects in each space.
-  void ReserveSpace(int* sizes, Address* addresses);
+  struct Chunk {
+    uint32_t size;
+    Address start;
+    Address end;
+  };
+
+  typedef List<Chunk> Reservation;
+
+  // Returns false if not able to reserve.
+  bool ReserveSpace(Reservation* reservations);
 
   //
   // Support for the API.
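
Illustration only (hypothetical helper, not in the patch): the new Heap::ReserveSpace is driven with one Reservation, i.e. a List of Chunks, per space. Callers fill in only the chunk sizes; on success the GC writes start and end back into every chunk, and on failure (after several GC attempts) the caller has to bail out instead of crashing. A sketch assuming the types declared above:

  // Mirrors how Deserializer::ReserveSpace in serialize.cc uses the API.
  bool ReserveAllSpaces(Heap* heap, Heap::Reservation* reservations) {
    // One Reservation per space; every space needs at least one chunk,
    // even if its size is zero.
    for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
      if (reservations[space].length() == 0) {
        reservations[space].Add({0, NULL, NULL});
      }
    }
    // On success, [chunk.start, chunk.end) is page-backed memory per chunk.
    return heap->ReserveSpace(reservations);
  }
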
diff --git a/src/list.h b/src/list.h
index ea5fd1e..c17c4ec 100644
@@ -80,7 +80,9 @@ class List {
 
   Vector<T> ToVector() const { return Vector<T>(data_, length_); }
 
-  Vector<const T> ToConstVector() { return Vector<const T>(data_, length_); }
+  Vector<const T> ToConstVector() const {
+    return Vector<const T>(data_, length_);
+  }
 
   // Adds a copy of the given 'element' to the end of the list,
   // expanding the list if necessary.
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 58e8669..0737c9e 100644
@@ -91,14 +91,25 @@ class SnapshotWriter {
 
     i::byte* snapshot_bytes = snapshot_data.begin();
     sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
-    for (size_t i = 0; i < arraysize(spaces); ++i)
-      sink.PutInt(serializer.CurrentAllocationAddress(spaces[i]), "spaces");
+    for (size_t i = 0; i < arraysize(spaces); ++i) {
+      i::Vector<const uint32_t> chunks =
+          serializer.FinalAllocationChunks(spaces[i]);
+      // For the start-up snapshot, none of the reservations has more than
+      // one chunk (reservation for each space fits onto a single page).
+      CHECK_EQ(1, chunks.length());
+      sink.PutInt(chunks[0], "spaces");
+    }
 
     i::byte* context_bytes = context_snapshot_data.begin();
     sink.PutBlob(context_bytes, context_snapshot_data.length(), "context");
-    for (size_t i = 0; i < arraysize(spaces); ++i)
-      sink.PutInt(context_serializer.CurrentAllocationAddress(spaces[i]),
-                  "spaces");
+    for (size_t i = 0; i < arraysize(spaces); ++i) {
+      i::Vector<const uint32_t> chunks =
+          context_serializer.FinalAllocationChunks(spaces[i]);
+      // For the context snapshot, none of the reservations has more than
+      // one chunk (reservation for each space fits onto a single page).
+      CHECK_EQ(1, chunks.length());
+      sink.PutInt(chunks[0], "spaces");
+    }
 
     size_t written = fwrite(startup_blob.begin(), 1, startup_blob.length(),
                             startup_blob_file_);
@@ -203,8 +214,12 @@ class SnapshotWriter {
 
   void WriteSizeVar(const i::Serializer& ser, const char* prefix,
                     const char* name, int space) const {
-    fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n",
-            prefix, name, ser.CurrentAllocationAddress(space));
+    i::Vector<const uint32_t> chunks = ser.FinalAllocationChunks(space);
+    // For the start-up snapshot, none of the reservations has more than
+    // one chunk (the reservation for each space fits onto a single page).
+    CHECK_EQ(1, chunks.length());
+    fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n", prefix, name,
+            chunks[0]);
   }
 
   void WriteSnapshotData(const i::List<i::byte>* data) const {
@@ -416,6 +431,9 @@ int main(int argc, char** argv) {
     context_ser.Serialize(&raw_context);
     ser.SerializeWeakReferences();
 
+    context_ser.FinalizeAllocation();
+    ser.FinalizeAllocation();
+
     {
       SnapshotWriter writer(argv[1]);
       if (i::FLAG_raw_file && i::FLAG_raw_context_file)
diff --git a/src/serialize.cc b/src/serialize.cc
index 42ca6b9..4c9675e 100644
@@ -598,9 +598,7 @@ Deserializer::Deserializer(SnapshotByteSource* source)
       source_(source),
       external_reference_decoder_(NULL),
       deserialized_large_objects_(0) {
-  for (int i = 0; i < kNumberOfSpaces; i++) {
-    reservations_[i] = kUninitializedReservation;
-  }
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
 }
 
 
@@ -613,10 +611,19 @@ void Deserializer::FlushICacheForNewCodeObjects() {
 }
 
 
+bool Deserializer::ReserveSpace() {
+  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
+  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
+    high_water_[i] = reservations_[i][0].start;
+  }
+  return true;
+}
+
+
 void Deserializer::Deserialize(Isolate* isolate) {
   isolate_ = isolate;
   DCHECK(isolate_ != NULL);
-  isolate_->heap()->ReserveSpace(reservations_, high_water_);
+  if (!ReserveSpace()) FatalProcessOutOfMemory("deserializing context");
   // No active threads.
   DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
@@ -658,13 +665,17 @@ void Deserializer::Deserialize(Isolate* isolate) {
 }
 
 
-void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
+void Deserializer::DeserializePartial(Isolate* isolate, Object** root,
+                                      OnOOM on_oom) {
   isolate_ = isolate;
   for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
-    DCHECK(reservations_[i] != kUninitializedReservation);
+    DCHECK(reservations_[i].length() > 0);
+  }
+  if (!ReserveSpace()) {
+    if (on_oom == FATAL_ON_OOM) FatalProcessOutOfMemory("deserialize context");
+    *root = NULL;
+    return;
   }
-  Heap* heap = isolate->heap();
-  heap->ReserveSpace(reservations_, high_water_);
   if (external_reference_decoder_ == NULL) {
     external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
   }
@@ -700,7 +711,7 @@ Deserializer::~Deserializer() {
 void Deserializer::VisitPointers(Object** start, Object** end) {
   // The space must be new space.  Any other space would cause ReadChunk to try
   // to update the remembered using NULL as the address.
-  ReadChunk(start, end, NEW_SPACE, NULL);
+  ReadData(start, end, NEW_SPACE, NULL);
 }
 
 
@@ -788,7 +799,7 @@ void Deserializer::ReadObject(int space_number,
   if (FLAG_log_snapshot_positions) {
     LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
   }
-  ReadChunk(current, limit, space_number, address);
+  ReadData(current, limit, space_number, address);
 
   // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
   // as a (weak) root. If this root is relocated correctly,
@@ -813,6 +824,9 @@ void Deserializer::ReadObject(int space_number,
 // pre-allocate that reserved space. During deserialization, all we need
 // to do is to bump up the pointer for each space in the reserved
 // space. This is also used for fixing back references.
+// Because the pre-allocation may not fit onto a single page, it may have
+// to be split up into several chunks; we have to keep track of when to
+// move on to the next chunk.
 // Since multiple large objects cannot be folded into one large object
 // space allocation, we have to do an actual allocation when deserializing
 // each large object. Instead of tracking offset for back references, we
@@ -821,7 +835,7 @@ Address Deserializer::Allocate(int space_index, int size) {
   if (space_index == LO_SPACE) {
     AlwaysAllocateScope scope(isolate_);
     LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
-    Executability exec = static_cast<Executability>(source_->GetInt());
+    Executability exec = static_cast<Executability>(source_->Get());
     AllocationResult result = lo_space->AllocateRaw(size, exec);
     HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
     deserialized_large_objects_.Add(obj);
@@ -829,16 +843,28 @@ Address Deserializer::Allocate(int space_index, int size) {
   } else {
     DCHECK(space_index < kNumberOfPreallocatedSpaces);
     Address address = high_water_[space_index];
+    DCHECK_NE(NULL, address);
+    const Heap::Reservation& reservation = reservations_[space_index];
+    int chunk_index = current_chunk_[space_index];
+    if (address + size > reservation[chunk_index].end) {
+      // The current chunk must be filled up exactly by the deserialized data.
+      DCHECK_EQ(address, reservation[chunk_index].end);
+      // Move to next reserved chunk.
+      chunk_index = ++current_chunk_[space_index];
+      DCHECK_LT(chunk_index, reservation.length());
+      // Prepare for next allocation in the next chunk.
+      address = reservation[chunk_index].start;
+    }
     high_water_[space_index] = address + size;
     return address;
   }
 }
 
 
-void Deserializer::ReadChunk(Object** current,
-                             Object** limit,
-                             int source_space,
-                             Address current_object_address) {
+void Deserializer::ReadData(Object** current, Object** limit, int source_space,
+                            Address current_object_address) {
   Isolate* const isolate = isolate_;
   // Write barrier support costs around 1% in startup time.  In fact there
   // are no new space objects in current boot snapshots, so it's not needed,
@@ -890,7 +916,7 @@ void Deserializer::ReadChunk(Object** current,
         new_object = reinterpret_cast<Object*>(address);                       \
       } else if (where == kBackref) {                                          \
         emit_write_barrier = (space_number == NEW_SPACE);                      \
-        new_object = GetAddressFromEnd(data & kSpaceMask);                     \
+        new_object = GetBackReferencedObject(data & kSpaceMask);               \
         if (deserializing_user_code()) {                                       \
           new_object = ProcessBackRefInSerializedCode(new_object);             \
         }                                                                      \
@@ -913,7 +939,7 @@ void Deserializer::ReadChunk(Object** current,
         current = reinterpret_cast<Object**>(                                  \
             reinterpret_cast<Address>(current) + skip);                        \
         emit_write_barrier = (space_number == NEW_SPACE);                      \
-        new_object = GetAddressFromEnd(data & kSpaceMask);                     \
+        new_object = GetBackReferencedObject(data & kSpaceMask);               \
         if (deserializing_user_code()) {                                       \
           new_object = ProcessBackRefInSerializedCode(new_object);             \
         }                                                                      \
@@ -1221,7 +1247,7 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
       seen_large_objects_index_(0) {
   // The serializer is meant to be used only to generate initial heap images
   // from a context in which there is only one isolate.
-  for (int i = 0; i < kNumberOfSpaces; i++) fullness_[i] = 0;
+  for (int i = 0; i < kNumberOfSpaces; i++) pending_chunk_[i] = 0;
 }
 
 
@@ -1283,6 +1309,19 @@ void Serializer::VisitPointers(Object** start, Object** end) {
 }
 
 
+void Serializer::FinalizeAllocation() {
+  DCHECK_EQ(0, completed_chunks_[LO_SPACE].length());  // Not yet finalized.
+  for (int i = 0; i < kNumberOfSpaces; i++) {
+    // Complete the last pending chunk and if there are no completed chunks,
+    // make sure there is at least one empty chunk.
+    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
+      completed_chunks_[i].Add(pending_chunk_[i]);
+      pending_chunk_[i] = 0;
+    }
+  }
+}
+
+
 // This ensures that the partial snapshot cache keeps things alive during GC and
 // tracks their movement.  When it is called during serialization of the startup
 // snapshot nothing happens.  When the partial (context) snapshot is created,
@@ -1369,11 +1408,10 @@ void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
     int index = address_mapper_.MappedTo(heap_object);
     sink_->PutInt(index, "large object index");
   } else {
-    int address = address_mapper_.MappedTo(heap_object);
-    int offset = CurrentAllocationAddress(space) - address;
+    uint32_t existing_allocation = address_mapper_.MappedTo(heap_object);
     // Shift out the bits that are always 0.
-    offset >>= kObjectAlignmentBits;
-    sink_->PutInt(offset, "offset");
+    existing_allocation >>= kObjectAlignmentBits;
+    sink_->PutInt(existing_allocation, "allocation");
   }
 }
 
@@ -1533,15 +1571,15 @@ void Serializer::ObjectSerializer::SerializePrologue(int space, int size,
   // Mark this object as already serialized.
   if (space == LO_SPACE) {
     if (object_->IsCode()) {
-      sink_->PutInt(EXECUTABLE, "executable large object");
+      sink_->Put(EXECUTABLE, "executable large object");
     } else {
-      sink_->PutInt(NOT_EXECUTABLE, "not executable large object");
+      sink_->Put(NOT_EXECUTABLE, "not executable large object");
     }
     int index = serializer_->AllocateLargeObject(size);
     serializer_->address_mapper()->AddMapping(object_, index);
   } else {
-    int offset = serializer_->Allocate(space, size);
-    serializer_->address_mapper()->AddMapping(object_, offset);
+    int allocation = serializer_->Allocate(space, size);
+    serializer_->address_mapper()->AddMapping(object_, allocation);
   }
 
   // Serialize the map (first word of the object).
@@ -1867,17 +1905,32 @@ int Serializer::SpaceOfObject(HeapObject* object) {
 }
 
 
-int Serializer::AllocateLargeObject(int size) {
-  fullness_[LO_SPACE] += size;
+uint32_t Serializer::AllocateLargeObject(int size) {
+  // Large objects are allocated one-by-one when deserializing. We do not
+  // have to keep track of multiple chunks.
+  pending_chunk_[LO_SPACE] += size;
   return seen_large_objects_index_++;
 }
 
 
-int Serializer::Allocate(int space, int size) {
+uint32_t Serializer::Allocate(int space, int size) {
   CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
-  int allocation_address = fullness_[space];
-  fullness_[space] = allocation_address + size;
-  return allocation_address;
+  DCHECK(size > 0 && size < Page::kMaxRegularHeapObjectSize);
+  uint32_t new_chunk_size = pending_chunk_[space] + size;
+  uint32_t allocation;
+  if (new_chunk_size > Page::kMaxRegularHeapObjectSize) {
+    // The new chunk size would not fit onto a single page. Complete the
+    // current chunk and start a new one.
+    completed_chunks_[space].Add(pending_chunk_[space]);
+    pending_chunk_[space] = 0;
+    new_chunk_size = size;
+  }
+  // For back-referencing, each allocation is encoded as a combination
+  // of chunk index and offset inside the chunk.
+  allocation = ChunkIndexBits::encode(completed_chunks_[space].length()) |
+               OffsetBits::encode(pending_chunk_[space]);
+  pending_chunk_[space] = new_chunk_size;
+  return allocation;
 }
 
 
@@ -1923,6 +1976,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
   cs.Pad();
+  cs.FinalizeAllocation();
 
   SerializedCodeData data(&payload, &cs);
   ScriptData* script_data = data.GetScriptData();
@@ -2093,9 +2147,8 @@ void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
 }
 
 
-Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
-                                                       ScriptData* data,
-                                                       Handle<String> source) {
+MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
+    Isolate* isolate, ScriptData* data, Handle<String> source) {
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
 
@@ -2107,10 +2160,15 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
     SerializedCodeData scd(data, *source);
     SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
     Deserializer deserializer(&payload);
+
     STATIC_ASSERT(NEW_SPACE == 0);
-    for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
-      deserializer.set_reservation(i, scd.GetReservation(i));
+    int current_space = NEW_SPACE;
+    Vector<const SerializedCodeData::Reservation> res = scd.Reservations();
+    for (const auto& r : res) {
+      deserializer.AddReservation(current_space, r.chunk_size());
+      if (r.is_last_chunk()) current_space++;
     }
+    DCHECK_EQ(kNumberOfSpaces, current_space);
 
     // Prepare and register list of attached objects.
     Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
@@ -2124,7 +2182,12 @@ Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
     deserializer.SetAttachedObjects(&attached_objects);
 
     // Deserialize.
-    deserializer.DeserializePartial(isolate, &root);
+    deserializer.DeserializePartial(isolate, &root, Deserializer::NULL_ON_OOM);
+    if (root == NULL) {
+      // Deserializing may fail if the reservations cannot be fulfilled.
+      if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
+      return MaybeHandle<SharedFunctionInfo>();
+    }
     deserializer.FlushICacheForNewCodeObjects();
   }
 
@@ -2144,10 +2207,25 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
   DisallowHeapAllocation no_gc;
   List<uint32_t>* stub_keys = cs->stub_keys();
 
+  // Gather reservation chunk sizes.
+  List<uint32_t> reservations(SerializerDeserializer::kNumberOfSpaces);
+  STATIC_ASSERT(NEW_SPACE == 0);
+  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
+    Vector<const uint32_t> chunks = cs->FinalAllocationChunks(i);
+    for (int j = 0; j < chunks.length(); j++) {
+      DCHECK(i == LO_SPACE || chunks[j] < Page::kMaxRegularHeapObjectSize);
+      uint32_t chunk = ChunkSizeBits::encode(chunks[j]) |
+                       IsLastChunkBits::encode(j == chunks.length() - 1);
+      reservations.Add(chunk);
+    }
+  }
+
   // Calculate sizes.
+  int reservation_size = reservations.length() * kInt32Size;
   int num_stub_keys = stub_keys->length();
   int stub_keys_size = stub_keys->length() * kInt32Size;
-  int data_length = kHeaderSize + stub_keys_size + payload->length();
+  int data_length =
+      kHeaderSize + reservation_size + stub_keys_size + payload->length();
 
   // Allocate backing store and create result data.
   byte* data = NewArray<byte>(data_length);
@@ -2157,20 +2235,21 @@ SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
 
   // Set header values.
   SetHeaderValue(kCheckSumOffset, CheckSum(cs->source()));
+  SetHeaderValue(kReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
   SetHeaderValue(kPayloadLengthOffset, payload->length());
-  STATIC_ASSERT(NEW_SPACE == 0);
-  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
-    SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
-  }
+
+  // Copy reservation chunk sizes.
+  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+            reservation_size);
 
   // Copy code stub keys.
-  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()),
-            stub_keys_size);
+  CopyBytes(data + kHeaderSize + reservation_size,
+            reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
 
   // Copy serialized data.
-  CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(),
-            static_cast<size_t>(payload->length()));
+  CopyBytes(data + kHeaderSize + reservation_size + stub_keys_size,
+            payload->begin(), static_cast<size_t>(payload->length()));
 }
 
 
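The bookkeeping introduced above, restated as a self-contained model (standard C++, not V8 code; the page size and bit width are placeholder values): allocations within a space are grouped into chunks that never exceed a page, and a back reference is encoded as a chunk index plus an offset within that chunk instead of an offset from the current allocation top.

  #include <cassert>
  #include <cstdint>
  #include <vector>

  namespace model {

  const uint32_t kPageSize = 4096;  // Stand-in for Page::kMaxRegularHeapObjectSize.
  const int kOffsetBits = 12;       // Stand-in for kPageSizeBits.

  uint32_t Encode(uint32_t chunk_index, uint32_t offset) {
    return (chunk_index << kOffsetBits) | offset;
  }

  struct Allocator {
    std::vector<uint32_t> completed_chunks;  // Sizes of finished chunks.
    uint32_t pending_chunk = 0;              // Bytes used in the open chunk.

    uint32_t Allocate(uint32_t size) {
      assert(size > 0 && size <= kPageSize);
      if (pending_chunk + size > kPageSize) {
        // The object would not fit onto the current page-sized chunk:
        // complete the chunk and start a new one, as Serializer::Allocate does.
        completed_chunks.push_back(pending_chunk);
        pending_chunk = 0;
      }
      // Back references use (chunk index, offset), not a global offset.
      uint32_t allocation = Encode(
          static_cast<uint32_t>(completed_chunks.size()), pending_chunk);
      pending_chunk += size;
      return allocation;
    }
  };

  }  // namespace model

  int main() {
    model::Allocator a;
    uint32_t first = a.Allocate(3000);   // Lands in chunk 0 at offset 0.
    uint32_t second = a.Allocate(3000);  // Does not fit: chunk 0 is completed.
    assert(first == model::Encode(0, 0));
    assert(second == model::Encode(1, 0));
    return 0;
  }
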
diff --git a/src/serialize.h b/src/serialize.h
index 6fa7a34..6e1f651 100644
@@ -152,6 +152,11 @@ class SerializerDeserializer: public ObjectVisitor {
   static const int kNumberOfPreallocatedSpaces = LO_SPACE;
   static const int kNumberOfSpaces = INVALID_SPACE;
 
+  // To encode an allocation as chunk index and offset for back-references.
+  class OffsetBits : public BitField<uint32_t, 0, kPageSizeBits> {};
+  class ChunkIndexBits
+      : public BitField<uint32_t, kPageSizeBits, 32 - kPageSizeBits> {};
+
  protected:
   // Where the pointed-to object can be found:
   enum Where {
@@ -248,13 +253,18 @@ class Deserializer: public SerializerDeserializer {
   // Deserialize the snapshot into an empty heap.
   void Deserialize(Isolate* isolate);
 
+  enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM };
+
   // Deserialize a single object and the objects reachable from it.
-  void DeserializePartial(Isolate* isolate, Object** root);
+  // On failure to reserve space, on_oom decides whether to die or bail out.
+  void DeserializePartial(Isolate* isolate, Object** root,
+                          OnOOM on_oom = FATAL_ON_OOM);
 
-  void set_reservation(int space_number, int reservation) {
-    DCHECK(space_number >= 0);
-    DCHECK(space_number < kNumberOfSpaces);
-    reservations_[space_number] = reservation;
+  void AddReservation(int space, uint32_t chunk) {
+    DCHECK(space >= 0);
+    DCHECK(space < kNumberOfSpaces);
+    DCHECK(space == LO_SPACE || chunk < Page::kMaxRegularHeapObjectSize);
+    reservations_[space].Add({chunk, NULL, NULL});
   }
 
   void FlushICacheForNewCodeObjects();
@@ -274,6 +284,8 @@ class Deserializer: public SerializerDeserializer {
     UNREACHABLE();
   }
 
+  bool ReserveSpace();
+
   // Allocation sites are present in the snapshot, and must be linked into
   // a list at deserialization time.
   void RelinkAllocationSite(AllocationSite* site);
@@ -283,8 +295,8 @@ class Deserializer: public SerializerDeserializer {
   // of the object we are writing into, or NULL if we are not writing into an
   // object, i.e. if we are writing a series of tagged values that are not on
   // the heap.
-  void ReadChunk(
-      Object** start, Object** end, int space, Address object_address);
+  void ReadData(Object** start, Object** end, int space,
+                Address object_address);
   void ReadObject(int space_number, Object** write_back);
   Address Allocate(int space_index, int size);
 
@@ -293,13 +305,20 @@ class Deserializer: public SerializerDeserializer {
   Object* ProcessBackRefInSerializedCode(Object* obj);
 
   // This returns the address of an object that has been described in the
-  // snapshot as being offset bytes back in a particular space.
-  HeapObject* GetAddressFromEnd(int space) {
-    int offset = source_->GetInt();
-    if (space == LO_SPACE) return deserialized_large_objects_[offset];
-    DCHECK(space < kNumberOfPreallocatedSpaces);
-    offset <<= kObjectAlignmentBits;
-    return HeapObject::FromAddress(high_water_[space] - offset);
+  // snapshot by chunk index and offset.
+  HeapObject* GetBackReferencedObject(int space) {
+    if (space == LO_SPACE) {
+      uint32_t index = source_->GetInt();
+      return deserialized_large_objects_[index];
+    } else {
+      uint32_t allocation = source_->GetInt() << kObjectAlignmentBits;
+      DCHECK(space < kNumberOfPreallocatedSpaces);
+      uint32_t chunk_index = ChunkIndexBits::decode(allocation);
+      uint32_t offset = OffsetBits::decode(allocation);
+      DCHECK_LE(chunk_index, current_chunk_[space]);
+      return HeapObject::FromAddress(reservations_[space][chunk_index].start +
+                                     offset);
+    }
   }
 
   // Cached current isolate.
@@ -309,13 +328,14 @@ class Deserializer: public SerializerDeserializer {
   Vector<Handle<Object> >* attached_objects_;
 
   SnapshotByteSource* source_;
-  // This is the address of the next object that will be allocated in each
-  // space.  It is used to calculate the addresses of back-references.
+  // The address of the next object that will be allocated in each space.
+  // Each space has a number of chunks reserved by the GC, with each chunk
+  // fitting into a page. Deserialized objects are allocated into the
+  // current chunk of the target space by bumping up the high water mark.
+  Heap::Reservation reservations_[kNumberOfSpaces];
+  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
   Address high_water_[kNumberOfPreallocatedSpaces];
 
-  int reservations_[kNumberOfSpaces];
-  static const intptr_t kUninitializedReservation = -1;
-
   ExternalReferenceDecoder* external_reference_decoder_;
 
   List<HeapObject*> deserialized_large_objects_;
@@ -380,11 +400,13 @@ class Serializer : public SerializerDeserializer {
   Serializer(Isolate* isolate, SnapshotByteSink* sink);
   ~Serializer();
   void VisitPointers(Object** start, Object** end);
-  // You can call this after serialization to find out how much space was used
-  // in each space.
-  int CurrentAllocationAddress(int space) const {
-    DCHECK(space < kNumberOfSpaces);
-    return fullness_[space];
+
+  void FinalizeAllocation();
+
+  Vector<const uint32_t> FinalAllocationChunks(int space) const {
+    DCHECK_EQ(1, completed_chunks_[LO_SPACE].length());  // Already finalized.
+    DCHECK_EQ(0, pending_chunk_[space]);                 // No pending chunks.
+    return completed_chunks_[space].ToConstVector();
   }
 
   Isolate* isolate() const { return isolate_; }
@@ -470,8 +492,8 @@ class Serializer : public SerializerDeserializer {
   void InitializeAllocators();
   // This will return the space for an object.
   static int SpaceOfObject(HeapObject* object);
-  int AllocateLargeObject(int size);
-  int Allocate(int space, int size);
+  uint32_t AllocateLargeObject(int size);
+  uint32_t Allocate(int space, int size);
   int EncodeExternalReference(Address addr) {
     return external_reference_encoder_->Encode(addr);
   }
@@ -483,9 +505,14 @@ class Serializer : public SerializerDeserializer {
   bool ShouldBeSkipped(Object** current);
 
   Isolate* isolate_;
-  // Keep track of the fullness of each space in order to generate
-  // relative addresses for back references.
-  int fullness_[kNumberOfSpaces];
+
+  // Objects from the same space are put into chunks for bulk-allocation
+  // when deserializing. Each chunk has to fit onto a page, so we track the
+  // size of the current chunk in pending_chunk_ per space; once adding an
+  // object would exceed a page, we complete the chunk and start a new one.
+  uint32_t pending_chunk_[kNumberOfSpaces];
+  List<uint32_t> completed_chunks_[kNumberOfSpaces];
+
   SnapshotByteSink* sink_;
   ExternalReferenceEncoder* external_reference_encoder_;
 
@@ -503,7 +530,7 @@ class Serializer : public SerializerDeserializer {
  private:
   CodeAddressMap* code_address_map_;
   // We map serialized large objects to indexes for back-referencing.
-  int seen_large_objects_index_;
+  uint32_t seen_large_objects_index_;
   DISALLOW_COPY_AND_ASSIGN(Serializer);
 };
 
@@ -585,9 +612,8 @@ class CodeSerializer : public Serializer {
                                Handle<SharedFunctionInfo> info,
                                Handle<String> source);
 
-  static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate,
-                                                ScriptData* data,
-                                                Handle<String> source);
+  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
+      Isolate* isolate, ScriptData* data, Handle<String> source);
 
   static const int kSourceObjectIndex = 0;
   static const int kCodeStubsBaseIndex = 1;
@@ -654,15 +680,35 @@ class SerializedCodeData {
     return result;
   }
 
+  class Reservation {
+   public:
+    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); }
+    bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); }
+
+   private:
+    uint32_t reservation;
+
+    DISALLOW_COPY_AND_ASSIGN(Reservation);
+  };
+
+  Vector<const Reservation> Reservations() const {
+    return Vector<const Reservation>(reinterpret_cast<const Reservation*>(
+                                         script_data_->data() + kHeaderSize),
+                                     GetHeaderValue(kReservationsOffset));
+  }
+
   Vector<const uint32_t> CodeStubKeys() const {
-    return Vector<const uint32_t>(
-        reinterpret_cast<const uint32_t*>(script_data_->data() + kHeaderSize),
-        GetHeaderValue(kNumCodeStubKeysOffset));
+    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+    const byte* start = script_data_->data() + kHeaderSize + reservations_size;
+    return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
+                                  GetHeaderValue(kNumCodeStubKeysOffset));
   }
 
   const byte* Payload() const {
+    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
     int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
-    return script_data_->data() + kHeaderSize + code_stubs_size;
+    return script_data_->data() + kHeaderSize + reservations_size +
+           code_stubs_size;
   }
 
   int PayloadLength() const {
@@ -672,10 +718,6 @@ class SerializedCodeData {
     return payload_length;
   }
 
-  int GetReservation(int space) const {
-    return GetHeaderValue(kReservationsOffset + space);
-  }
-
  private:
   void SetHeaderValue(int offset, int value) {
     reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
@@ -696,13 +738,13 @@ class SerializedCodeData {
   // [2] payload length
   // [3..9] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
   static const int kCheckSumOffset = 0;
-  static const int kNumCodeStubKeysOffset = 1;
-  static const int kPayloadLengthOffset = 2;
-  static const int kReservationsOffset = 3;
+  static const int kReservationsOffset = 1;
+  static const int kNumCodeStubKeysOffset = 2;
+  static const int kPayloadLengthOffset = 3;
+  static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
 
-  static const int kHeaderEntries =
-      kReservationsOffset + SerializerDeserializer::kNumberOfSpaces;
-  static const int kHeaderSize = kHeaderEntries * kIntSize;
+  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
+  class IsLastChunkBits : public BitField<bool, 31, 1> {};
 
   // Following the header, we store, in sequential order
   // - code stub keys
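
For reference, the code-cache layout that the constants above define, plus the consumer-side grouping of reservation entries back into spaces. The loop restates CodeSerializer::Deserialize from serialize.cc above; it is not additional API.

  // SerializedCodeData layout (int-sized header entries, then raw bytes):
  //   [0] checksum of the source string
  //   [1] number of reservation entries
  //   [2] number of code stub keys
  //   [3] payload length
  //   ... reservation entries: each uint32_t is ChunkSizeBits | IsLastChunkBits;
  //       one run of entries per space, terminated by is_last_chunk()
  //   ... code stub keys
  //   ... payload
  //
  // Grouping entries back into spaces on the consumer side:
  int current_space = NEW_SPACE;
  for (const auto& r : scd.Reservations()) {
    deserializer.AddReservation(current_space, r.chunk_size());
    if (r.is_last_chunk()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
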
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index 4e90ce1..b152ad3 100644
@@ -15,14 +15,14 @@ namespace v8 {
 namespace internal {
 
 void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
-  deserializer->set_reservation(NEW_SPACE, new_space_used_);
-  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
-  deserializer->set_reservation(OLD_DATA_SPACE, data_space_used_);
-  deserializer->set_reservation(CODE_SPACE, code_space_used_);
-  deserializer->set_reservation(MAP_SPACE, map_space_used_);
-  deserializer->set_reservation(CELL_SPACE, cell_space_used_);
-  deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
-  deserializer->set_reservation(LO_SPACE, lo_space_used_);
+  deserializer->AddReservation(NEW_SPACE, new_space_used_);
+  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_space_used_);
+  deserializer->AddReservation(OLD_DATA_SPACE, data_space_used_);
+  deserializer->AddReservation(CODE_SPACE, code_space_used_);
+  deserializer->AddReservation(MAP_SPACE, map_space_used_);
+  deserializer->AddReservation(CELL_SPACE, cell_space_used_);
+  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
+  deserializer->AddReservation(LO_SPACE, lo_space_used_);
 }
 
 
@@ -59,15 +59,15 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
                             context_raw_size_);
   Deserializer deserializer(&source);
   Object* root;
-  deserializer.set_reservation(NEW_SPACE, context_new_space_used_);
-  deserializer.set_reservation(OLD_POINTER_SPACE, context_pointer_space_used_);
-  deserializer.set_reservation(OLD_DATA_SPACE, context_data_space_used_);
-  deserializer.set_reservation(CODE_SPACE, context_code_space_used_);
-  deserializer.set_reservation(MAP_SPACE, context_map_space_used_);
-  deserializer.set_reservation(CELL_SPACE, context_cell_space_used_);
-  deserializer.set_reservation(PROPERTY_CELL_SPACE,
-                               context_property_cell_space_used_);
-  deserializer.set_reservation(LO_SPACE, context_lo_space_used_);
+  deserializer.AddReservation(NEW_SPACE, context_new_space_used_);
+  deserializer.AddReservation(OLD_POINTER_SPACE, context_pointer_space_used_);
+  deserializer.AddReservation(OLD_DATA_SPACE, context_data_space_used_);
+  deserializer.AddReservation(CODE_SPACE, context_code_space_used_);
+  deserializer.AddReservation(MAP_SPACE, context_map_space_used_);
+  deserializer.AddReservation(CELL_SPACE, context_cell_space_used_);
+  deserializer.AddReservation(PROPERTY_CELL_SPACE,
+                              context_property_cell_space_used_);
+  deserializer.AddReservation(LO_SPACE, context_lo_space_used_);
   deserializer.DeserializePartial(isolate, &root);
   CHECK(root->IsContext());
   return Handle<Context>(Context::cast(root));
diff --git a/src/snapshot-external.cc b/src/snapshot-external.cc
index 9b8bc1b..838e93f 100644
@@ -58,17 +58,16 @@ bool Snapshot::Initialize(Isolate* isolate) {
   }
   SnapshotByteSource source(snapshot_impl_->data, snapshot_impl_->size);
   Deserializer deserializer(&source);
-  deserializer.set_reservation(NEW_SPACE, snapshot_impl_->new_space_used);
-  deserializer.set_reservation(OLD_POINTER_SPACE,
-                               snapshot_impl_->pointer_space_used);
-  deserializer.set_reservation(OLD_DATA_SPACE,
-                               snapshot_impl_->data_space_used);
-  deserializer.set_reservation(CODE_SPACE, snapshot_impl_->code_space_used);
-  deserializer.set_reservation(MAP_SPACE, snapshot_impl_->map_space_used);
-  deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
-  deserializer.set_reservation(PROPERTY_CELL_SPACE,
-                               snapshot_impl_->property_cell_space_used);
-  deserializer.set_reservation(LO_SPACE, snapshot_impl_->lo_space_used);
+  deserializer.AddReservation(NEW_SPACE, snapshot_impl_->new_space_used);
+  deserializer.AddReservation(OLD_POINTER_SPACE,
+                              snapshot_impl_->pointer_space_used);
+  deserializer.AddReservation(OLD_DATA_SPACE, snapshot_impl_->data_space_used);
+  deserializer.AddReservation(CODE_SPACE, snapshot_impl_->code_space_used);
+  deserializer.AddReservation(MAP_SPACE, snapshot_impl_->map_space_used);
+  deserializer.AddReservation(CELL_SPACE, snapshot_impl_->cell_space_used);
+  deserializer.AddReservation(PROPERTY_CELL_SPACE,
+                              snapshot_impl_->property_cell_space_used);
+  deserializer.AddReservation(LO_SPACE, snapshot_impl_->lo_space_used);
   bool success = isolate->Init(&deserializer);
   if (FLAG_profile_deserialization) {
     double ms = timer.Elapsed().InMillisecondsF();
@@ -85,22 +84,21 @@ Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
   SnapshotByteSource source(snapshot_impl_->context_data,
                             snapshot_impl_->context_size);
   Deserializer deserializer(&source);
-  deserializer.set_reservation(NEW_SPACE,
-                               snapshot_impl_->context_new_space_used);
-  deserializer.set_reservation(OLD_POINTER_SPACE,
-                               snapshot_impl_->context_pointer_space_used);
-  deserializer.set_reservation(OLD_DATA_SPACE,
-                               snapshot_impl_->context_data_space_used);
-  deserializer.set_reservation(CODE_SPACE,
-                               snapshot_impl_->context_code_space_used);
-  deserializer.set_reservation(MAP_SPACE,
-                               snapshot_impl_->context_map_space_used);
-  deserializer.set_reservation(CELL_SPACE,
-                               snapshot_impl_->context_cell_space_used);
-  deserializer.set_reservation(PROPERTY_CELL_SPACE,
-                               snapshot_impl_->
-                                   context_property_cell_space_used);
-  deserializer.set_reservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
+  deserializer.AddReservation(NEW_SPACE,
+                              snapshot_impl_->context_new_space_used);
+  deserializer.AddReservation(OLD_POINTER_SPACE,
+                              snapshot_impl_->context_pointer_space_used);
+  deserializer.AddReservation(OLD_DATA_SPACE,
+                              snapshot_impl_->context_data_space_used);
+  deserializer.AddReservation(CODE_SPACE,
+                              snapshot_impl_->context_code_space_used);
+  deserializer.AddReservation(MAP_SPACE,
+                              snapshot_impl_->context_map_space_used);
+  deserializer.AddReservation(CELL_SPACE,
+                              snapshot_impl_->context_cell_space_used);
+  deserializer.AddReservation(PROPERTY_CELL_SPACE,
+                              snapshot_impl_->context_property_cell_space_used);
+  deserializer.AddReservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
   Object* root;
   deserializer.DeserializePartial(isolate, &root);
   CHECK(root->IsContext());
diff --git a/src/vector.h b/src/vector.h
index d3ba775..a4fdb10 100644
@@ -58,6 +58,10 @@ class Vector {
 
   T& last() { return start_[length_ - 1]; }
 
+  typedef T* iterator;
+  inline iterator begin() const { return &start_[0]; }
+  inline iterator end() const { return &start_[length_]; }
+
   // Returns a clone of this vector with a new backing store.
   Vector<T> Clone() const {
     T* result = NewArray<T>(length_);
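
Side note on the vector.h change, illustration only: begin() and end() make Vector usable in range-based for loops, which the patch relies on when iterating over reservation entries and final chunk lists. A tiny sketch; ser names some finalized serializer and is hypothetical:

  uint32_t total = 0;
  for (uint32_t chunk_size : ser.FinalAllocationChunks(CODE_SPACE)) {
    total += chunk_size;  // Sum of the page-sized chunks reserved for the space.
  }
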
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index 8ba5c79..15c587b 100644
@@ -137,10 +137,7 @@ class FileByteSink : public SnapshotByteSink {
   virtual int Position() {
     return ftell(fp_);
   }
-  void WriteSpaceUsed(int new_space_used, int pointer_space_used,
-                      int data_space_used, int code_space_used,
-                      int map_space_used, int cell_space_used,
-                      int property_cell_space_used, int lo_space_used);
+  void WriteSpaceUsed(Serializer* serializer);
 
  private:
   FILE* fp_;
@@ -148,24 +145,37 @@ class FileByteSink : public SnapshotByteSink {
 };
 
 
-void FileByteSink::WriteSpaceUsed(int new_space_used, int pointer_space_used,
-                                  int data_space_used, int code_space_used,
-                                  int map_space_used, int cell_space_used,
-                                  int property_cell_space_used,
-                                  int lo_space_used) {
+void FileByteSink::WriteSpaceUsed(Serializer* ser) {
   int file_name_length = StrLength(file_name_) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   SNPrintF(name, "%s.size", file_name_);
   FILE* fp = v8::base::OS::FOpen(name.start(), "w");
   name.Dispose();
-  fprintf(fp, "new %d\n", new_space_used);
-  fprintf(fp, "pointer %d\n", pointer_space_used);
-  fprintf(fp, "data %d\n", data_space_used);
-  fprintf(fp, "code %d\n", code_space_used);
-  fprintf(fp, "map %d\n", map_space_used);
-  fprintf(fp, "cell %d\n", cell_space_used);
-  fprintf(fp, "property cell %d\n", property_cell_space_used);
-  fprintf(fp, "lo %d\n", lo_space_used);
+
+  Vector<const uint32_t> chunks = ser->FinalAllocationChunks(NEW_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "new %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(OLD_POINTER_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "pointer %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(OLD_DATA_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "data %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(CODE_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "code %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(MAP_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "map %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(CELL_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "cell %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(PROPERTY_CELL_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "property cell %d\n", chunks[0]);
+  chunks = ser->FinalAllocationChunks(LO_SPACE);
+  CHECK_EQ(1, chunks.length());
+  fprintf(fp, "lo %d\n", chunks[0]);
   fclose(fp);
 }
 
@@ -174,15 +184,9 @@ static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
   FileByteSink file(snapshot_file);
   StartupSerializer ser(isolate, &file);
   ser.Serialize();
+  ser.FinalizeAllocation();
 
-  file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
-                      ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-                      ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-                      ser.CurrentAllocationAddress(CODE_SPACE),
-                      ser.CurrentAllocationAddress(MAP_SPACE),
-                      ser.CurrentAllocationAddress(CELL_SPACE),
-                      ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-                      ser.CurrentAllocationAddress(LO_SPACE));
+  file.WriteSpaceUsed(&ser);
 
   return true;
 }
@@ -258,14 +262,14 @@ static void ReserveSpaceForSnapshot(Deserializer* deserializer,
 #undef fscanf
 #endif
   fclose(fp);
-  deserializer->set_reservation(NEW_SPACE, new_size);
-  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
-  deserializer->set_reservation(OLD_DATA_SPACE, data_size);
-  deserializer->set_reservation(CODE_SPACE, code_size);
-  deserializer->set_reservation(MAP_SPACE, map_size);
-  deserializer->set_reservation(CELL_SPACE, cell_size);
-  deserializer->set_reservation(PROPERTY_CELL_SPACE, property_cell_size);
-  deserializer->set_reservation(LO_SPACE, lo_size);
+  deserializer->AddReservation(NEW_SPACE, new_size);
+  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_size);
+  deserializer->AddReservation(OLD_DATA_SPACE, data_size);
+  deserializer->AddReservation(CODE_SPACE, code_size);
+  deserializer->AddReservation(MAP_SPACE, map_size);
+  deserializer->AddReservation(CELL_SPACE, cell_size);
+  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_size);
+  deserializer->AddReservation(LO_SPACE, lo_size);
 }
 
 
@@ -445,25 +449,12 @@ UNINITIALIZED_TEST(PartialSerialization) {
       p_ser.Serialize(&raw_foo);
       startup_serializer.SerializeWeakReferences();
 
-      partial_sink.WriteSpaceUsed(
-          p_ser.CurrentAllocationAddress(NEW_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-          p_ser.CurrentAllocationAddress(CODE_SPACE),
-          p_ser.CurrentAllocationAddress(MAP_SPACE),
-          p_ser.CurrentAllocationAddress(CELL_SPACE),
-          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          p_ser.CurrentAllocationAddress(LO_SPACE));
-
-      startup_sink.WriteSpaceUsed(
-          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(LO_SPACE));
+      p_ser.FinalizeAllocation();
+      startup_serializer.FinalizeAllocation();
+
+      partial_sink.WriteSpaceUsed(&p_ser);
+
+      startup_sink.WriteSpaceUsed(&startup_serializer);
       startup_name.Dispose();
     }
     v8_isolate->Exit();
@@ -570,25 +561,12 @@ UNINITIALIZED_TEST(ContextSerialization) {
       p_ser.Serialize(&raw_context);
       startup_serializer.SerializeWeakReferences();
 
-      partial_sink.WriteSpaceUsed(
-          p_ser.CurrentAllocationAddress(NEW_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-          p_ser.CurrentAllocationAddress(CODE_SPACE),
-          p_ser.CurrentAllocationAddress(MAP_SPACE),
-          p_ser.CurrentAllocationAddress(CELL_SPACE),
-          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          p_ser.CurrentAllocationAddress(LO_SPACE));
-
-      startup_sink.WriteSpaceUsed(
-          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE),
-          startup_serializer.CurrentAllocationAddress(LO_SPACE));
+      p_ser.FinalizeAllocation();
+      startup_serializer.FinalizeAllocation();
+
+      partial_sink.WriteSpaceUsed(&p_ser);
+
+      startup_sink.WriteSpaceUsed(&startup_serializer);
       startup_name.Dispose();
     }
     v8_isolate->Dispose();
@@ -901,6 +879,78 @@ TEST(SerializeToplevelLargeString) {
 }
 
 
+TEST(SerializeToplevelThreeBigStrings) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* f = isolate->factory();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  Vector<const uint8_t> source_a =
+      ConstructSource(STATIC_CHAR_VECTOR("var a = \""), STATIC_CHAR_VECTOR("a"),
+                      STATIC_CHAR_VECTOR("\";"), 700000);
+  Handle<String> source_a_str =
+      f->NewStringFromOneByte(source_a).ToHandleChecked();
+
+  Vector<const uint8_t> source_b =
+      ConstructSource(STATIC_CHAR_VECTOR("var b = \""), STATIC_CHAR_VECTOR("b"),
+                      STATIC_CHAR_VECTOR("\";"), 600000);
+  Handle<String> source_b_str =
+      f->NewStringFromOneByte(source_b).ToHandleChecked();
+
+  Vector<const uint8_t> source_c =
+      ConstructSource(STATIC_CHAR_VECTOR("var c = \""), STATIC_CHAR_VECTOR("c"),
+                      STATIC_CHAR_VECTOR("\";"), 500000);
+  Handle<String> source_c_str =
+      f->NewStringFromOneByte(source_c).ToHandleChecked();
+
+  Handle<String> source_str =
+      f->NewConsString(
+             f->NewConsString(source_a_str, source_b_str).ToHandleChecked(),
+             source_c_str).ToHandleChecked();
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      source_str, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        source_str, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+
+  Execution::Call(isolate, copy_fun, global, 0, NULL);
+
+  CHECK_EQ(600000 + 700000, CompileRun("(a + b).length")->Int32Value());
+  CHECK_EQ(500000 + 600000, CompileRun("(b + c).length")->Int32Value());
+  Heap* heap = isolate->heap();
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("a")->ToString()),
+                      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("b")->ToString()),
+                      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("c")->ToString()),
+                      OLD_DATA_SPACE));
+
+  delete cache;
+  source_a.Dispose();
+  source_b.Dispose();
+  source_c.Dispose();
+}
+
+
 class SerializerOneByteResource
     : public v8::String::ExternalOneByteStringResource {
  public: