void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
+ // Weak roots (eg the symbol table) are visited after the strong roots;
+ // this matches the serialization order used by StartupSerializer.
+ IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
v->Synchronize("symbol_table");
if (mode != VISIT_ALL_IN_SCAVENGE) {
// Iterate over pointers being held by inactive threads.
ThreadManager::Iterate(v);
v->Synchronize("threadmanager");
+
+ // Iterate over the pointers the Serialization/Deserialization code is
+ // holding.
+ // During garbage collection this keeps the partial snapshot cache alive.
+ // During deserialization of the startup snapshot this creates the partial
+ // snapshot cache and deserializes the objects it refers to. During
+ // serialization this does nothing, since the partial snapshot cache is
+ // empty. However the next thing we do is create the partial snapshot,
+ // filling up the partial snapshot cache with objects it needs as we go.
+ SerializerDeserializer::Iterate(v);
+ // We don't do a v->Synchronize call here, because in debug mode that will
+ // output a flag to the snapshot. However at this point the serializer and
+ // deserializer are deliberately a little unsynchronized (see above) so the
+ // checking of the sync flag in the snapshot would fail.
}
static void IterateRoots(ObjectVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
static void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over all the other roots in the heap.
+ static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
// Iterates remembered set of an old space.
static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
}
context.Dispose();
CppByteSink sink(argv[1]);
- i::Serializer ser(&sink);
// This results in a somewhat smaller snapshot, probably because it gets rid
// of some things that are cached between garbage collections.
i::Heap::CollectAllGarbage(true);
+ i::StartupSerializer ser(&sink);
ser.Serialize();
return 0;
}
namespace v8 {
namespace internal {
-// Mapping objects to their location after deserialization.
-// This is used during building, but not at runtime by V8.
-class SerializationAddressMapper {
- public:
- static bool IsMapped(HeapObject* obj) {
- EnsureMapExists();
- return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
- }
-
- static int MappedTo(HeapObject* obj) {
- ASSERT(IsMapped(obj));
- return static_cast<int>(reinterpret_cast<intptr_t>(
- serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
- }
-
- static void Map(HeapObject* obj, int to) {
- EnsureMapExists();
- ASSERT(!IsMapped(obj));
- HashMap::Entry* entry =
- serialization_map_->Lookup(Key(obj), Hash(obj), true);
- entry->value = Value(to);
- }
-
- static void Zap() {
- if (serialization_map_ != NULL) {
- delete serialization_map_;
- }
- serialization_map_ = NULL;
- }
-
- private:
- static bool SerializationMatchFun(void* key1, void* key2) {
- return key1 == key2;
- }
-
- static uint32_t Hash(HeapObject* obj) {
- return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
- }
-
- static void* Key(HeapObject* obj) {
- return reinterpret_cast<void*>(obj->address());
- }
-
- static void* Value(int v) {
- return reinterpret_cast<void*>(v);
- }
-
- static void EnsureMapExists() {
- if (serialization_map_ == NULL) {
- serialization_map_ = new HashMap(&SerializationMatchFun);
- }
- }
-
- static HashMap* serialization_map_;
-};
-
-
-HashMap* SerializationAddressMapper::serialization_map_ = NULL;
-
-
-
// -----------------------------------------------------------------------------
// Coding of external references.
ASSERT_EQ(NULL, ThreadState::FirstInUse());
// No active handles.
ASSERT(HandleScopeImplementer::instance()->blocks()->is_empty());
+ // Make sure the entire partial snapshot cache is traversed, filling it with
+ // valid object pointers.
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
ASSERT_EQ(NULL, external_reference_decoder_);
external_reference_decoder_ = new ExternalReferenceDecoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
- ASSERT(source_->AtEOF());
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateWeakRoots(this, VISIT_ALL);
}
}
-void Deserializer::TearDown() {
+Deserializer::~Deserializer() {
+ ASSERT(source_->AtEOF());
if (external_reference_decoder_ != NULL) {
delete external_reference_decoder_;
external_reference_decoder_ = NULL;
*current++ = Heap::roots_address()[root_id];
break;
}
+ case PARTIAL_SNAPSHOT_CACHE_ENTRY: {
+ int cache_index = source_->GetInt();
+ *current++ = partial_snapshot_cache_[cache_index];
+ break;
+ }
+ case SYNCHRONIZE: {
+ // If we get here then that indicates that you have a mismatch between
+ // the number of GC roots when serializing and deserializing.
+ UNREACHABLE();
+ }
default:
UNREACHABLE();
}
: sink_(sink),
current_root_index_(0),
external_reference_encoder_(NULL),
- partial_(false),
large_object_total_(0) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
-void Serializer::Serialize() {
+void StartupSerializer::SerializeStrongReferences() {
// No active threads.
CHECK_EQ(NULL, ThreadState::FirstInUse());
// No active or weak handles.
CHECK_NE(v8::INSTALLED, ext->state());
}
external_reference_encoder_ = new ExternalReferenceEncoder();
- Heap::IterateRoots(this, VISIT_ONLY_STRONG);
+ Heap::IterateStrongRoots(this, VISIT_ONLY_STRONG);
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
-void Serializer::SerializePartial(Object** object) {
- partial_ = true;
+// Serialize the objects reachable from a single object pointer. This is the
+// entry point for creating the partial (context) snapshot.
+void PartialSerializer::Serialize(Object** object) {
external_reference_encoder_ = new ExternalReferenceEncoder();
this->VisitPointer(object);
+
+ // After we have done the partial serialization the partial snapshot cache
+ // will contain some references needed to decode the partial snapshot. We
+ // fill it up with undefineds so it has a predictable length so the
+ // deserialization code doesn't need to know the length.
+ for (int index = partial_snapshot_cache_length_;
+ index < kPartialSnapshotCacheCapacity;
+ index++) {
+ partial_snapshot_cache_[index] = Heap::undefined_value();
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[index]);
+ }
+ partial_snapshot_cache_length_ = kPartialSnapshotCacheCapacity;
+
delete external_reference_encoder_;
external_reference_encoder_ = NULL;
- SerializationAddressMapper::Zap();
}
}
-int Serializer::RootIndex(HeapObject* heap_object) {
+// Static storage for the partial snapshot cache. The cache is shared by the
+// startup serializer, the partial serializer and the deserializer (it is
+// declared on their common base class, SerializerDeserializer).
+Object* SerializerDeserializer::partial_snapshot_cache_[
+ kPartialSnapshotCacheCapacity];
+int SerializerDeserializer::partial_snapshot_cache_length_ = 0;
+
+
+// This ensures that the partial snapshot cache keeps things alive during GC and
+// tracks their movement. When it is called during serialization of the startup
+// snapshot the partial snapshot is empty, so nothing happens. When the partial
+// (context) snapshot is created, this array is populated with the pointers that
+// the partial snapshot will need. As that happens we emit serialized objects to
+// the startup snapshot that correspond to the elements of this cache array. On
+// deserialization we therefore need to visit the cache array. This fills it up
+// with pointers to deserialized objects.
+void SerializerDeserializer::Iterate(ObjectVisitor *visitor) {
+ // Only the populated prefix of the cache array is visited.
+ visitor->VisitPointers(
+ &partial_snapshot_cache_[0],
+ &partial_snapshot_cache_[partial_snapshot_cache_length_]);
+}
+
+
+// When deserializing we need to set the size of the snapshot cache. This means
+// the root iteration code (above) will iterate over array elements, writing the
+// references to deserialized objects in them.
+void SerializerDeserializer::SetSnapshotCacheSize(int size) {
+ partial_snapshot_cache_length_ = size;
+}
+
+
+// Returns the index of |heap_object| in the partial snapshot cache, adding
+// the object to the cache (and emitting it into the startup snapshot via the
+// startup serializer) if it is not present yet.
+int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
+ // Linear scan of the populated prefix; the cache is bounded by
+ // kPartialSnapshotCacheCapacity, so this stays cheap.
+ for (int i = 0; i < partial_snapshot_cache_length_; i++) {
+ Object* entry = partial_snapshot_cache_[i];
+ if (entry == heap_object) return i;
+ }
+ // We didn't find the object in the cache. So we add it to the cache and
+ // then visit the pointer so that it becomes part of the startup snapshot
+ // and we can refer to it from the partial snapshot.
+ int length = partial_snapshot_cache_length_;
+ CHECK(length < kPartialSnapshotCacheCapacity);
+ partial_snapshot_cache_[length] = heap_object;
+ startup_serializer_->VisitPointer(&partial_snapshot_cache_[length]);
+ // We don't recurse from the startup snapshot generator into the partial
+ // snapshot generator.
+ ASSERT(length == partial_snapshot_cache_length_);
+ return partial_snapshot_cache_length_++;
+}
+
+
+int PartialSerializer::RootIndex(HeapObject* heap_object) {
for (int i = 0; i < Heap::kRootListLength; i++) {
Object* root = Heap::roots_address()[i];
if (root == heap_object) return i;
}
-void Serializer::SerializeObject(
- Object* o,
+// Encode the location of an already deserialized object in order to write its
+// location into a later object. We can encode the location as an offset from
+// the start of the deserialized objects or as an offset backwards from the
+// current allocation pointer.
+void Serializer::SerializeReferenceToPreviousObject(
+ int space,
+ int address,
ReferenceRepresentation reference_representation) {
- CHECK(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- if (partial_) {
- int root_index = RootIndex(heap_object);
- if (root_index != kInvalidRootIndex) {
- sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
- sink_->PutInt(root_index, "root_index");
- return;
+ int offset = CurrentAllocationAddress(space) - address;
+ bool from_start = true;
+ if (SpaceIsPaged(space)) {
+ // For paged space it is simple to encode back from current allocation if
+ // the object is on the same page as the current allocation pointer.
+ if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
+ (address >> kPageSizeBits)) {
+ from_start = false;
+ address = offset;
}
- // All the symbols that the snapshot needs should be in the root table.
- ASSERT(!heap_object->IsSymbol());
- }
- if (SerializationAddressMapper::IsMapped(heap_object)) {
- int space = SpaceOfAlreadySerializedObject(heap_object);
- int address = SerializationAddressMapper::MappedTo(heap_object);
- int offset = CurrentAllocationAddress(space) - address;
- bool from_start = true;
- if (SpaceIsPaged(space)) {
- if ((CurrentAllocationAddress(space) >> kPageSizeBits) ==
- (address >> kPageSizeBits)) {
- from_start = false;
- address = offset;
- }
- } else if (space == NEW_SPACE) {
- if (offset < address) {
- from_start = false;
- address = offset;
- }
+ } else if (space == NEW_SPACE) {
+ // For new space it is always simple to encode back from current allocation.
+ if (offset < address) {
+ from_start = false;
+ address = offset;
}
- // If we are actually dealing with real offsets (and not a numbering of
- // all objects) then we should shift out the bits that are always 0.
- if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
- if (reference_representation == CODE_TARGET_REPRESENTATION) {
- if (from_start) {
- sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
- sink_->PutInt(address, "address");
- } else {
- sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
- sink_->PutInt(address, "address");
- }
+ }
+ // If we are actually dealing with real offsets (and not a numbering of
+ // all objects) then we should shift out the bits that are always 0.
+ if (!SpaceIsLarge(space)) address >>= kObjectAlignmentBits;
+ // On some architectures references between code objects are encoded
+ // specially (as relative offsets). Such references have their own
+ // special tags to simplify the deserializer.
+ if (reference_representation == CODE_TARGET_REPRESENTATION) {
+ if (from_start) {
+ sink_->Put(CODE_REFERENCE_SERIALIZATION + space, "RefCodeSer");
+ sink_->PutInt(address, "address");
} else {
- CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
- if (from_start) {
-#define COMMON_REFS_CASE(tag, common_space, common_offset) \
- if (space == common_space && address == common_offset) { \
- sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
- } else /* NOLINT */
- COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
+ sink_->Put(CODE_BACKREF_SERIALIZATION + space, "BackRefCodeSer");
+ sink_->PutInt(address, "address");
+ }
+ } else {
+ // Regular absolute references.
+ CHECK_EQ(TAGGED_REPRESENTATION, reference_representation);
+ if (from_start) {
+ // There are some common offsets that have their own specialized encoding.
+#define COMMON_REFS_CASE(tag, common_space, common_offset) \
+ if (space == common_space && address == common_offset) { \
+ sink_->PutSection(tag + REFERENCE_SERIALIZATION, "RefSer"); \
+ } else /* NOLINT */
+ COMMON_REFERENCE_PATTERNS(COMMON_REFS_CASE)
#undef COMMON_REFS_CASE
- { /* NOLINT */
- sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
- sink_->PutInt(address, "address");
- }
- } else {
- sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ { /* NOLINT */
+ sink_->Put(REFERENCE_SERIALIZATION + space, "RefSer");
sink_->PutInt(address, "address");
}
+ } else {
+ sink_->Put(BACKREF_SERIALIZATION + space, "BackRefSer");
+ sink_->PutInt(address, "address");
}
+ }
+}
+
+
+// Serialize one object into the startup snapshot. Objects that have already
+// been serialized (tracked by the address mapper) are emitted as references
+// to their previous location; everything else is serialized in full.
+void StartupSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ // Already serialized: encode a back-reference or from-start reference.
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
+ } else {
+ // Object has not yet been serialized. Serialize it here.
+ ObjectSerializer object_serializer(this,
+ heap_object,
+ sink_,
+ reference_representation);
+ object_serializer.Serialize();
+ }
+}
+
+
+// Serialize the tail of the startup snapshot: first pad the partial snapshot
+// cache out to its full capacity with undefined (emitted as root references,
+// so the cache has a predictable length on deserialization), then serialize
+// the weak roots (eg the symbol table).
+void StartupSerializer::SerializeWeakReferences() {
+ for (int i = partial_snapshot_cache_length_;
+ i < kPartialSnapshotCacheCapacity;
+ i++) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(Heap::kUndefinedValueRootIndex, "root_index");
+ }
+ Heap::IterateWeakRoots(this, VISIT_ALL);
+}
+
+
+void PartialSerializer::SerializeObject(
+ Object* o,
+ ReferenceRepresentation reference_representation) {
+ CHECK(o->IsHeapObject());
+ HeapObject* heap_object = HeapObject::cast(o);
+
+ int root_index;
+ if ((root_index = RootIndex(heap_object)) != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
+ }
+
+ if (ShouldBeInThePartialSnapshotCache(heap_object)) {
+ int cache_index = PartialSnapshotCacheIndex(heap_object);
+ sink_->Put(PARTIAL_SNAPSHOT_CACHE_ENTRY, "PartialSnapshotCache");
+ sink_->PutInt(cache_index, "partial_snapshot_cache_index");
+ return;
+ }
+
+ // Pointers from the partial snapshot to the objects in the startup snapshot
+ // should go through the root array or through the partial snapshot cache.
+ // If this is not the case you may have to add something to the root array.
+ ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
+ // All the symbols that the partial snapshot needs should be either in the
+ // root table or in the partial snapshot cache.
+ ASSERT(!heap_object->IsSymbol());
+
+ if (address_mapper_.IsMapped(heap_object)) {
+ int space = SpaceOfAlreadySerializedObject(heap_object);
+ int address = address_mapper_.MappedTo(heap_object);
+ SerializeReferenceToPreviousObject(space,
+ address,
+ reference_representation);
} else {
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this,
}
-
void Serializer::ObjectSerializer::Serialize() {
int space = Serializer::SpaceOfObject(object_);
int size = object_->Size();
// Mark this object as already serialized.
bool start_new_page;
- SerializationAddressMapper::Map(
- object_,
- serializer_->Allocate(space, size, &start_new_page));
+ int offset = serializer_->Allocate(space, size, &start_new_page);
+ serializer_->address_mapper()->AddMapping(object_, offset);
if (start_new_page) {
sink_->Put(START_NEW_PAGE_SERIALIZATION, "NewPage");
sink_->PutSection(space, "NewPageSpace");
f(14, 32) \
f(15, 36)
-// The SerDes class is a common superclass for Serializer and Deserializer
-// which is used to store common constants and methods used by both.
-class SerDes: public ObjectVisitor {
+// The Serializer/Deserializer class is a common superclass for Serializer and
+// Deserializer which is used to store common constants and methods used by
+// both.
+class SerializerDeserializer: public ObjectVisitor {
+ public:
+ static void Iterate(ObjectVisitor* visitor);
+ static void SetSnapshotCacheSize(int size);
+
protected:
enum DataType {
RAW_DATA_SERIALIZATION = 0,
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
ROOT_SERIALIZATION = 39,
- // Free: 40-47.
+ PARTIAL_SNAPSHOT_CACHE_ENTRY = 40,
+ // Free: 41-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
static inline bool SpaceIsPaged(int space) {
return space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE;
}
+
+ static int partial_snapshot_cache_length_;
+ static const int kPartialSnapshotCacheCapacity = 1024;
+ static Object* partial_snapshot_cache_[];
};
// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
-class Deserializer: public SerDes {
+class Deserializer: public SerializerDeserializer {
public:
// Create a deserializer from a snapshot byte source.
explicit Deserializer(SnapshotByteSource* source);
- virtual ~Deserializer() { }
+ virtual ~Deserializer();
// Deserialize the snapshot into an empty heap.
void Deserialize();
virtual void Synchronize(const char* tag);
#endif
- static void TearDown();
-
private:
virtual void VisitPointers(Object** start, Object** end);
// (In large object space we are keeping track of individual objects
// rather than pages.) In new space we just need the address of the
// first object and the others will flow from that.
- List<Address> pages_[SerDes::kNumberOfSpaces];
+ List<Address> pages_[SerializerDeserializer::kNumberOfSpaces];
SnapshotByteSource* source_;
static ExternalReferenceDecoder* external_reference_decoder_;
};
-class Serializer : public SerDes {
+// Mapping objects to their location after deserialization.
+// This is used during building, but not at runtime by V8.
+class SerializationAddressMapper {
+ public:
+ // Keys are raw heap addresses (see Key() below), so allocation — and hence
+ // a moving GC — must not happen while a mapper is live; the
+ // AssertNoAllocation member enforces that for the mapper's lifetime.
+ SerializationAddressMapper()
+ : serialization_map_(new HashMap(&SerializationMatchFun)),
+ no_allocation_(new AssertNoAllocation()) { }
+
+ ~SerializationAddressMapper() {
+ delete serialization_map_;
+ delete no_allocation_;
+ }
+
+ // True if AddMapping() has been called for this object.
+ bool IsMapped(HeapObject* obj) {
+ return serialization_map_->Lookup(Key(obj), Hash(obj), false) != NULL;
+ }
+
+ // Returns the offset recorded for an already-mapped object.
+ int MappedTo(HeapObject* obj) {
+ ASSERT(IsMapped(obj));
+ return static_cast<int>(reinterpret_cast<intptr_t>(
+ serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
+ }
+
+ // Records the serialized location of |obj|; each object may only be
+ // mapped once.
+ void AddMapping(HeapObject* obj, int to) {
+ ASSERT(!IsMapped(obj));
+ HashMap::Entry* entry =
+ serialization_map_->Lookup(Key(obj), Hash(obj), true);
+ entry->value = Value(to);
+ }
+
+ private:
+ // Keys are compared by identity (same address == same object).
+ static bool SerializationMatchFun(void* key1, void* key2) {
+ return key1 == key2;
+ }
+
+ static uint32_t Hash(HeapObject* obj) {
+ return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
+ }
+
+ static void* Key(HeapObject* obj) {
+ return reinterpret_cast<void*>(obj->address());
+ }
+
+ // Offsets are smuggled through the HashMap's void* value slot.
+ static void* Value(int v) {
+ return reinterpret_cast<void*>(v);
+ }
+
+ HashMap* serialization_map_;
+ AssertNoAllocation* no_allocation_;
+ DISALLOW_COPY_AND_ASSIGN(SerializationAddressMapper);
+};
+
+
+class Serializer : public SerializerDeserializer {
public:
explicit Serializer(SnapshotByteSink* sink);
- // Serialize the current state of the heap.
- void Serialize();
- // Serialize a single object and the objects reachable from it.
- void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
// You can call this after serialization to find out how much space was used
// in each space.
// going on.
static void TooLateToEnableNow() { too_late_to_enable_now_ = true; }
static bool enabled() { return serialization_enabled_; }
+ SerializationAddressMapper* address_mapper() { return &address_mapper_; }
#ifdef DEBUG
virtual void Synchronize(const char* tag);
#endif
- private:
+ protected:
enum ReferenceRepresentation {
TAGGED_REPRESENTATION, // A tagged object reference.
CODE_TARGET_REPRESENTATION // A reference to first instruction in target.
};
+ static const int kInvalidRootIndex = -1;
+ virtual int RootIndex(HeapObject* heap_object) = 0;
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) = 0;
+
class ObjectSerializer : public ObjectVisitor {
public:
ObjectSerializer(Serializer* serializer,
int bytes_processed_so_far_;
};
- void SerializeObject(Object* o, ReferenceRepresentation representation);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation) = 0;
+ void SerializeReferenceToPreviousObject(
+ int space,
+ int address,
+ ReferenceRepresentation reference_representation);
void InitializeAllocators();
// This will return the space for an object. If the object is in large
// object space it may return kLargeCode or kLargeFixedArray in order
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
- int RootIndex(HeapObject* heap_object);
- static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
- bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
int large_object_total_;
+ SerializationAddressMapper address_mapper_;
friend class ObjectSerializer;
friend class Deserializer;
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
+
+// Serializes the objects reachable from a single object pointer — the
+// partial (context) snapshot. Objects that belong in the startup snapshot
+// are routed through the startup serializer via the partial snapshot cache.
+class PartialSerializer : public Serializer {
+ public:
+ // |startup_snapshot_serializer| is borrowed, not owned; it must outlive
+ // this serializer.
+ PartialSerializer(Serializer* startup_snapshot_serializer,
+ SnapshotByteSink* sink)
+ : Serializer(sink),
+ startup_serializer_(startup_snapshot_serializer) {
+ }
+
+ // Serialize the objects reachable from a single object pointer.
+ virtual void Serialize(Object** o);
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+
+ protected:
+ virtual int RootIndex(HeapObject* o);
+ virtual int PartialSnapshotCacheIndex(HeapObject* o);
+ // Strings and shared function infos are shared with the startup snapshot
+ // through the partial snapshot cache rather than serialized directly.
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return o->IsString() || o->IsSharedFunctionInfo();
+ }
+
+ private:
+ Serializer* startup_serializer_;
+ DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
+};
+
+
+// Serializes the complete heap state into the startup snapshot.
+class StartupSerializer : public Serializer {
+ public:
+ explicit StartupSerializer(SnapshotByteSink* sink) : Serializer(sink) {
+ // Clear the cache of objects used by the partial snapshot. After the
+ // strong roots have been serialized we can create a partial snapshot
+ // which will repopulate the cache with objects needed by that partial
+ // snapshot.
+ partial_snapshot_cache_length_ = 0;
+ }
+ // Serialize the current state of the heap. The order is:
+ // 1) Strong references.
+ // 2) Partial snapshot cache.
+ // 3) Weak references (eg the symbol table).
+ virtual void SerializeStrongReferences();
+ virtual void SerializeObject(Object* o,
+ ReferenceRepresentation representation);
+ void SerializeWeakReferences();
+ void Serialize() {
+ SerializeStrongReferences();
+ SerializeWeakReferences();
+ }
+
+ private:
+ // The startup snapshot never encodes objects as root references.
+ virtual int RootIndex(HeapObject* o) { return kInvalidRootIndex; }
+ virtual bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+ return false;
+ }
+};
+
} } // namespace v8::internal
#endif // V8_SERIALIZE_H_
return false;
}
-
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
- }
- }
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
- }
- }
- virtual int Position() {
- return ftell(fp_);
- }
-
- private:
- FILE* fp_;
-};
-
-
-bool Snapshot::WriteToFile(const char* snapshot_file) {
- FileByteSink file(snapshot_file);
- Serializer ser(&file);
- ser.Serialize();
- return true;
-}
-
-
-
} } // namespace v8::internal
Heap::TearDown();
Logger::TearDown();
- Deserializer::TearDown();
is_running_ = false;
has_been_disposed_ = true;
#include "cctest.h"
#include "spaces.h"
#include "objects.h"
+#include "natives.h"
+#include "bootstrapper.h"
using namespace v8::internal;
}
+// A SnapshotByteSink that writes snapshot bytes to a file; used by the
+// serialization tests.
+class FileByteSink : public SnapshotByteSink {
+ public:
+ // Opens |snapshot_file| for binary writing and exits the process if the
+ // file cannot be opened. The file name pointer is stored, not copied, so
+ // the caller's string must outlive this sink.
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ file_name_ = snapshot_file;
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ // Write a single byte; silently drops it if the file is not open.
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+ // NOTE(review): unlike Put(), this calls ftell without a NULL check on
+ // fp_ — safe in practice only because the constructor exits on failure.
+ virtual int Position() {
+ return ftell(fp_);
+ }
+ // Writes a "<file>.size" sidecar recording per-space usage (see below).
+ void WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used);
+
+ private:
+ FILE* fp_;
+ const char* file_name_;
+};
+
+
+// Writes the space usage of each heap space to a "<file_name_>.size" sidecar
+// file, one "name value" pair per line, for the deserialization test to read
+// back.
+// NOTE(review): strlen returns size_t and is narrowed to int here (other
+// code in this codebase uses StrLength for this); the OS::FOpen result is
+// not checked for NULL before fprintf; and the |name| Vector is never
+// disposed — confirm whether these are acceptable in test-only code.
+void FileByteSink::WriteSpaceUsed(
+ int new_space_used,
+ int pointer_space_used,
+ int data_space_used,
+ int code_space_used,
+ int map_space_used,
+ int cell_space_used,
+ int large_space_used) {
+ int file_name_length = strlen(file_name_) + 10;
+ Vector<char> name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(name, "%s.size", file_name_);
+ FILE* fp = OS::FOpen(name.start(), "w");
+ fprintf(fp, "new %d\n", new_space_used);
+ fprintf(fp, "pointer %d\n", pointer_space_used);
+ fprintf(fp, "data %d\n", data_space_used);
+ fprintf(fp, "code %d\n", code_space_used);
+ fprintf(fp, "map %d\n", map_space_used);
+ fprintf(fp, "cell %d\n", cell_space_used);
+ fprintf(fp, "large %d\n", large_space_used);
+ fclose(fp);
+}
+
+
+// Serialize the current heap state into |snapshot_file| using the startup
+// serializer. Always returns true; a file-open failure exits the process
+// inside FileByteSink's constructor.
+static bool WriteToFile(const char* snapshot_file) {
+ FileByteSink file(snapshot_file);
+ StartupSerializer ser(&file);
+ ser.Serialize();
+ return true;
+}
+
+
static void Serialize() {
// We have to create one context. One reason for this is so that the builtins
// can be loaded from v8natives.js and their addresses can be processed. This
// avoids leaving anything behind that would confuse the
// serialization/deserialization process.
v8::Persistent<v8::Context> env = v8::Context::New();
env.Dispose();
- Snapshot::WriteToFile(FLAG_testing_serialization_file);
+ WriteToFile(FLAG_testing_serialization_file);
}
}
-class FileByteSink : public SnapshotByteSink {
- public:
- explicit FileByteSink(const char* snapshot_file) {
- fp_ = OS::FOpen(snapshot_file, "wb");
- file_name_ = snapshot_file;
- if (fp_ == NULL) {
- PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
- exit(1);
- }
- }
- virtual ~FileByteSink() {
- if (fp_ != NULL) {
- fclose(fp_);
- }
- }
- virtual void Put(int byte, const char* description) {
- if (fp_ != NULL) {
- fputc(byte, fp_);
+TEST(PartialSerialization) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ ASSERT(!env.IsEmpty());
+ env->Enter();
+ // Make sure all builtin scripts are cached.
+ { HandleScope scope;
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ Bootstrapper::NativesSourceLookup(i);
}
}
- virtual int Position() {
- return ftell(fp_);
+ Heap::CollectAllGarbage(true);
+ Heap::CollectAllGarbage(true);
+
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+ ASSERT(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
}
- void WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int large_space_used);
- private:
- FILE* fp_;
- const char* file_name_;
-};
+ int file_name_length = strlen(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ env->Exit();
+ env.Dispose();
-void FileByteSink::WriteSpaceUsed(
- int new_space_used,
- int pointer_space_used,
- int data_space_used,
- int code_space_used,
- int map_space_used,
- int cell_space_used,
- int large_space_used) {
- int file_name_length = StrLength(file_name_) + 10;
- Vector<char> name = Vector<char>::New(file_name_length + 1);
- OS::SNPrintF(name, "%s.size", file_name_);
- FILE* fp = OS::FOpen(name.start(), "w");
- fprintf(fp, "new %d\n", new_space_used);
- fprintf(fp, "pointer %d\n", pointer_space_used);
- fprintf(fp, "data %d\n", data_space_used);
- fprintf(fp, "code %d\n", code_space_used);
- fprintf(fp, "map %d\n", map_space_used);
- fprintf(fp, "cell %d\n", cell_space_used);
- fprintf(fp, "large %d\n", large_space_used);
- fclose(fp);
+ FileByteSink startup_sink(startup_name.start());
+ StartupSerializer startup_serializer(&startup_sink);
+ startup_serializer.SerializeStrongReferences();
+
+ FileByteSink partial_sink(FLAG_testing_serialization_file);
+ PartialSerializer p_ser(&startup_serializer, &partial_sink);
+ p_ser.Serialize(&raw_foo);
+ startup_serializer.SerializeWeakReferences();
+ partial_sink.WriteSpaceUsed(p_ser.CurrentAllocationAddress(NEW_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+ p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+ p_ser.CurrentAllocationAddress(CODE_SPACE),
+ p_ser.CurrentAllocationAddress(MAP_SPACE),
+ p_ser.CurrentAllocationAddress(CELL_SPACE),
+ p_ser.CurrentAllocationAddress(LO_SPACE));
}
-TEST(PartialSerialization) {
- Serializer::Enable();
- v8::V8::Initialize();
- v8::Persistent<v8::Context> env = v8::Context::New();
- env->Enter();
-
- v8::HandleScope handle_scope;
- v8::Local<v8::String> foo = v8::String::New("foo");
-
- FileByteSink file(FLAG_testing_serialization_file);
- Serializer ser(&file);
- i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
- Object* raw_foo = *internal_foo;
- ser.SerializePartial(&raw_foo);
- file.WriteSpaceUsed(ser.CurrentAllocationAddress(NEW_SPACE),
- ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
- ser.CurrentAllocationAddress(OLD_DATA_SPACE),
- ser.CurrentAllocationAddress(CODE_SPACE),
- ser.CurrentAllocationAddress(MAP_SPACE),
- ser.CurrentAllocationAddress(CELL_SPACE),
- ser.CurrentAllocationAddress(LO_SPACE));
-}
+DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+ int file_name_length = strlen(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ OS::SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ CHECK(Snapshot::Initialize(startup_name.start()));
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
- v8::V8::Initialize();
const char* file_name = FLAG_testing_serialization_file;
- int file_name_length = StrLength(file_name) + 10;
Vector<char> name = Vector<char>::New(file_name_length + 1);
OS::SNPrintF(name, "%s.size", file_name);
FILE* fp = OS::FOpen(name.start(), "r");
large_size);
int snapshot_size = 0;
byte* snapshot = ReadBytes(file_name, &snapshot_size);
- SnapshotByteSource source(snapshot, snapshot_size);
- Deserializer deserializer(&source);
+
Object* root;
- deserializer.DeserializePartial(&root);
- CHECK(root->IsString());
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root);
+ CHECK(root->IsString());
+ }
+ v8::HandleScope handle_scope;
+ Handle<Object>root_handle(root);
+
+ Object* root2;
+ {
+ SnapshotByteSource source(snapshot, snapshot_size);
+ Deserializer deserializer(&source);
+ deserializer.DeserializePartial(&root2);
+ CHECK(root2->IsString());
+ CHECK(*root_handle == root2);
+ }
}