1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_SNAPSHOT_SERIALIZE_H_
6 #define V8_SNAPSHOT_SERIALIZE_H_
8 #include "src/hashmap.h"
9 #include "src/heap/heap.h"
10 #include "src/objects.h"
11 #include "src/snapshot/snapshot-source-sink.h"
// Number of entries serialized from each deoptimization table.
// NOTE(review): the use site is not visible in this chunk — confirm semantics
// against the serializer's deopt-table handling.
19 static const int kDeoptTableSerializeEntryCount = 64;
21 // ExternalReferenceTable is a helper class that defines the relationship
22 // between external references and their encodings. It is used to build
23 // hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
24 class ExternalReferenceTable {
// Per-isolate singleton accessor (defined out of line).
26   static ExternalReferenceTable* instance(Isolate* isolate);
// Number of registered (address, name) pairs.
28   int size() const { return refs_.length(); }
// Address / printable name of entry i. Bounds are only checked by List.
29   Address address(int i) { return refs_[i].address; }
30   const char* name(int i) { return refs_[i].name; }
// Sentinel address for references that are not available on this build.
32   inline static Address NotAvailable() { return NULL; }
// One table entry. NOTE(review): struct body elided in this chunk.
35   struct ExternalReferenceEntry {
40   explicit ExternalReferenceTable(Isolate* isolate);
// Registers a new external reference. NOTE(review): the statement that
// appends |entry| to refs_ is elided in this chunk.
42   void Add(Address address, const char* name) {
43     ExternalReferenceEntry entry = {address, name};
47   List<ExternalReferenceEntry> refs_;
49   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
// Encodes external reference addresses as compact integers for the snapshot.
53 class ExternalReferenceEncoder {
55   explicit ExternalReferenceEncoder(Isolate* isolate);
// Maps an external address to its encoding. Presumably backed by a hashmap
// built from ExternalReferenceTable — the member is elided in this chunk.
57   uint32_t Encode(Address key) const;
// Human-readable name for an address; used for debugging/tracing output.
59   const char* NameOfAddress(Isolate* isolate, Address address) const;
// Hash an address by discarding low (alignment) bits.
// NOTE(review): the shift amount is elided in this chunk.
62   static uint32_t Hash(Address key) {
63     return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
69   DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
// Shared helpers for hashmaps keyed by HeapObject address with uint32 values.
// Values are smuggled through the HashMap's void* value slot via casts.
73 class AddressMapBase {
75   static void SetValue(HashMap::Entry* entry, uint32_t v) {
76     entry->value = reinterpret_cast<void*>(v);
79   static uint32_t GetValue(HashMap::Entry* entry) {
80     return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
// Finds (and, on one path, inserts) the entry for |obj|.
// NOTE(review): the trailing parameter (an insert flag, judging by the
// call sites below that pass true/false) and the branch between the two
// return paths are elided in this chunk.
83   inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
86     map->LookupOrInsert(Key(obj), Hash(obj));
88     return map->Lookup(Key(obj), Hash(obj));
// Hash is the object's address. The intermediate cast is to int32_t even
// though the return type is uint32_t; the int32->uint32 conversion is
// well-defined (modulo 2^32), so the result is just the truncated address.
92   static uint32_t Hash(HeapObject* obj) {
93     return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
96   static void* Key(HeapObject* obj) {
97     return reinterpret_cast<void*>(obj->address());
// Maps root-list heap objects back to their root-list index.
102 class RootIndexMap : public AddressMapBase {
104   explicit RootIndexMap(Isolate* isolate);
106   static const int kInvalidRootIndex = -1;
// Returns the root index for |obj|, or kInvalidRootIndex if it is not a root.
// Never inserts (LookupEntry is called with insert == false).
108   int Lookup(HeapObject* obj) {
109     HashMap::Entry* entry = LookupEntry(map_, obj, false);
110     if (entry) return GetValue(entry);
111     return kInvalidRootIndex;
117   DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
// Maps heap objects to their index in the partial snapshot cache.
121 class PartialCacheIndexMap : public AddressMapBase {
// Pointer identity is the right equality for heap objects.
123   PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
125   static const int kInvalidIndex = -1;
127   // Lookup object in the map. Return its index if found, or create
128   // a new entry with new_index as value, and return kInvalidIndex.
129   int LookupOrInsert(HeapObject* obj, int new_index) {
130     HashMap::Entry* entry = LookupEntry(&map_, obj, false);
131     if (entry != NULL) return GetValue(entry);
// Not found: record new_index for future lookups, signal "was absent".
132     SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
133     return kInvalidIndex;
139   DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
// A 32-bit encoded reference to an already-serialized object, packing
// allocation space, chunk index and chunk offset into one bitfield.
// The top of the uint32 range is reserved for special sentinel values
// (invalid / source / global proxy / dummy, declared below).
143 class BackReference {
145   explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
// Default-constructed references are invalid.
147   BackReference() : bitfield_(kInvalidValue) {}
// Sentinel: the attached source string of user code.
149   static BackReference SourceReference() { return BackReference(kSourceValue); }
// Sentinel: the attached global proxy of a partial snapshot.
151   static BackReference GlobalProxyReference() {
152     return BackReference(kGlobalProxyValue);
// Large objects have no chunk structure: the "offset" field stores a
// plain per-object index within LO_SPACE.
155   static BackReference LargeObjectReference(uint32_t index) {
156     return BackReference(SpaceBits::encode(LO_SPACE) |
157                          ChunkOffsetBits::encode(index));
160   static BackReference DummyReference() { return BackReference(kDummyValue); }
// Regular (paged-space) reference: offset is stored word-scaled, so it
// must be object-aligned, and LO_SPACE must use LargeObjectReference().
162   static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
163                                  uint32_t chunk_offset) {
164     DCHECK(IsAligned(chunk_offset, kObjectAlignment));
165     DCHECK_NE(LO_SPACE, space);
166     return BackReference(
167         SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
168         ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
171   bool is_valid() const { return bitfield_ != kInvalidValue; }
172   bool is_source() const { return bitfield_ == kSourceValue; }
173   bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
175   AllocationSpace space() const {
177     return SpaceBits::decode(bitfield_);
// Rescales the stored word offset back to a byte offset.
180   uint32_t chunk_offset() const {
182     return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
// For LO_SPACE only: the raw index stored by LargeObjectReference().
185   uint32_t large_object_index() const {
187     DCHECK(chunk_index() == 0);
188     return ChunkOffsetBits::decode(bitfield_);
191   uint32_t chunk_index() const {
193     return ChunkIndexBits::decode(bitfield_);
// Chunk index + offset without the space tag.
196   uint32_t reference() const {
198     return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
201   uint32_t bitfield() const { return bitfield_; }
// Sentinel encodings occupy the highest uint32 values so they can never
// collide with a real space/chunk/offset encoding.
204   static const uint32_t kInvalidValue = 0xFFFFFFFF;
205   static const uint32_t kSourceValue = 0xFFFFFFFE;
206   static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
207   static const uint32_t kDummyValue = 0xFFFFFFFC;
// Offset field is word-scaled, so page-size bits minus alignment bits
// suffice; the chunk index gets whatever bits remain after the space tag.
208   static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
209   static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
212   static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
// Bit layout, low to high: offset, chunk index, space tag.
// NOTE(review): the ChunkIndexBits/SpaceBits class-header lines are elided
// in this chunk; only their base-class lines are visible.
215   class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
217       : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
219       : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
226 // Mapping objects to their location after deserialization.
227 // This is used during building, but not at runtime by V8.
228 class BackReferenceMap : public AddressMapBase {
// NOTE(review): the constructor header line is elided in this chunk; only
// its initializer list is visible. The no_allocation_ member (initialized
// first) forbids GC while the map holds raw HeapObject* keys.
231       : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
233   ~BackReferenceMap() { delete map_; }
// Returns the recorded back reference, or an invalid one if absent.
235   BackReference Lookup(HeapObject* obj) {
236     HashMap::Entry* entry = LookupEntry(map_, obj, false);
237     return entry ? BackReference(GetValue(entry)) : BackReference();
// Records a back reference; each object may be added at most once.
240   void Add(HeapObject* obj, BackReference b) {
241     DCHECK(b.is_valid());
242     DCHECK_NULL(LookupEntry(map_, obj, false));
243     HashMap::Entry* entry = LookupEntry(map_, obj, true);
244     SetValue(entry, b.bitfield());
// Convenience wrappers for the two sentinel attached references.
247   void AddSourceString(String* string) {
248     Add(string, BackReference::SourceReference());
251   void AddGlobalProxy(HeapObject* global_proxy) {
252     Add(global_proxy, BackReference::GlobalProxyReference());
256   DisallowHeapAllocation no_allocation_;
258   DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
// Fixed-size circular queue of recently seen objects, so the snapshot can
// refer to them with a short "hot object" bytecode instead of a full ref.
262 class HotObjectsList {
264   HotObjectsList() : index_(0) {
265     for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
// Inserts at the current slot, overwriting the oldest entry once full.
268   void Add(HeapObject* object) {
269     circular_queue_[index_] = object;
270     index_ = (index_ + 1) & kSizeMask;
273   HeapObject* Get(int index) {
274     DCHECK_NOT_NULL(circular_queue_[index]);
275     return circular_queue_[index];
278   static const int kNotFound = -1;
// Linear scan is fine: kSize is 8.
// NOTE(review): the `return kNotFound;` tail of Find is elided in this chunk.
280   int Find(HeapObject* object) {
281     for (int i = 0; i < kSize; i++) {
282       if (circular_queue_[i] == object) return i;
287   static const int kSize = 8;
// Power-of-two size lets Add() wrap with a mask instead of a modulo.
290   STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
291   static const int kSizeMask = kSize - 1;
292   HeapObject* circular_queue_[kSize];
295   DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
299 // The Serializer/Deserializer class is a common superclass for Serializer and
300 // Deserializer which is used to store common constants and methods used by
302 class SerializerDeserializer: public ObjectVisitor {
304   static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
306   static int nop() { return kNop; }
308   // No reservation for large object space necessary.
309   static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
310   static const int kNumberOfSpaces = LAST_SPACE + 1;
313   static bool CanBeDeferred(HeapObject* o);
315   // ---------- byte code range 0x00..0x7f ----------
316   // Byte codes in this range represent Where, HowToCode and WhereToPoint.
317   // Where the pointed-to object can be found:
318   // The static assert below will trigger when the number of preallocated spaces
319   // changed. If that happens, update the bytecode ranges in the comments below.
320   STATIC_ASSERT(5 == kNumberOfSpaces);
// NOTE(review): the enum declaration and several enumerator lines in this
// range are elided in this chunk; only a subset of enumerators is visible.
322     // 0x00..0x04  Allocate new object, in specified space.
324     // 0x05        Unused (including 0x25, 0x45, 0x65).
325     // 0x06        Unused (including 0x26, 0x46, 0x66).
326     // 0x07        Unused (including 0x27, 0x47, 0x67).
327     // 0x08..0x0c  Reference to previous object from space.
329     // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
330     // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
331     // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
332     // 0x10..0x14  Reference to previous object from space after skip.
333     kBackrefWithSkip = 0x10,
334     // 0x15        Unused (including 0x35, 0x55, 0x75).
335     // 0x16        Unused (including 0x36, 0x56, 0x76).
336     // 0x17        Misc (including 0x37, 0x57, 0x77).
337     // 0x18        Root array item.
339     // 0x19        Object in the partial snapshot cache.
340     kPartialSnapshotCache = 0x19,
341     // 0x1a        External reference referenced by id.
342     kExternalReference = 0x1a,
343     // 0x1b        Object provided in the attached list.
344     kAttachedReference = 0x1b,
345     // 0x1c        Builtin code referenced by index.
347     // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
// Masks for decoding a bytecode into its Where / space components.
350   static const int kWhereMask = 0x1f;
351   static const int kSpaceMask = 7;
352   STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
354   // How to code the pointer to the object.
358     // A pointer inlined in code. What this means depends on the architecture.
362   static const int kHowToCodeMask = 0x20;
364   // Where to point within the object.
366     // Points to start of object
368     // Points to instruction in code object or payload of cell.
372   static const int kWhereToPointMask = 0x40;
374   // ---------- Misc ----------
// Skip: advance the write pointer without emitting data.
376   static const int kSkip = 0x1d;
377   // Internal reference encoded as offsets of pc and target from code entry.
378   static const int kInternalReference = 0x1e;
379   static const int kInternalReferenceEncoded = 0x1f;
380   // Do nothing, used for padding.
381   static const int kNop = 0x3d;
382   // Move to next reserved chunk.
383   static const int kNextChunk = 0x3e;
384   // Deferring object content.
385   static const int kDeferred = 0x3f;
386   // Used for the source code of the natives, which is in the executable, but
387   // is referred to from external strings in the snapshot.
388   static const int kNativesStringResource = 0x5d;
389   // Used for the source code for compiled stubs, which is in the executable,
390   // but is referred to from external strings in the snapshot.
391   static const int kCodeStubNativesStringResource = 0x5e;
392   // Used for the source code for V8 extras, which is in the executable,
393   // but is referred to from external strings in the snapshot.
394   static const int kExtraNativesStringResource = 0x5f;
395   // A tag emitted at strategic points in the snapshot to delineate sections.
396   // If the deserializer does not find these at the expected moments then it
397   // is an indication that the snapshot and the VM do not fit together.
398   // Examine the build process for architecture, version or configuration
400   static const int kSynchronize = 0x17;
401   // Repeats of variable length.
402   static const int kVariableRepeat = 0x37;
403   // Raw data of variable length.
404   static const int kVariableRawData = 0x57;
405   // Alignment prefixes 0x7d..0x7f
406   static const int kAlignmentPrefix = 0x7d;
410   // ---------- byte code range 0x80..0xff ----------
411   // First 32 root array items.
412   static const int kNumberOfRootArrayConstants = 0x20;
414   static const int kRootArrayConstants = 0x80;
416   static const int kRootArrayConstantsWithSkip = 0xa0;
417   static const int kRootArrayConstantsMask = 0x1f;
419   // 8 hot (recently seen or back-referenced) objects with optional skip.
420   static const int kNumberOfHotObjects = 0x08;
422   static const int kHotObject = 0xc0;
424   static const int kHotObjectWithSkip = 0xc8;
425   static const int kHotObjectMask = 0x07;
427   // 32 common raw data lengths.
428   static const int kNumberOfFixedRawData = 0x20;
430   static const int kFixedRawData = 0xd0;
431   static const int kOnePointerRawData = kFixedRawData;
432   static const int kFixedRawDataStart = kFixedRawData - 1;
434   // 16 repeats lengths.
435   static const int kNumberOfFixedRepeat = 0x10;
437   static const int kFixedRepeat = 0xf0;
438   static const int kFixedRepeatStart = kFixedRepeat - 1;
440   // ---------- special values ----------
441   static const int kAnyOldSpace = -1;
443   // Sentinel after a new object to indicate that double alignment is needed.
444   static const int kDoubleAlignmentSentinel = 0;
446   // Used as index for the attached reference representing the source object.
447   static const int kSourceObjectReference = 0;
449   // Used as index for the attached reference representing the global proxy.
450   static const int kGlobalProxyReference = 0;
452   // ---------- member variable ----------
453   HotObjectsList hot_objects_;
// Owns (optionally) a serialized payload buffer and gives typed access to
// its uint32 header fields. Base class for SnapshotData/SerializedCodeData.
457 class SerializedData {
// A per-space chunk-size record; bit 31 marks the last chunk of a space.
461     explicit Reservation(uint32_t size)
462         : reservation_(ChunkSizeBits::encode(size)) {}
464     uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
465     bool is_last() const { return IsLastChunkBits::decode(reservation_); }
467     void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
470     uint32_t reservation_;
// Non-owning view over an existing buffer.
473   SerializedData(byte* data, int size)
474       : data_(data), size_(size), owns_data_(false) {}
475   SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
// NOTE(review): destructor header elided in this chunk; only the body line
// freeing the buffer when owned is visible.
478     if (owns_data_) DeleteArray<byte>(data_);
481   uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
483   class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
484   class IsLastChunkBits : public BitField<bool, 31, 1> {};
// Magic number folds in the external-reference count so a snapshot built
// against a different reference table is rejected at load time.
486   static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
487     uint32_t external_refs = table->size();
488     return 0xC0DE0000 ^ external_refs;
// Header accessors use memcpy to avoid unaligned-access UB on the buffer.
492   void SetHeaderValue(int offset, uint32_t value) {
493     uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
494     memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
// NOTE(review): the declaration of the local |value| read here is elided
// in this chunk.
497   uint32_t GetHeaderValue(int offset) const {
499     memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
503   void AllocateData(int size);
505   static uint32_t ComputeMagicNumber(Isolate* isolate) {
506     return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
509   void SetMagicNumber(Isolate* isolate) {
510     SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
513   static const int kMagicNumberOffset = 0;
521 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
522 class Deserializer: public SerializerDeserializer {
524   // Create a deserializer from a snapshot byte source.
// Data is any type exposing Payload(), GetMagicNumber() and Reservations()
// (e.g. SnapshotData / SerializedCodeData below).
// NOTE(review): the head of the initializer list (isolate_, attached
// objects) is elided in this chunk.
525   template <class Data>
526   explicit Deserializer(Data* data)
528         source_(data->Payload()),
529         magic_number_(data->GetMagicNumber()),
530         external_reference_table_(NULL),
531         deserialized_large_objects_(0),
532         deserializing_user_code_(false),
533         next_alignment_(kWordAligned) {
534     DecodeReservation(data->Reservations());
537   virtual ~Deserializer();
539   // Deserialize the snapshot into an empty heap.
540   void Deserialize(Isolate* isolate);
542   // Deserialize a single object and the objects reachable from it.
543   MaybeHandle<Object> DeserializePartial(
544       Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
545       Handle<FixedArray>* outdated_contexts_out);
547   // Deserialize a shared function info. Fail gracefully.
548   MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
550   // Pass a vector of externally-provided objects referenced by the snapshot.
551   // The ownership to its backing store is handed over as well.
552   void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
553     attached_objects_ = attached_objects;
557   virtual void VisitPointers(Object** start, Object** end);
// Runtime entries never appear in snapshots.
// NOTE(review): the UNREACHABLE() body is elided in this chunk.
559   virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
563   void Initialize(Isolate* isolate);
565   bool deserializing_user_code() { return deserializing_user_code_; }
567   void DecodeReservation(Vector<const SerializedData::Reservation> res);
// memcpy tolerates the unaligned slots that occur inside code objects.
571   void UnalignedCopy(Object** dest, Object** src) {
572     memcpy(dest, src, sizeof(*src));
575   void DeserializeDeferredObjects();
577   void FlushICacheForNewIsolate();
578   void FlushICacheForNewCodeObjects();
580   void CommitPostProcessedObjects(Isolate* isolate);
582   // Fills in some heap data in an area from start to end (non-inclusive). The
583   // space id is used for the write barrier. The object_address is the address
584   // of the object we are writing into, or NULL if we are not writing into an
585   // object, i.e. if we are writing a series of tagged values that are not on
586   // the heap. Return false if the object content has been deferred.
587   bool ReadData(Object** start, Object** end, int space,
588                 Address object_address);
589   void ReadObject(int space_number, Object** write_back);
590   Address Allocate(int space_index, int size);
592   // Special handling for serialized code like hooking up internalized strings.
593   HeapObject* PostProcessNewObject(HeapObject* obj, int space);
595   // This returns the address of an object that has been described in the
596   // snapshot by chunk index and offset.
597   HeapObject* GetBackReferencedObject(int space);
// NOTE(review): trailing parameter(s) of this declaration are elided in
// this chunk.
599   Object** CopyInNativesSource(Vector<const char> source_vector,
602   // Cached current isolate.
605   // Objects from the attached object descriptions in the serialized user code.
606   Vector<Handle<Object> > attached_objects_;
608   SnapshotByteSource source_;
609   uint32_t magic_number_;
611   // The address of the next object that will be allocated in each space.
612   // Each space has a number of chunks reserved by the GC, with each chunk
613   // fitting into a page. Deserialized objects are allocated into the
614   // current chunk of the target space by bumping up high water mark.
615   Heap::Reservation reservations_[kNumberOfSpaces];
616   uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
617   Address high_water_[kNumberOfPreallocatedSpaces];
619   ExternalReferenceTable* external_reference_table_;
621   List<HeapObject*> deserialized_large_objects_;
622   List<Code*> new_code_objects_;
623   List<Handle<String> > new_internalized_strings_;
624   List<Handle<Script> > new_scripts_;
626   bool deserializing_user_code_;
628   AllocationAlignment next_alignment_;
630   DISALLOW_COPY_AND_ASSIGN(Deserializer);
// Forward declaration; used by Serializer for logging code addresses.
634 class CodeAddressMap;
636 // There can be only one serializer per V8 process.
637 class Serializer : public SerializerDeserializer {
639   Serializer(Isolate* isolate, SnapshotByteSink* sink);
641   void VisitPointers(Object** start, Object** end) override;
643   void EncodeReservations(List<SerializedData::Reservation>* out) const;
// Serializes objects queued via QueueDeferredObject (stack-depth control).
645   void SerializeDeferredObjects();
647   Isolate* isolate() const { return isolate_; }
649   BackReferenceMap* back_reference_map() { return &back_reference_map_; }
650   RootIndexMap* root_index_map() { return &root_index_map_; }
// Statistics gathering, compiled in only with OBJECT_PRINT (the opening
// #ifdef line is elided in this chunk).
653   void CountInstanceType(Map* map, int size);
654 #endif  // OBJECT_PRINT
657   class ObjectSerializer;
// RAII guard that tracks recursion depth while serializing object graphs.
658   class RecursionScope {
660     explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
661       serializer_->recursion_depth_++;
663     ~RecursionScope() { serializer_->recursion_depth_--; }
664     bool ExceedsMaximum() {
665       return serializer_->recursion_depth_ >= kMaxRecursionDepth;
669     static const int kMaxRecursionDepth = 32;
670     Serializer* serializer_;
// Subclass hook: how a given heap object gets emitted.
673   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
674                                WhereToPoint where_to_point, int skip) = 0;
// NOTE(review): trailing parameter(s) of PutRoot are elided in this chunk.
676   void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
679   void PutBackReference(HeapObject* object, BackReference reference);
681   // Emit alignment prefix if necessary, return required padding space in bytes.
682   int PutAlignmentPrefix(HeapObject* object);
684   // Returns true if the object was successfully serialized.
685   bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
686                             WhereToPoint where_to_point, int skip);
// Emits a pending skip, if any, before the next object bytecode.
// NOTE(review): the guard (presumably `if (skip != 0)`) is elided here.
688   inline void FlushSkip(int skip) {
690       sink_->Put(kSkip, "SkipFromSerializeObject");
691       sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
695   bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
697   // This will return the space for an object.
698   BackReference AllocateLargeObject(int size);
699   BackReference Allocate(AllocationSpace space, int size);
700   int EncodeExternalReference(Address addr) {
701     return external_reference_encoder_.Encode(addr);
704   // GetInt reads 4 bytes at once, requiring padding at the end.
707   // Some roots should not be serialized, because their actual value depends on
708   // absolute addresses and they are reset after deserialization, anyway.
709   bool ShouldBeSkipped(Object** current);
711   // We may not need the code address map for logging for every instance
712   // of the serializer. Initialize it on demand.
713   void InitializeCodeAddressMap();
715   Code* CopyCode(Code* code);
717   inline uint32_t max_chunk_size(int space) const {
719     DCHECK_LT(space, kNumberOfSpaces);
720     return max_chunk_size_[space];
723   SnapshotByteSink* sink() const { return sink_; }
725   void QueueDeferredObject(HeapObject* obj) {
726     DCHECK(back_reference_map_.Lookup(obj).is_valid());
727     deferred_objects_.Add(obj);
730   void OutputStatistics(const char* name);
734   SnapshotByteSink* sink_;
735   ExternalReferenceEncoder external_reference_encoder_;
737   BackReferenceMap back_reference_map_;
738   RootIndexMap root_index_map_;
740   int recursion_depth_;
742   friend class Deserializer;
743   friend class ObjectSerializer;
744   friend class RecursionScope;
745   friend class SnapshotData;
748   CodeAddressMap* code_address_map_;
749   // Objects from the same space are put into chunks for bulk-allocation
750   // when deserializing. We have to make sure that each chunk fits into a
751   // page. So we track the chunk size in pending_chunk_ of a space, but
752   // when it exceeds a page, we complete the current chunk and start a new one.
753   uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
754   List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
755   uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
757   // We map serialized large objects to indexes for back-referencing.
758   uint32_t large_objects_total_size_;
759   uint32_t seen_large_objects_index_;
761   List<byte> code_buffer_;
763   // To handle stack overflow.
764   List<HeapObject*> deferred_objects_;
// OBJECT_PRINT-only statistics counters (opening #ifdef elided here).
767   static const int kInstanceTypes = 256;
768   int* instance_type_count_;
769   size_t* instance_type_size_;
770 #endif  // OBJECT_PRINT
772   DISALLOW_COPY_AND_ASSIGN(Serializer);
// Serializes a context's object graph; objects that belong in the startup
// snapshot are delegated to |startup_snapshot_serializer| via the partial
// snapshot cache.
776 class PartialSerializer : public Serializer {
778   PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
779                     SnapshotByteSink* sink)
780       : Serializer(isolate, sink),
781         startup_serializer_(startup_snapshot_serializer),
782         outdated_contexts_(0),
783         global_object_(NULL) {
784     InitializeCodeAddressMap();
787   ~PartialSerializer() { OutputStatistics("PartialSerializer"); }
789   // Serialize the objects reachable from a single object pointer.
790   void Serialize(Object** o);
791   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
792                                WhereToPoint where_to_point, int skip) override;
795   int PartialSnapshotCacheIndex(HeapObject* o);
796   bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
798   void SerializeOutdatedContextsAsFixedArray();
800   Serializer* startup_serializer_;
802   List<Context*> outdated_contexts_;
802   Object* global_object_;
803   PartialCacheIndexMap partial_cache_index_map_;
804   DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
// Serializes the isolate's startup heap (roots, then partial snapshot
// cache, then weak references).
808 class StartupSerializer : public Serializer {
810   StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
811   ~StartupSerializer() { OutputStatistics("StartupSerializer"); }
813   // The StartupSerializer has to serialize the root array, which is slightly
815   void VisitPointers(Object** start, Object** end) override;
817   // Serialize the current state of the heap.  The order is:
818   // 1) Strong references.
819   // 2) Partial snapshot cache.
820   // 3) Weak references (e.g. the string table).
821   virtual void SerializeStrongReferences();
822   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
823                                WhereToPoint where_to_point, int skip) override;
824   void SerializeWeakReferencesAndDeferred();
// NOTE(review): the enclosing method header for these two calls (a
// convenience entry point invoking both phases in order) is elided in
// this chunk.
826     SerializeStrongReferences();
827     SerializeWeakReferencesAndDeferred();
831   intptr_t root_index_wave_front_;
832   DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
// Serializes/deserializes compiled user code keyed to its source string.
836 class CodeSerializer : public Serializer {
838   static ScriptData* Serialize(Isolate* isolate,
839                                Handle<SharedFunctionInfo> info,
840                                Handle<String> source);
842   MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
843       Isolate* isolate, ScriptData* cached_data, Handle<String> source);
// The source string is always attached reference 0.
845   static const int kSourceObjectIndex = 0;
846   STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
848   static const int kCodeStubsBaseIndex = 1;
// Raw String* accessor; only valid while GC is disallowed (see no_gc_).
850   String* source() const {
851     DCHECK(!AllowHeapAllocation::IsAllowed());
855   const List<uint32_t>* stub_keys() const { return &stub_keys_; }
// NOTE(review): the trailing constructor parameter (|main_code|, per the
// initializer list) is elided in this chunk.
858   CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
860       : Serializer(isolate, sink), source_(source), main_code_(main_code) {
861     back_reference_map_.AddSourceString(source);
864   ~CodeSerializer() { OutputStatistics("CodeSerializer"); }
866   virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
867                                WhereToPoint where_to_point, int skip) override;
869   void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
870                         WhereToPoint where_to_point);
871   void SerializeIC(Code* ic, HowToCode how_to_code,
872                    WhereToPoint where_to_point);
873   void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
874                          WhereToPoint where_to_point);
875   void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
876                         WhereToPoint where_to_point);
877   int AddCodeStubKey(uint32_t stub_key);
// Holding raw String*/Code* members requires GC to be disallowed for the
// serializer's lifetime.
879   DisallowHeapAllocation no_gc_;
882   List<uint32_t> stub_keys_;
883   DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
887 // Wrapper around reservation sizes and the serialization payload.
888 class SnapshotData : public SerializedData {
890   // Used when producing.
891   explicit SnapshotData(const Serializer& ser);
893   // Used when consuming.
894   explicit SnapshotData(const Vector<const byte> snapshot)
895       : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
899   Vector<const Reservation> Reservations() const;
900   Vector<const byte> Payload() const;
902   Vector<const byte> RawData() const {
903     return Vector<const byte>(data_, size_);
909   // The data header consists of uint32_t-sized entries:
910   // [0] magic number and external reference count
912   // [2] number of reservation size entries
913   // [3] payload length
915   // ...  serialized payload
// Header field offsets; each is one uint32 after the previous.
916   static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
917   static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
918   static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
919   static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
923 // Wrapper around ScriptData to provide code-serializer-specific functionality.
924 class SerializedCodeData : public SerializedData {
926   // Used when consuming.
// Returns NULL-equivalent on sanity-check failure (fail-gracefully path).
// NOTE(review): trailing parameter(s) of this declaration are elided in
// this chunk.
927   static SerializedCodeData* FromCachedData(Isolate* isolate,
928                                             ScriptData* cached_data,
931   // Used when producing.
932   SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
934   // Return ScriptData object and relinquish ownership over it to the caller.
935   ScriptData* GetScriptData();
937   Vector<const Reservation> Reservations() const;
938   Vector<const byte> Payload() const;
940   Vector<const uint32_t> CodeStubKeys() const;
943   explicit SerializedCodeData(ScriptData* data);
// Reasons a cached-code blob may be rejected at load time.
// NOTE(review): enumerators 0 (success), 3 and 5 are elided in this chunk.
945   enum SanityCheckResult {
947     MAGIC_NUMBER_MISMATCH = 1,
948     VERSION_MISMATCH = 2,
950     CPU_FEATURES_MISMATCH = 4,
952     CHECKSUM_MISMATCH = 6
955   SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
957   uint32_t SourceHash(String* source) const;
959   // The data header consists of uint32_t-sized entries:
960   // [0] magic number and external reference count
965   // [5] number of code stub keys
966   // [6] number of reservation size entries
967   // [7] payload length
968   // [8] payload checksum part 1
969   // [9] payload checksum part 2
971   // ...  code stub keys
972   // ...  serialized payload
// Header field offsets; each is one uint32 after the previous.
973   static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
974   static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
975   static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
976   static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
977   static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
978   static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
979   static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
980   static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
981   static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
982   static const int kHeaderSize = kChecksum2Offset + kInt32Size;
984 } } // namespace v8::internal
986 #endif // V8_SNAPSHOT_SERIALIZE_H_