1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_SERIALIZE_H_
6 #define V8_SERIALIZE_H_
8 #include "src/hashmap.h"
9 #include "src/heap-profiler.h"
10 #include "src/isolate.h"
11 #include "src/snapshot-source-sink.h"
18 // A TypeCode is used to distinguish different kinds of external reference.
19 // It is a single bit to make testing for types easy.
// NOTE(review): the enum header and most enumerators are missing from this
// listing; UNCLASSIFIED is visibly the first TypeCode and
// LAZY_DEOPTIMIZATION is presumably the last -- confirm against upstream.
21 UNCLASSIFIED, // One-of-a-kind references.
// Count of TypeCodes, derived from the (assumed) last enumerator.
34 const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
35 const int kFirstTypeCode = UNCLASSIFIED;
// An encoded external reference packs the 16-bit id in the low bits and the
// TypeCode in the bits above them (see ExternalReferenceDecoder::Lookup).
37 const int kReferenceIdBits = 16;
38 const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
39 const int kReferenceTypeShift = kReferenceIdBits;
// Presumably the number of lazy-deoptimization table entries serialized by
// id -- confirm against users of this constant.
41 const int kDeoptTableSerializeEntryCount = 64;
43 // ExternalReferenceTable is a helper class that defines the relationship
44 // between external references and their encodings. It is used to build
45 // hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
46 class ExternalReferenceTable {
// Per-isolate accessor for the table.
48 static ExternalReferenceTable* instance(Isolate* isolate);
50 ~ExternalReferenceTable() { }
// Number of registered references.
52 int size() const { return refs_.length(); }
// Accessors for the i-th entry: raw address, encoded code, debug name.
54 Address address(int i) { return refs_[i].address; }
56 uint32_t code(int i) { return refs_[i].code; }
58 const char* name(int i) { return refs_[i].name; }
// Highest id handed out so far for the given type code.
60 int max_id(int code) { return max_id_[code]; }
// 64 is only the initial capacity of refs_; the list grows on demand.
63 explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
64 PopulateTable(isolate);
// NOTE(review): the entry's fields are missing from this listing; the
// accessors above imply address/code/name members.
67 struct ExternalReferenceEntry {
// Registers every known external reference for |isolate|.
73 void PopulateTable(Isolate* isolate);
75 // For a few types of references, we can get their address from their id.
76 void AddFromId(TypeCode type,
81 // For other types of references, the caller will figure out the address.
82 void Add(Address address, TypeCode type, uint16_t id, const char* name);
// Convenience overload: registers an UNCLASSIFIED reference under the next
// free id for that type.
84 void Add(Address address, const char* name) {
85 Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name);
88 List<ExternalReferenceEntry> refs_;
89 uint16_t max_id_[kTypeCodeCount];
// Maps external reference addresses to their 32-bit encodings (see
// ExternalReferenceDecoder::Lookup for the bit layout).
93 class ExternalReferenceEncoder {
95 explicit ExternalReferenceEncoder(Isolate* isolate);
// Returns the encoding for a known external reference address.
97 uint32_t Encode(Address key) const;
// Debug helper: human-readable name for a known address.
99 const char* NameOfAddress(Address key) const;
// Drops the two low bits when hashing; assumes external reference
// addresses are at least 4-byte aligned -- confirm on all targets.
103 static uint32_t Hash(Address key) {
104 return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
107 int IndexOf(Address key) const;
109 void Put(Address key, int index);
// Maps 32-bit encodings back to external reference addresses.
115 class ExternalReferenceDecoder {
117 explicit ExternalReferenceDecoder(Isolate* isolate);
118 ~ExternalReferenceDecoder();
// Decodes |key| to its address; key 0 decodes to NULL.
120 Address Decode(uint32_t key) const {
121 if (key == 0) return NULL;
// Two-level table of addresses, indexed as encodings_[type][id].
126 Address** encodings_;
// Returns the table slot for |key|: TypeCode in the high bits, id in the
// low kReferenceIdBits bits.
128 Address* Lookup(uint32_t key) const {
129 int type = key >> kReferenceTypeShift;
130 DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount);
131 int id = key & kReferenceIdMask;
132 return &encodings_[type][id];
// Records the address for an encoded reference.
135 void Put(uint32_t key, Address value) {
136 *Lookup(key) = value;
143 class AddressMapBase {
145 static void SetValue(HashMap::Entry* entry, uint32_t v) {
146 entry->value = reinterpret_cast<void*>(v);
149 static uint32_t GetValue(HashMap::Entry* entry) {
150 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
153 static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
155 return map->Lookup(Key(obj), Hash(obj), insert);
159 static uint32_t Hash(HeapObject* obj) {
160 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
163 static void* Key(HeapObject* obj) {
164 return reinterpret_cast<void*>(obj->address());
// Maps heap objects to their index in the root list, if any.
169 class RootIndexMap : public AddressMapBase {
171 explicit RootIndexMap(Isolate* isolate);
173 ~RootIndexMap() { delete map_; }
// Returned by Lookup() when the object is not a root.
175 static const int kInvalidRootIndex = -1;
// Returns the root-list index for |obj|, or kInvalidRootIndex if the
// object is not a root.
176 int Lookup(HeapObject* obj) {
177 HashMap::Entry* entry = LookupEntry(map_, obj, false);
178 if (entry) return GetValue(entry);
179 return kInvalidRootIndex;
185 DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
// BackReference packs the location of an already-serialized object into 32
// bits: [ space tag | chunk index | chunk offset ], with sentinel values
// for "invalid", the source string, and the global proxy. Large objects
// have no chunks; their sequential index is stored in the offset bits.
189 class BackReference {
191 explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
// Default-constructed references are invalid until assigned.
193 BackReference() : bitfield_(kInvalidValue) {}
195 static BackReference SourceReference() { return BackReference(kSourceValue); }
197 static BackReference GlobalProxyReference() {
198 return BackReference(kGlobalProxyValue);
// LO_SPACE reuses the offset bits for the large-object index.
201 static BackReference LargeObjectReference(uint32_t index) {
202 return BackReference(SpaceBits::encode(LO_SPACE) |
203 ChunkOffsetBits::encode(index));
// Offsets are object-aligned, so the low kObjectAlignmentBits bits are
// dropped before encoding (and restored by chunk_offset()).
206 static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
207 uint32_t chunk_offset) {
208 DCHECK(IsAligned(chunk_offset, kObjectAlignment));
209 DCHECK_NE(LO_SPACE, space);
210 return BackReference(
211 SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
212 ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
215 bool is_valid() const { return bitfield_ != kInvalidValue; }
216 bool is_source() const { return bitfield_ == kSourceValue; }
217 bool is_global_proxy() const { return bitfield_ == kGlobalProxyValue; }
219 AllocationSpace space() const {
221 return SpaceBits::decode(bitfield_);
224 uint32_t chunk_offset() const {
226 return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
// Only meaningful for LO_SPACE references (see LargeObjectReference).
229 uint32_t large_object_index() const {
231 DCHECK(chunk_index() == 0);
232 return ChunkOffsetBits::decode(bitfield_);
235 uint32_t chunk_index() const {
237 return ChunkIndexBits::decode(bitfield_);
// Chunk index and offset without the space tag.
240 uint32_t reference() const {
242 return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
245 uint32_t bitfield() const { return bitfield_; }
// Sentinels sit at the top of the 32-bit range; presumably kMaxChunkIndex
// guarantees real encodings never reach them -- confirm.
248 static const uint32_t kInvalidValue = 0xFFFFFFFF;
249 static const uint32_t kSourceValue = 0xFFFFFFFE;
250 static const uint32_t kGlobalProxyValue = 0xFFFFFFFD;
// Offset bits cover one page at object-alignment granularity; the chunk
// index gets whatever bits remain after the space tag.
251 static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
252 static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
255 static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
// Bit fields, lowest to highest: offset, chunk index, space tag.
258 class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
260 : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
262 : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
269 // Mapping objects to their location after deserialization.
270 // This is used during building, but not at runtime by V8.
271 class BackReferenceMap : public AddressMapBase {
// NOTE(review): the constructor's first line is missing from this listing;
// only its initializer list is visible below.
274 : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
276 ~BackReferenceMap() { delete map_; }
// Returns the recorded back reference, or an invalid one if absent.
278 BackReference Lookup(HeapObject* obj) {
279 HashMap::Entry* entry = LookupEntry(map_, obj, false);
280 return entry ? BackReference(GetValue(entry)) : BackReference();
// Records |b| for |obj|; each object may be added at most once.
283 void Add(HeapObject* obj, BackReference b) {
284 DCHECK(b.is_valid());
285 DCHECK_NULL(LookupEntry(map_, obj, false));
286 HashMap::Entry* entry = LookupEntry(map_, obj, true);
287 SetValue(entry, b.bitfield());
// The source string and global proxy are mapped to fixed sentinels.
290 void AddSourceString(String* string) {
291 Add(string, BackReference::SourceReference());
294 void AddGlobalProxy(HeapObject* global_proxy) {
295 Add(global_proxy, BackReference::GlobalProxyReference());
// Keys are raw object addresses, so no GC may move objects while this map
// is alive.
299 DisallowHeapAllocation no_allocation_;
301 DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
// Fixed-size ring buffer of recently seen heap objects, letting the
// snapshot refer to them with a single opcode (see kHotObject below).
305 class HotObjectsList {
307 HotObjectsList() : index_(0) {
308 for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
// Inserts at the current position, evicting the oldest entry.
311 void Add(HeapObject* object) {
312 circular_queue_[index_] = object;
313 index_ = (index_ + 1) & kSizeMask;
316 HeapObject* Get(int index) {
317 DCHECK_NOT_NULL(circular_queue_[index]);
318 return circular_queue_[index];
321 static const int kNotFound = -1;
// Linear scan is fine: kSize is only 8.
323 int Find(HeapObject* object) {
324 for (int i = 0; i < kSize; i++) {
325 if (circular_queue_[i] == object) return i;
330 static const int kSize = 8;
// Power-of-two size makes the wrap-around a simple mask.
333 STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
334 static const int kSizeMask = kSize - 1;
335 HeapObject* circular_queue_[kSize];
338 DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
342 // The Serializer/Deserializer class is a common superclass for Serializer and
343 // Deserializer which is used to store common constants and methods used by both.
345 class SerializerDeserializer: public ObjectVisitor {
// Visits the isolate's partial snapshot cache during root iteration.
347 static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
349 static int nop() { return kNop; }
351 // No reservation for large object space necessary.
352 static const int kNumberOfPreallocatedSpaces = LO_SPACE;
353 static const int kNumberOfSpaces = LAST_SPACE + 1;
// The serialization byte codes combine three orthogonal pieces of
// information: where the object is found, how the pointer is encoded, and
// where within the object it points.
356 // Where the pointed-to object can be found:
358 kNewObject = 0, // Object is next in snapshot.
359 // 1-7 One per space.
361 kRootArray = 0x9, // Object is found in root array.
362 kPartialSnapshotCache = 0xa, // Object is in the cache.
363 kExternalReference = 0xb, // Pointer to an external reference.
364 kSkip = 0xc, // Skip n bytes.
365 kBuiltin = 0xd, // Builtin code object.
366 kAttachedReference = 0xe, // Object is described in an attached list.
367 // 0xf Used by misc. See below.
368 kBackref = 0x10, // Object is described relative to end.
369 // 0x11-0x17 One per space.
370 kBackrefWithSkip = 0x18, // Object is described relative to end.
371 // 0x19-0x1f One per space.
372 // 0x20-0x3f Used by misc. See below.
373 kPointedToMask = 0x3f
376 // How to code the pointer to the object.
378 kPlain = 0, // Straight pointer.
379 // What this means depends on the architecture:
380 kFromCode = 0x40, // A pointer inlined in code.
381 kHowToCodeMask = 0x40
384 // For kRootArrayConstants
387 kHasSkipDistance = 0x40,
391 // Where to point within the object.
394 kInnerPointer = 0x80, // First insn in code object or payload of cell.
395 kWhereToPointMask = 0x80
399 // Raw data to be copied from the snapshot. This byte code does not advance
400 // the current pointer, which is used for code objects, where we write the
401 // entire code in one memcpy, then fix up stuff with kSkip and other byte
402 // codes that overwrite data.
403 static const int kRawData = 0x20;
404 // Some common raw lengths: 0x21-0x3f.
405 // These autoadvance the current pointer.
406 static const int kOnePointerRawData = 0x21;
408 static const int kVariableRepeat = 0x60;
409 // 0x61-0x6f Repeat last word
410 static const int kFixedRepeat = 0x61;
411 static const int kFixedRepeatBase = kFixedRepeat - 1;
412 static const int kLastFixedRepeat = 0x6f;
413 static const int kMaxFixedRepeats = kLastFixedRepeat - kFixedRepeatBase;
// Maps a repeat count (1..kMaxFixedRepeats) to its byte code...
414 static int CodeForRepeats(int repeats) {
415 DCHECK(repeats >= 1 && repeats <= kMaxFixedRepeats);
416 return kFixedRepeatBase + repeats;
// ...and back again.
418 static int RepeatsForCode(int byte_code) {
419 DCHECK(byte_code > kFixedRepeatBase && byte_code <= kLastFixedRepeat);
420 return byte_code - kFixedRepeatBase;
423 // Hot objects are a small set of recently seen or back-referenced objects.
424 // They are represented by a single opcode to save space.
425 // We use 0x70..0x77 for 8 hot objects, and 0x78..0x7f to add skip.
426 static const int kHotObject = 0x70;
427 static const int kMaxHotObjectIndex = 0x77 - kHotObject;
428 static const int kHotObjectWithSkip = 0x78;
429 STATIC_ASSERT(HotObjectsList::kSize == kMaxHotObjectIndex + 1);
430 STATIC_ASSERT(0x7f - kHotObjectWithSkip == kMaxHotObjectIndex);
431 static const int kHotObjectIndexMask = 0x7;
433 static const int kRootArrayConstants = 0xa0;
434 // 0xa0-0xbf Things from the first 32 elements of the root array.
435 static const int kRootArrayNumberOfConstantEncodings = 0x20;
// Extracts the root-array index from a kRootArrayConstants byte code.
436 static int RootArrayConstantFromByteCode(int byte_code) {
437 return byte_code & 0x1f;
440 // Do nothing, used for padding.
441 static const int kNop = 0xf;
443 // Move to next reserved chunk.
444 static const int kNextChunk = 0x4f;
446 // A tag emitted at strategic points in the snapshot to delineate sections.
447 // If the deserializer does not find these at the expected moments then it
448 // is an indication that the snapshot and the VM do not fit together.
449 // Examine the build process for architecture, version or configuration
// mismatches.
451 static const int kSynchronize = 0x8f;
453 // Used for the source code of the natives, which is in the executable, but
454 // is referred to from external strings in the snapshot.
455 static const int kNativesStringResource = 0xcf;
457 static const int kAnyOldSpace = -1;
459 // A bitmask for getting the space out of an instruction.
460 static const int kSpaceMask = 7;
461 STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
463 // Sentinel after a new object to indicate that double alignment is needed.
464 static const int kDoubleAlignmentSentinel = 0;
466 // Used as index for the attached reference representing the source object.
467 static const int kSourceObjectReference = 0;
469 // Used as index for the attached reference representing the global proxy.
470 static const int kGlobalProxyReference = 0;
472 HotObjectsList hot_objects_;
// Owns (optionally) a byte buffer holding reservations plus serialization
// payload, with helpers for reading/writing uint32_t header fields.
476 class SerializedData {
// A Reservation is a chunk size with an is-last flag in the top bit.
480 explicit Reservation(uint32_t size)
481 : reservation_(ChunkSizeBits::encode(size)) {}
483 uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
484 bool is_last() const { return IsLastChunkBits::decode(reservation_); }
486 void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
489 uint32_t reservation_;
// Wraps an existing buffer without taking ownership.
492 SerializedData(byte* data, int size)
493 : data_(data), size_(size), owns_data_(false) {}
494 SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
497 if (owns_data_) DeleteArray<byte>(data_);
500 class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
501 class IsLastChunkBits : public BitField<bool, 31, 1> {};
// Header fields go through memcpy, presumably because data_ + offset is
// not guaranteed to be uint32_t-aligned. NOTE(review): the pointer casts
// are redundant -- memcpy takes void* -- and GetHeaderValue even casts to
// int* although the field is a uint32_t.
504 void SetHeaderValue(int offset, uint32_t value) {
505 uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
506 memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
509 uint32_t GetHeaderValue(int offset) const {
511 memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
515 void AllocateData(int size);
523 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
524 class Deserializer: public SerializerDeserializer {
526 // Create a deserializer from a snapshot byte source.
527 template <class Data>
528 explicit Deserializer(Data* data)
530 source_(data->Payload()),
531 external_reference_decoder_(NULL),
532 deserialized_large_objects_(0),
533 deserializing_user_code_(false) {
534 DecodeReservation(data->Reservations());
537 virtual ~Deserializer();
539 // Deserialize the snapshot into an empty heap.
540 void Deserialize(Isolate* isolate);
542 // Deserialize a single object and the objects reachable from it.
543 MaybeHandle<Object> DeserializePartial(
544 Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
545 Handle<FixedArray>* outdated_contexts_out);
547 // Deserialize a shared function info. Fail gracefully.
548 MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);
550 void FlushICacheForNewCodeObjects();
552 // Pass a vector of externally-provided objects referenced by the snapshot.
553 // The ownership to its backing store is handed over as well.
554 void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
555 attached_objects_ = attached_objects;
559 virtual void VisitPointers(Object** start, Object** end);
561 virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
565 void Initialize(Isolate* isolate);
567 bool deserializing_user_code() { return deserializing_user_code_; }
// Unpacks the per-space chunk sizes shipped with the snapshot.
569 void DecodeReservation(Vector<const SerializedData::Reservation> res);
// memcpy tolerates |dest|/|src| slots that are not pointer-aligned.
573 void UnalignedCopy(Object** dest, Object** src) {
574 memcpy(dest, src, sizeof(*src));
577 // Allocation sites are present in the snapshot, and must be linked into
578 // a list at deserialization time.
579 void RelinkAllocationSite(AllocationSite* site);
581 // Fills in some heap data in an area from start to end (non-inclusive). The
582 // space id is used for the write barrier. The object_address is the address
583 // of the object we are writing into, or NULL if we are not writing into an
584 // object, i.e. if we are writing a series of tagged values that are not on
// the heap.
586 void ReadData(Object** start, Object** end, int space,
587 Address object_address);
588 void ReadObject(int space_number, Object** write_back);
// Bump-allocates |size| bytes in the current chunk of |space_index|.
589 Address Allocate(int space_index, int size);
591 // Special handling for serialized code like hooking up internalized strings.
592 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
594 // This returns the address of an object that has been described in the
595 // snapshot by chunk index and offset.
596 HeapObject* GetBackReferencedObject(int space);
598 // Cached current isolate.
601 // Objects from the attached object descriptions in the serialized user code.
602 Vector<Handle<Object> > attached_objects_;
604 SnapshotByteSource source_;
605 // The address of the next object that will be allocated in each space.
606 // Each space has a number of chunks reserved by the GC, with each chunk
607 // fitting into a page. Deserialized objects are allocated into the
608 // current chunk of the target space by bumping up high water mark.
609 Heap::Reservation reservations_[kNumberOfSpaces];
610 uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
611 Address high_water_[kNumberOfPreallocatedSpaces];
613 ExternalReferenceDecoder* external_reference_decoder_;
615 List<HeapObject*> deserialized_large_objects_;
617 bool deserializing_user_code_;
619 DISALLOW_COPY_AND_ASSIGN(Deserializer);
623 class CodeAddressMap;
625 // There can be only one serializer per V8 process.
626 class Serializer : public SerializerDeserializer {
628 Serializer(Isolate* isolate, SnapshotByteSink* sink);
630 void VisitPointers(Object** start, Object** end) OVERRIDE;
// Writes out the chunk sizes accumulated per space during serialization.
632 void EncodeReservations(List<SerializedData::Reservation>* out) const;
634 Isolate* isolate() const { return isolate_; }
636 BackReferenceMap* back_reference_map() { return &back_reference_map_; }
637 RootIndexMap* root_index_map() { return &root_index_map_; }
// Serializes exactly one heap object (its map, body and the references it
// contains) into the sink.
640 class ObjectSerializer : public ObjectVisitor {
642 ObjectSerializer(Serializer* serializer,
644 SnapshotByteSink* sink,
645 HowToCode how_to_code,
646 WhereToPoint where_to_point)
647 : serializer_(serializer),
648 object_(HeapObject::cast(o)),
650 reference_representation_(how_to_code + where_to_point),
651 bytes_processed_so_far_(0),
652 code_object_(o->IsCode()),
653 code_has_been_output_(false) { }
655 void VisitPointers(Object** start, Object** end);
656 void VisitEmbeddedPointer(RelocInfo* target);
657 void VisitExternalReference(Address* p);
658 void VisitExternalReference(RelocInfo* rinfo);
659 void VisitCodeTarget(RelocInfo* target);
660 void VisitCodeEntry(Address entry_address);
661 void VisitCell(RelocInfo* rinfo);
662 void VisitRuntimeEntry(RelocInfo* reloc);
663 // Used for serializing the external strings that hold the natives source.
664 void VisitExternalOneByteString(
665 v8::String::ExternalOneByteStringResource** resource);
666 // We can't serialize a heap with external two byte strings.
667 void VisitExternalTwoByteString(
668 v8::String::ExternalStringResource** resource) {
// Emits the allocation byte codes and the map for a new object.
673 void SerializePrologue(AllocationSpace space, int size, Map* map);
675 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
676 // This function outputs or skips the raw data between the last pointer and
677 // up to the current position. It optionally can just return the number of
678 // bytes to skip instead of performing a skip instruction, in case the skip
679 // can be merged into the next instruction.
680 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
681 // External strings are serialized in a way to resemble sequential strings.
682 void SerializeExternalString();
684 Serializer* serializer_;
686 SnapshotByteSink* sink_;
687 int reference_representation_;
688 int bytes_processed_so_far_;
690 bool code_has_been_output_;
// Subclasses decide how each object is encoded (root, back reference,
// partial-cache entry, full serialization, ...).
693 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
694 WhereToPoint where_to_point, int skip) = 0;
696 void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
699 // Returns true if the object was successfully serialized.
700 bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
701 WhereToPoint where_to_point, int skip);
// Emits any pending skip distance before the next instruction.
703 inline void FlushSkip(int skip) {
705 sink_->Put(kSkip, "SkipFromSerializeObject");
706 sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
710 bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
712 // This will return the space for an object.
713 static AllocationSpace SpaceOfObject(HeapObject* object);
714 BackReference AllocateLargeObject(int size);
715 BackReference Allocate(AllocationSpace space, int size);
716 int EncodeExternalReference(Address addr) {
717 return external_reference_encoder_->Encode(addr);
720 // GetInt reads 4 bytes at once, requiring padding at the end.
723 // Some roots should not be serialized, because their actual value depends on
724 // absolute addresses and they are reset after deserialization, anyway.
725 bool ShouldBeSkipped(Object** current);
727 // We may not need the code address map for logging for every instance
728 // of the serializer. Initialize it on demand.
729 void InitializeCodeAddressMap();
731 inline uint32_t max_chunk_size(int space) const {
733 DCHECK_LT(space, kNumberOfSpaces);
734 return max_chunk_size_[space];
737 SnapshotByteSink* sink() const { return sink_; }
741 SnapshotByteSink* sink_;
742 ExternalReferenceEncoder* external_reference_encoder_;
744 BackReferenceMap back_reference_map_;
745 RootIndexMap root_index_map_;
747 friend class Deserializer;
748 friend class ObjectSerializer;
749 friend class SnapshotData;
752 CodeAddressMap* code_address_map_;
753 // Objects from the same space are put into chunks for bulk-allocation
754 // when deserializing. We have to make sure that each chunk fits into a
755 // page. So we track the chunk size in pending_chunk_ of a space, but
756 // when it exceeds a page, we complete the current chunk and start a new one.
757 uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
758 List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
759 uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
761 // We map serialized large objects to indexes for back-referencing.
762 uint32_t large_objects_total_size_;
763 uint32_t seen_large_objects_index_;
765 DISALLOW_COPY_AND_ASSIGN(Serializer);
// Serializer for partial snapshots: objects already covered by the startup
// snapshot are referenced through the partial snapshot cache instead of
// being re-serialized.
769 class PartialSerializer : public Serializer {
771 PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
772 SnapshotByteSink* sink)
773 : Serializer(isolate, sink),
774 startup_serializer_(startup_snapshot_serializer),
775 outdated_contexts_(0),
776 global_object_(NULL) {
777 InitializeCodeAddressMap();
780 // Serialize the objects reachable from a single object pointer.
781 void Serialize(Object** o);
782 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
783 WhereToPoint where_to_point, int skip) OVERRIDE;
786 int PartialSnapshotCacheIndex(HeapObject* o);
787 bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
788 // Scripts should be referred only through shared function infos. We can't
789 // allow them to be part of the partial snapshot because they contain a
790 // unique ID, and deserializing several partial snapshots containing script
791 // would cause dupes.
792 DCHECK(!o->IsScript());
793 return o->IsName() || o->IsSharedFunctionInfo() ||
794 o->IsHeapNumber() || o->IsCode() ||
// NOTE(review): part of this predicate is missing from the listing; the
// visible tail compares against the fixed COW array map.
797 startup_serializer_->isolate()->heap()->fixed_cow_array_map();
800 void SerializeOutdatedContextsAsFixedArray();
802 Serializer* startup_serializer_;
803 List<BackReference> outdated_contexts_;
804 Object* global_object_;
805 DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
// Serializer for the startup snapshot: strong roots first, then the
// partial snapshot cache, then weak references.
809 class StartupSerializer : public Serializer {
811 StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
812 : Serializer(isolate, sink), root_index_wave_front_(0) {
813 // Clear the cache of objects used by the partial snapshot. After the
814 // strong roots have been serialized we can create a partial snapshot
815 // which will repopulate the cache with objects needed by that partial
// snapshot.
817 isolate->set_serialize_partial_snapshot_cache_length(0);
818 InitializeCodeAddressMap();
821 // The StartupSerializer has to serialize the root array, which is slightly
// different from other objects.
823 void VisitPointers(Object** start, Object** end) OVERRIDE;
825 // Serialize the current state of the heap. The order is:
826 // 1) Strong references.
827 // 2) Partial snapshot cache.
828 // 3) Weak references (e.g. the string table).
829 virtual void SerializeStrongReferences();
830 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
831 WhereToPoint where_to_point, int skip) OVERRIDE;
832 void SerializeWeakReferences();
// NOTE(review): the two calls below appear to be the body of a Serialize()
// method whose declaration line is missing from this listing.
834 SerializeStrongReferences();
835 SerializeWeakReferences();
// Tracks how far into the root list serialization has progressed.
840 intptr_t root_index_wave_front_;
841 DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
// Serializes compiled code (a SharedFunctionInfo) so it can be cached and
// later deserialized against the same source string.
845 class CodeSerializer : public Serializer {
847 static ScriptData* Serialize(Isolate* isolate,
848 Handle<SharedFunctionInfo> info,
849 Handle<String> source);
851 MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
852 Isolate* isolate, ScriptData* cached_data, Handle<String> source);
// The source string is attached object number 0 (see
// kSourceObjectReference in SerializerDeserializer).
854 static const int kSourceObjectIndex = 0;
855 STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
857 static const int kCodeStubsBaseIndex = 1;
859 String* source() const {
860 DCHECK(!AllowHeapAllocation::IsAllowed());
864 const List<uint32_t>* stub_keys() const { return &stub_keys_; }
865 int num_internalized_strings() const { return num_internalized_strings_; }
868 CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
870 : Serializer(isolate, sink),
872 main_code_(main_code),
873 num_internalized_strings_(0) {
874 back_reference_map_.AddSourceString(source);
877 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
878 WhereToPoint where_to_point, int skip) OVERRIDE;
// Specialized encodings: builtins and code stubs are stored by index/key
// rather than as full objects; everything else goes through
// SerializeGeneric.
880 void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
881 WhereToPoint where_to_point);
882 void SerializeIC(Code* ic, HowToCode how_to_code,
883 WhereToPoint where_to_point);
884 void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
885 WhereToPoint where_to_point);
886 void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
887 WhereToPoint where_to_point);
888 int AddCodeStubKey(uint32_t stub_key);
// Raw String*/Code* members below make GC unsafe while serializing.
890 DisallowHeapAllocation no_gc_;
893 int num_internalized_strings_;
894 List<uint32_t> stub_keys_;
895 DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
899 // Wrapper around reservation sizes and the serialization payload.
900 class SnapshotData : public SerializedData {
902 // Used when producing.
903 explicit SnapshotData(const Serializer& ser);
905 // Used when consuming.
906 explicit SnapshotData(const Vector<const byte> snapshot)
907 : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
// Accessors into the underlying buffer (see the header layout below).
911 Vector<const Reservation> Reservations() const;
912 Vector<const byte> Payload() const;
914 Vector<const byte> RawData() const {
915 return Vector<const byte>(data_, size_);
920 // The data header consists of uint32_t-sized entries:
// [0] checksum
922 // [1] number of reservation size entries
923 // [2] payload length
// ... reservation sizes
925 // ... serialized payload
926 static const int kCheckSumOffset = 0;
927 static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
928 static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
929 static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
933 // Wrapper around ScriptData to provide code-serializer-specific functionality.
934 class SerializedCodeData : public SerializedData {
936 // Used when consuming.
937 static SerializedCodeData* FromCachedData(ScriptData* cached_data,
940 // Used when producing.
941 SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
943 // Return ScriptData object and relinquish ownership over it to the caller.
944 ScriptData* GetScriptData();
946 Vector<const Reservation> Reservations() const;
947 Vector<const byte> Payload() const;
949 int NumInternalizedStrings() const;
950 Vector<const uint32_t> CodeStubKeys() const;
953 explicit SerializedCodeData(ScriptData* data);
// Reasons cached data may be rejected. NOTE(review): some enumerator
// values (0, 3, 5) are missing from this listing.
955 enum SanityCheckResult {
957 MAGIC_NUMBER_MISMATCH = 1,
958 VERSION_MISMATCH = 2,
960 CPU_FEATURES_MISMATCH = 4,
962 CHECKSUM_MISMATCH = 6,
// Validates magic number, version, source, cpu features and checksum.
965 SanityCheckResult SanityCheck(String* source) const;
// NOTE(review): only the source length feeds this hash -- cheap, but any
// same-length source collides; acceptable only as a sanity check.
967 uint32_t SourceHash(String* source) const { return source->length(); }
969 static const uint32_t kMagicNumber = 0xC0D1F1ED;
971 // The data header consists of uint32_t-sized entries:
// [ 0] magic number
// [ 1] version hash
// [ 2] source hash
// [ 3] cpu features
// [ 4] flag hash
977 // [ 5] number of internalized strings
978 // [ 6] number of code stub keys
979 // [ 7] number of reservation size entries
980 // [ 8] payload length
981 // [ 9] payload checksum part 1
982 // [10] payload checksum part 2
// ... reservation sizes
984 // ... code stub keys
985 // ... serialized payload
986 static const int kMagicNumberOffset = 0;
987 static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
988 static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
989 static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
990 static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
991 static const int kNumInternalizedStringsOffset = kFlagHashOffset + kInt32Size;
992 static const int kNumReservationsOffset =
993 kNumInternalizedStringsOffset + kInt32Size;
994 static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
995 static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
996 static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
997 static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
998 static const int kHeaderSize = kChecksum2Offset + kInt32Size;
1000 } } // namespace v8::internal
1002 #endif // V8_SERIALIZE_H_