1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_SERIALIZE_H_
6 #define V8_SERIALIZE_H_
8 #include "src/compiler.h"
9 #include "src/hashmap.h"
10 #include "src/heap-profiler.h"
11 #include "src/isolate.h"
12 #include "src/snapshot-source-sink.h"
17 // A TypeCode is used to distinguish different kinds of external reference.
18 // It is a single bit to make testing for types easy.
// An encoded external reference packs the TypeCode into the bits above
// kReferenceTypeShift and a per-type id into the low kReferenceIdBits bits
// (see ExternalReferenceDecoder::Lookup below for the decode side).
// NOTE(review): the TypeCode enum declaration itself is not visible in this
// chunk; only the UNCLASSIFIED enumerator appears here.
20 UNCLASSIFIED, // One-of-a-kind references.
// LAZY_DEOPTIMIZATION is presumably the last TypeCode enumerator, so this is
// the number of type codes -- TODO confirm against the full enum.
33 const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
34 const int kFirstTypeCode = UNCLASSIFIED;
// Layout of an encoded reference: low 16 bits = per-type id, bits above =
// type code.
36 const int kReferenceIdBits = 16;
37 const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
38 const int kReferenceTypeShift = kReferenceIdBits;
// Number of deoptimization-table entries serialized -- exact semantics are
// not visible in this chunk.
40 const int kDeoptTableSerializeEntryCount = 64;
42 // ExternalReferenceTable is a helper class that defines the relationship
43 // between external references and their encodings. It is used to build
44 // hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
45 class ExternalReferenceTable {
// Returns the table for |isolate|; implementation not visible here, but the
// name suggests a per-isolate singleton accessor -- TODO confirm.
47 static ExternalReferenceTable* instance(Isolate* isolate);
49 ~ExternalReferenceTable() { }
// Number of registered references.
51 int size() const { return refs_.length(); }
// Accessors for the i-th registered entry.
53 Address address(int i) { return refs_[i].address; }
55 uint32_t code(int i) { return refs_[i].code; }
57 const char* name(int i) { return refs_[i].name; }
// Highest id handed out so far for the given type code.
59 int max_id(int code) { return max_id_[code]; }
// refs_(64): pre-sizes the list; the table is filled by PopulateTable().
62 explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
63 PopulateTable(isolate);
66 struct ExternalReferenceEntry {
72 void PopulateTable(Isolate* isolate);
74 // For a few types of references, we can get their address from their id.
75 void AddFromId(TypeCode type,
80 // For other types of references, the caller will figure out the address.
81 void Add(Address address, TypeCode type, uint16_t id, const char* name);
// Convenience overload: registers an UNCLASSIFIED reference using the next
// free id for that type.
83 void Add(Address address, const char* name) {
84 Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name);
87 List<ExternalReferenceEntry> refs_;
// Per-TypeCode maximum id assigned so far; indexed by type code.
88 uint16_t max_id_[kTypeCodeCount];
// Encodes external reference addresses into the compact integer form used in
// snapshots (the inverse of ExternalReferenceDecoder).
92 class ExternalReferenceEncoder {
94 explicit ExternalReferenceEncoder(Isolate* isolate);
// Returns the snapshot encoding for the external address |key|.
96 uint32_t Encode(Address key) const;
// Human-readable name for |key|, for debugging output.
98 const char* NameOfAddress(Address key) const;
// Hash for address-keyed lookup; shifts out the low two bits, which are
// presumably always zero due to pointer alignment -- TODO confirm.
102 static uint32_t Hash(Address key) {
103 return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
106 int IndexOf(Address key) const;
108 void Put(Address key, int index);
// Decodes the integer reference encodings found in a snapshot back into
// external addresses (the inverse of ExternalReferenceEncoder).
114 class ExternalReferenceDecoder {
116 explicit ExternalReferenceDecoder(Isolate* isolate);
117 ~ExternalReferenceDecoder();
// Key 0 is reserved for the NULL address; all other keys are looked up in
// the two-level encodings_ table.
119 Address Decode(uint32_t key) const {
120 if (key == 0) return NULL;
// Two-level table: encodings_[type][id] -> address.
125 Address** encodings_;
// Splits |key| into its type code (high bits) and per-type id (low
// kReferenceIdBits bits) and returns the matching table slot.
127 Address* Lookup(uint32_t key) const {
128 int type = key >> kReferenceTypeShift;
129 DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount);
130 int id = key & kReferenceIdMask;
131 return &encodings_[type][id];
// Populates the slot for |key| while building the table.
134 void Put(uint32_t key, Address value) {
135 *Lookup(key) = value;
// Shared helpers for HashMap-based maps that are keyed by a HeapObject's
// address and store a uint32_t payload in the entry's void* value slot.
142 class AddressMapBase {
144 static void SetValue(HashMap::Entry* entry, uint32_t v) {
145 entry->value = reinterpret_cast<void*>(v);
148 static uint32_t GetValue(HashMap::Entry* entry) {
149 return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
// |insert| selects between lookup-only and lookup-or-insert behavior.
152 static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
154 return map->Lookup(Key(obj), Hash(obj), insert);
// NOTE(review): the address is truncated through int32_t although the return
// type is uint32_t -- harmless for a hash value, but worth confirming the
// cast choice is intentional.
158 static uint32_t Hash(HeapObject* obj) {
159 return static_cast<int32_t>(reinterpret_cast<intptr_t>(obj->address()));
162 static void* Key(HeapObject* obj) {
163 return reinterpret_cast<void*>(obj->address());
// Maps heap objects to their index in the root array, or kInvalidRootIndex
// when the object is not a root.
168 class RootIndexMap : public AddressMapBase {
170 explicit RootIndexMap(Isolate* isolate);
// Owns the underlying HashMap.
172 ~RootIndexMap() { delete map_; }
174 static const int kInvalidRootIndex = -1;
// Lookup-only (insert == false): never grows the map.
175 int Lookup(HeapObject* obj) {
176 HashMap::Entry* entry = LookupEntry(map_, obj, false);
177 if (entry) return GetValue(entry);
178 return kInvalidRootIndex;
184 DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
// A BackReference is a 32-bit value that identifies an already-serialized
// object by (space, chunk index, chunk offset), with two reserved sentinel
// values: kInvalidValue (no reference) and kSourceValue (the source string).
188 class BackReference {
190 explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
// Default-constructed references are invalid.
192 BackReference() : bitfield_(kInvalidValue) {}
194 static BackReference SourceReference() { return BackReference(kSourceValue); }
// Large objects are identified by a plain index in LO_SPACE rather than a
// chunk index/offset pair.
196 static BackReference LargeObjectReference(uint32_t index) {
197 return BackReference(SpaceBits::encode(LO_SPACE) |
198 ChunkOffsetBits::encode(index));
201 static BackReference Reference(AllocationSpace space, uint32_t chunk_index,
202 uint32_t chunk_offset) {
// Offsets are object-aligned, so the low kObjectAlignmentBits bits are
// dropped here and restored in chunk_offset().
203 DCHECK(IsAligned(chunk_offset, kObjectAlignment));
204 DCHECK_NE(LO_SPACE, space);
205 return BackReference(
206 SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
207 ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
210 bool is_valid() const { return bitfield_ != kInvalidValue; }
211 bool is_source() const { return bitfield_ == kSourceValue; }
213 AllocationSpace space() const {
215 return SpaceBits::decode(bitfield_);
// Re-applies the alignment shift removed by Reference().
218 uint32_t chunk_offset() const {
220 return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
223 uint32_t chunk_index() const {
225 return ChunkIndexBits::decode(bitfield_);
// The combined index+offset portion, without the space tag.
228 uint32_t reference() const {
230 return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
233 uint32_t bitfield() const { return bitfield_; }
// Sentinels occupy the top of the 32-bit value range.
236 static const uint32_t kInvalidValue = 0xFFFFFFFF;
237 static const uint32_t kSourceValue = 0xFFFFFFFE;
// The offset field is sized so that any in-page, object-aligned offset fits;
// the chunk index gets whatever bits remain after the space tag.
238 static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
239 static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
242 static const int kMaxChunkIndex = (1 << kChunkIndexSize) - 1;
// Bitfield layout (low to high): chunk offset, chunk index, space tag.
245 class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
247 : public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
249 : public BitField<AllocationSpace, ChunkIndexBits::kNext, kSpaceTagSize> {
256 // Mapping objects to their location after deserialization.
257 // This is used during building, but not at runtime by V8.
258 class BackReferenceMap : public AddressMapBase {
// Heap allocation is disallowed for the lifetime of the map so object
// addresses (the hash keys) stay stable.
261 : no_allocation_(), map_(new HashMap(HashMap::PointersMatch)) {}
263 ~BackReferenceMap() { delete map_; }
// Returns an invalid BackReference when |obj| has not been added.
265 BackReference Lookup(HeapObject* obj) {
266 HashMap::Entry* entry = LookupEntry(map_, obj, false);
267 return entry ? BackReference(GetValue(entry)) : BackReference();
// Each object may be added at most once (checked below).
270 void Add(HeapObject* obj, BackReference b) {
271 DCHECK(b.is_valid());
272 DCHECK_EQ(NULL, LookupEntry(map_, obj, false));
273 HashMap::Entry* entry = LookupEntry(map_, obj, true);
274 SetValue(entry, b.bitfield());
// Tags |string| with the special "source" sentinel reference.
277 void AddSourceString(String* string) {
278 Add(string, BackReference::SourceReference());
282 DisallowHeapAllocation no_allocation_;
284 DISALLOW_COPY_AND_ASSIGN(BackReferenceMap);
288 // The Serializer/Deserializer class is a common superclass for Serializer and
289 // Deserializer which is used to store common constants and methods used by
// both. It defines the snapshot byte-code format shared by the two sides.
291 class SerializerDeserializer: public ObjectVisitor {
293 static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
295 static int nop() { return kNop; }
297 // No reservation for large object space necessary.
298 static const int kNumberOfPreallocatedSpaces = LO_SPACE;
299 static const int kNumberOfSpaces = LAST_SPACE + 1;
// ---- Snapshot byte-code layout ----
// An instruction byte combines three orthogonal fields: where the object is
// found (kPointedToMask), how the pointer is encoded (kHowToCodeMask), and
// where inside the object it points (kWhereToPointMask).
302 // Where the pointed-to object can be found:
304 kNewObject = 0, // Object is next in snapshot.
305 // 1-7 One per space.
306 kRootArray = 0x9, // Object is found in root array.
307 kPartialSnapshotCache = 0xa, // Object is in the cache.
308 kExternalReference = 0xb, // Pointer to an external reference.
309 kSkip = 0xc, // Skip n bytes.
310 kBuiltin = 0xd, // Builtin code object.
311 kAttachedReference = 0xe, // Object is described in an attached list.
312 kNop = 0xf, // Does nothing, used to pad.
313 kBackref = 0x10, // Object is described relative to end.
314 // 0x11-0x17 One per space.
315 kBackrefWithSkip = 0x18, // Object is described relative to end.
316 // 0x19-0x1f One per space.
317 // 0x20-0x3f Used by misc. tags below.
318 kPointedToMask = 0x3f
321 // How to code the pointer to the object.
323 kPlain = 0, // Straight pointer.
324 // What this means depends on the architecture:
325 kFromCode = 0x40, // A pointer inlined in code.
326 kHowToCodeMask = 0x40
329 // For kRootArrayConstants
332 kHasSkipDistance = 0x40,
336 // Where to point within the object.
339 kInnerPointer = 0x80, // First insn in code object or payload of cell.
340 kWhereToPointMask = 0x80
344 // Raw data to be copied from the snapshot. This byte code does not advance
345 // the current pointer, which is used for code objects, where we write the
346 // entire code in one memcpy, then fix up stuff with kSkip and other byte
347 // codes that overwrite data.
348 static const int kRawData = 0x20;
349 // Some common raw lengths: 0x21-0x3f. These autoadvance the current pointer.
350 // A tag emitted at strategic points in the snapshot to delineate sections.
351 // If the deserializer does not find these at the expected moments then it
352 // is an indication that the snapshot and the VM do not fit together.
353 // Examine the build process for architecture, version or configuration
// mismatches.
355 static const int kSynchronize = 0x70;
356 // Used for the source code of the natives, which is in the executable, but
357 // is referred to from external strings in the snapshot.
358 static const int kNativesStringResource = 0x71;
359 static const int kRepeat = 0x72;
360 static const int kConstantRepeat = 0x73;
361 // 0x73-0x7f Repeat last word (subtract 0x72 to get the count).
362 static const int kMaxRepeats = 0x7f - 0x72;
// Maps a repeat count (1..kMaxRepeats) to its byte code and back.
363 static int CodeForRepeats(int repeats) {
364 DCHECK(repeats >= 1 && repeats <= kMaxRepeats);
365 return 0x72 + repeats;
367 static int RepeatsForCode(int byte_code) {
368 DCHECK(byte_code >= kConstantRepeat && byte_code <= 0x7f);
369 return byte_code - 0x72;
371 static const int kRootArrayConstants = 0xa0;
372 // 0xa0-0xbf Things from the first 32 elements of the root array.
373 static const int kRootArrayNumberOfConstantEncodings = 0x20;
// Extracts the root-array index (0..31) from a kRootArrayConstants byte.
374 static int RootArrayConstantFromByteCode(int byte_code) {
375 return byte_code & 0x1f;
378 static const int kAnyOldSpace = -1;
380 // A bitmask for getting the space out of an instruction.
381 static const int kSpaceMask = 7;
382 STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
386 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
387 class Deserializer: public SerializerDeserializer {
389 // Create a deserializer from a snapshot byte source.
390 explicit Deserializer(SnapshotByteSource* source);
392 virtual ~Deserializer();
394 // Deserialize the snapshot into an empty heap.
395 void Deserialize(Isolate* isolate);
// Controls whether running out of memory during deserialization aborts the
// process or reports failure to the caller.
397 enum OnOOM { FATAL_ON_OOM, NULL_ON_OOM };
399 // Deserialize a single object and the objects reachable from it.
400 // We may want to abort gracefully even if deserialization fails.
401 void DeserializePartial(Isolate* isolate, Object** root,
402 OnOOM on_oom = FATAL_ON_OOM);
// Records a chunk size the GC must reserve for |space| before
// deserialization starts; start/end addresses are filled in later (NULL
// here).
404 void AddReservation(int space, uint32_t chunk) {
406 DCHECK(space < kNumberOfSpaces);
407 reservations_[space].Add({chunk, NULL, NULL});
410 void FlushICacheForNewCodeObjects();
412 // Serialized user code reference certain objects that are provided in a list
413 // By calling this method, we assume that we are deserializing user code.
414 void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) {
415 attached_objects_ = attached_objects;
// User-code mode is signalled solely by the presence of attached objects.
418 bool deserializing_user_code() { return attached_objects_ != NULL; }
421 virtual void VisitPointers(Object** start, Object** end);
423 virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
429 // Allocation sites are present in the snapshot, and must be linked into
430 // a list at deserialization time.
431 void RelinkAllocationSite(AllocationSite* site);
433 // Fills in some heap data in an area from start to end (non-inclusive). The
434 // space id is used for the write barrier. The object_address is the address
435 // of the object we are writing into, or NULL if we are not writing into an
436 // object, i.e. if we are writing a series of tagged values that are not on
// the heap.
438 void ReadData(Object** start, Object** end, int space,
439 Address object_address);
440 void ReadObject(int space_number, Object** write_back);
441 Address Allocate(int space_index, int size);
443 // Special handling for serialized code like hooking up internalized strings.
444 HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
445 Object* ProcessBackRefInSerializedCode(Object* obj);
447 // This returns the address of an object that has been described in the
448 // snapshot by chunk index and offset.
449 HeapObject* GetBackReferencedObject(int space) {
// Large objects are back-referenced by a plain index into the list of
// deserialized large objects rather than by chunk/offset.
450 if (space == LO_SPACE) {
451 uint32_t index = source_->GetInt();
452 return deserialized_large_objects_[index];
// For preallocated spaces, decode the BackReference bitfield and resolve
// it against the reserved chunk's start address.
454 BackReference back_reference(source_->GetInt());
455 DCHECK(space < kNumberOfPreallocatedSpaces);
456 uint32_t chunk_index = back_reference.chunk_index();
457 DCHECK_LE(chunk_index, current_chunk_[space]);
458 uint32_t chunk_offset = back_reference.chunk_offset();
459 return HeapObject::FromAddress(reservations_[space][chunk_index].start +
464 // Cached current isolate.
467 // Objects from the attached object descriptions in the serialized user code.
468 Vector<Handle<Object> >* attached_objects_;
470 SnapshotByteSource* source_;
471 // The address of the next object that will be allocated in each space.
472 // Each space has a number of chunks reserved by the GC, with each chunk
473 // fitting into a page. Deserialized objects are allocated into the
474 // current chunk of the target space by bumping up high water mark.
475 Heap::Reservation reservations_[kNumberOfSpaces];
476 uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
477 Address high_water_[kNumberOfPreallocatedSpaces];
479 ExternalReferenceDecoder* external_reference_decoder_;
// Large objects in deserialization order, indexed by back-reference index.
481 List<HeapObject*> deserialized_large_objects_;
483 DISALLOW_COPY_AND_ASSIGN(Deserializer);
// Forward declaration; definition is not visible in this chunk.
487 class CodeAddressMap;
489 // There can be only one serializer per V8 process.
490 class Serializer : public SerializerDeserializer {
492 Serializer(Isolate* isolate, SnapshotByteSink* sink);
494 virtual void VisitPointers(Object** start, Object** end) OVERRIDE;
496 void FinalizeAllocation();
// Returns the completed chunk sizes for |space|. For LO_SPACE a single
// total size is reported instead of per-chunk sizes.
498 Vector<const uint32_t> FinalAllocationChunks(int space) const {
499 if (space == LO_SPACE) {
500 return Vector<const uint32_t>(&large_objects_total_size_, 1);
502 DCHECK_EQ(0, pending_chunk_[space]); // No pending chunks.
503 return completed_chunks_[space].ToConstVector();
507 Isolate* isolate() const { return isolate_; }
509 BackReferenceMap* back_reference_map() { return &back_reference_map_; }
510 RootIndexMap* root_index_map() { return &root_index_map_; }
// Visitor that serializes a single heap object, emitting byte codes into
// the sink for each pointer/reference it contains.
513 class ObjectSerializer : public ObjectVisitor {
515 ObjectSerializer(Serializer* serializer,
517 SnapshotByteSink* sink,
518 HowToCode how_to_code,
519 WhereToPoint where_to_point)
520 : serializer_(serializer),
521 object_(HeapObject::cast(o)),
// how_to_code and where_to_point occupy disjoint bit ranges, so they can
// be combined by addition.
523 reference_representation_(how_to_code + where_to_point),
524 bytes_processed_so_far_(0),
525 code_object_(o->IsCode()),
526 code_has_been_output_(false) { }
528 void VisitPointers(Object** start, Object** end);
529 void VisitEmbeddedPointer(RelocInfo* target);
530 void VisitExternalReference(Address* p);
531 void VisitExternalReference(RelocInfo* rinfo);
532 void VisitCodeTarget(RelocInfo* target);
533 void VisitCodeEntry(Address entry_address);
534 void VisitCell(RelocInfo* rinfo);
535 void VisitRuntimeEntry(RelocInfo* reloc);
536 // Used for seralizing the external strings that hold the natives source.
537 void VisitExternalOneByteString(
538 v8::String::ExternalOneByteStringResource** resource);
539 // We can't serialize a heap with external two byte strings.
540 void VisitExternalTwoByteString(
541 v8::String::ExternalStringResource** resource) {
546 void SerializePrologue(AllocationSpace space, int size, Map* map);
548 enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
549 // This function outputs or skips the raw data between the last pointer and
550 // up to the current position. It optionally can just return the number of
551 // bytes to skip instead of performing a skip instruction, in case the skip
552 // can be merged into the next instruction.
553 int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
554 // External strings are serialized in a way to resemble sequential strings.
555 void SerializeExternalString();
557 Serializer* serializer_;
559 SnapshotByteSink* sink_;
560 int reference_representation_;
561 int bytes_processed_so_far_;
563 bool code_has_been_output_;
// Subclasses decide how each object kind is serialized.
566 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
567 WhereToPoint where_to_point, int skip) = 0;
569 void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
572 void SerializeBackReference(BackReference back_reference,
573 HowToCode how_to_code,
574 WhereToPoint where_to_point, int skip);
575 void InitializeAllocators();
576 // This will return the space for an object.
577 static AllocationSpace SpaceOfObject(HeapObject* object);
578 BackReference AllocateLargeObject(int size);
579 BackReference Allocate(AllocationSpace space, int size);
580 int EncodeExternalReference(Address addr) {
581 return external_reference_encoder_->Encode(addr);
584 // GetInt reads 4 bytes at once, requiring padding at the end.
587 // Some roots should not be serialized, because their actual value depends on
588 // absolute addresses and they are reset after deserialization, anyway.
589 bool ShouldBeSkipped(Object** current);
591 // We may not need the code address map for logging for every instance
592 // of the serializer. Initialize it on demand.
593 void InitializeCodeAddressMap();
595 inline uint32_t max_chunk_size(int space) const {
597 DCHECK_LT(space, kNumberOfSpaces);
598 return max_chunk_size_[space];
603 SnapshotByteSink* sink_;
604 ExternalReferenceEncoder* external_reference_encoder_;
606 BackReferenceMap back_reference_map_;
607 RootIndexMap root_index_map_;
609 friend class ObjectSerializer;
610 friend class Deserializer;
613 CodeAddressMap* code_address_map_;
614 // Objects from the same space are put into chunks for bulk-allocation
615 // when deserializing. We have to make sure that each chunk fits into a
616 // page. So we track the chunk size in pending_chunk_ of a space, but
617 // when it exceeds a page, we complete the current chunk and start a new one.
618 uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
619 List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
620 uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];
622 // We map serialized large objects to indexes for back-referencing.
623 uint32_t large_objects_total_size_;
624 uint32_t seen_large_objects_index_;
626 DISALLOW_COPY_AND_ASSIGN(Serializer);
// Serializes a partial snapshot (e.g. a context) on top of an existing
// startup snapshot; objects already covered by the startup serializer are
// referenced through the partial snapshot cache instead of re-serialized.
630 class PartialSerializer : public Serializer {
632 PartialSerializer(Isolate* isolate,
633 Serializer* startup_snapshot_serializer,
634 SnapshotByteSink* sink)
635 : Serializer(isolate, sink),
636 startup_serializer_(startup_snapshot_serializer) {
637 InitializeCodeAddressMap();
640 // Serialize the objects reachable from a single object pointer.
641 void Serialize(Object** o);
642 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
643 WhereToPoint where_to_point, int skip) OVERRIDE;
646 int PartialSnapshotCacheIndex(HeapObject* o);
647 bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
648 // Scripts should be referred only through shared function infos. We can't
649 // allow them to be part of the partial snapshot because they contain a
650 // unique ID, and deserializing several partial snapshots containing script
651 // would cause dupes.
652 DCHECK(!o->IsScript());
653 return o->IsName() || o->IsSharedFunctionInfo() ||
654 o->IsHeapNumber() || o->IsCode() ||
657 startup_serializer_->isolate()->heap()->fixed_cow_array_map();
// Non-owning; the startup serializer must outlive this object.
661 Serializer* startup_serializer_;
662 DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
// Serializes the startup snapshot: the full strong-root object graph plus
// the partial snapshot cache and weak references.
666 class StartupSerializer : public Serializer {
668 StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
669 : Serializer(isolate, sink), root_index_wave_front_(0) {
670 // Clear the cache of objects used by the partial snapshot. After the
671 // strong roots have been serialized we can create a partial snapshot
672 // which will repopulate the cache with objects needed by that partial
// snapshot.
674 isolate->set_serialize_partial_snapshot_cache_length(0);
675 InitializeCodeAddressMap();
678 // The StartupSerializer has to serialize the root array, which is slightly
// different from other objects, hence the override.
680 virtual void VisitPointers(Object** start, Object** end) OVERRIDE;
682 // Serialize the current state of the heap. The order is:
683 // 1) Strong references.
684 // 2) Partial snapshot cache.
685 // 3) Weak references (e.g. the string table).
686 virtual void SerializeStrongReferences();
687 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
688 WhereToPoint where_to_point, int skip) OVERRIDE;
689 void SerializeWeakReferences();
691 SerializeStrongReferences();
692 SerializeWeakReferences();
// Tracks how far into the root array serialization has progressed.
697 intptr_t root_index_wave_front_;
698 DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
// Serializes compiled user code (a SharedFunctionInfo plus reachable
// objects) for caching; paired with the static Deserialize entry point.
702 class CodeSerializer : public Serializer {
704 static ScriptData* Serialize(Isolate* isolate,
705 Handle<SharedFunctionInfo> info,
706 Handle<String> source);
708 MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
709 Isolate* isolate, ScriptData* data, Handle<String> source);
// Well-known attached-reference indices.
711 static const int kSourceObjectIndex = 0;
712 static const int kCodeStubsBaseIndex = 1;
714 String* source() const {
// The raw String* is only safe to hand out while the heap cannot move it.
715 DCHECK(!AllowHeapAllocation::IsAllowed());
719 List<uint32_t>* stub_keys() { return &stub_keys_; }
720 int num_internalized_strings() const { return num_internalized_strings_; }
723 CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source,
725 : Serializer(isolate, sink),
727 main_code_(main_code),
728 num_internalized_strings_(0) {
// The source string is referenced via the source sentinel, not serialized.
729 back_reference_map_.AddSourceString(source);
732 virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
733 WhereToPoint where_to_point, int skip) OVERRIDE;
// Specialized serialization paths for objects that must not be embedded
// directly in the cache (builtins, ICs, code stubs, the source string).
735 void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
736 WhereToPoint where_to_point);
737 void SerializeIC(Code* ic, HowToCode how_to_code,
738 WhereToPoint where_to_point);
739 void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
740 WhereToPoint where_to_point);
741 void SerializeSourceObject(HowToCode how_to_code,
742 WhereToPoint where_to_point);
743 void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
744 WhereToPoint where_to_point);
745 int AddCodeStubKey(uint32_t stub_key);
// Raw pointers (source_, main_code_) stay valid because allocation is
// disallowed for the serializer's lifetime.
747 DisallowHeapAllocation no_gc_;
750 int num_internalized_strings_;
// Keys of code stubs referenced by the serialized code, in insertion order.
751 List<uint32_t> stub_keys_;
752 DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
756 // Wrapper around ScriptData to provide code-serializer-specific functionality.
757 class SerializedCodeData {
759 // Used by when consuming.
760 explicit SerializedCodeData(ScriptData* data, String* source)
761 : script_data_(data), owns_script_data_(false) {
762 DisallowHeapAllocation no_gc;
// Rejects data whose checksum does not match |source| (see IsSane below).
763 CHECK(IsSane(source));
766 // Used when producing.
767 SerializedCodeData(const List<byte>& payload, CodeSerializer* cs);
769 ~SerializedCodeData() {
770 if (owns_script_data_) delete script_data_;
773 // Return ScriptData object and relinquish ownership over it to the caller.
774 ScriptData* GetScriptData() {
775 ScriptData* result = script_data_;
777 DCHECK(owns_script_data_);
778 owns_script_data_ = false;
// A reservation word packs the chunk size (low 31 bits) with a last-chunk
// flag in the top bit.
784 uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); }
785 bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); }
788 uint32_t reservation;
790 DISALLOW_COPY_AND_ASSIGN(Reservation);
793 int NumInternalizedStrings() const {
794 return GetHeaderValue(kNumInternalizedStringsOffset);
// The reservation words immediately follow the fixed-size header.
797 Vector<const Reservation> Reservations() const {
798 return Vector<const Reservation>(reinterpret_cast<const Reservation*>(
799 script_data_->data() + kHeaderSize),
800 GetHeaderValue(kReservationsOffset));
// Code stub keys follow the reservations.
803 Vector<const uint32_t> CodeStubKeys() const {
804 int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
805 const byte* start = script_data_->data() + kHeaderSize + reservations_size;
806 return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
807 GetHeaderValue(kNumCodeStubKeysOffset));
// The serialization payload follows the stub keys.
810 const byte* Payload() const {
811 int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
812 int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
813 return script_data_->data() + kHeaderSize + reservations_size +
817 int PayloadLength() const {
818 int payload_length = GetHeaderValue(kPayloadLengthOffset);
// Sanity check: payload must run exactly to the end of the data.
819 DCHECK_EQ(script_data_->data() + script_data_->length(),
820 Payload() + payload_length);
821 return payload_length;
// Header is accessed as an array of ints at the start of the data.
825 void SetHeaderValue(int offset, int value) {
826 reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
830 int GetHeaderValue(int offset) const {
831 return reinterpret_cast<const int*>(script_data_->data())[offset];
834 bool IsSane(String* source);
836 int CheckSum(String* source);
838 // The data header consists of int-sized entries:
// [0] checksum over the source
840 // [1] number of internalized strings
841 // [2] number of code stub keys
842 // [3] payload length
843 // [4..10] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
844 static const int kCheckSumOffset = 0;
845 static const int kNumInternalizedStringsOffset = 1;
846 static const int kReservationsOffset = 2;
847 static const int kNumCodeStubKeysOffset = 3;
848 static const int kPayloadLengthOffset = 4;
849 static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
// Bit layout of a Reservation word: size in the low 31 bits, last-chunk
// flag in bit 31.
851 class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
852 class IsLastChunkBits : public BitField<bool, 31, 1> {};
854 // Following the header, we store, in sequential order
// - reservations and code stub keys (see accessors above)
856 // - serialization payload
858 ScriptData* script_data_;
// True only for the producing constructor; controls deletion in the dtor.
859 bool owns_script_data_;
861 } } // namespace v8::internal
863 #endif // V8_SERIALIZE_H_