// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/accessors.h"
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/natives.h"
#include "src/objects.h"
#include "src/runtime/runtime.h"
#include "src/serialize.h"
#include "src/snapshot.h"
#include "src/snapshot-source-sink.h"
#include "src/v8threads.h"
#include "src/version.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Coding of external references.

// The encoding of an external reference. The type is in the high word.
// The id is in the low word.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  return static_cast<uint32_t>(type) << 16 | id;
}
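
// (Editorial note.) As an illustration of the layout above: a hypothetical
// type code of 0x0002 paired with an id of 0x0007 encodes to 0x00020007.
// Decoding is the inverse arithmetic: the type is (code >> 16) and the id is
// (code & 0xFFFF).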


static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter if deserialization happens without
  // setting up counters.
  static int dummy_counter = 0;
  return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
}


ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
  ExternalReferenceTable* external_reference_table =
      isolate->external_reference_table();
  if (external_reference_table == NULL) {
    external_reference_table = new ExternalReferenceTable(isolate);
    isolate->set_external_reference_table(external_reference_table);
  }
  return external_reference_table;
}


void ExternalReferenceTable::AddFromId(TypeCode type,
                                       uint16_t id,
                                       const char* name,
                                       Isolate* isolate) {
  Address address;
  switch (type) {
    case C_BUILTIN: {
      ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case BUILTIN: {
      ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
      address = ref.address();
      break;
    }
    case RUNTIME_FUNCTION: {
      ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
      address = ref.address();
      break;
    }
    case IC_UTILITY: {
      ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
                            isolate);
      address = ref.address();
      break;
    }
    default:
      UNREACHABLE();
      return;
  }
  Add(address, type, id, name);
}


void ExternalReferenceTable::Add(Address address,
                                 TypeCode type,
                                 uint16_t id,
                                 const char* name) {
  DCHECK_NOT_NULL(address);
  ExternalReferenceEntry entry;
  entry.address = address;
  entry.code = EncodeExternal(type, id);
  entry.name = name;
  DCHECK_NE(0u, entry.code);
  // Assert that the code is added in ascending order to rule out duplicates.
  DCHECK((size() == 0) || (code(size() - 1) < entry.code));
  refs_.Add(entry);
  if (id > max_id_[type]) max_id_[type] = id;
}
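
// (Editorial note.) max_id_ is tracked per type code here so that
// ExternalReferenceDecoder, defined further below, can size one lookup array
// per type when it builds the inverse code -> address mapping.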


void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // Miscellaneous
  Add(ExternalReference::roots_array_start(isolate).address(),
      "Heap::roots_array_start()");
  Add(ExternalReference::address_of_stack_limit(isolate).address(),
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
      "StackGuard::address_of_real_jslimit()");
  Add(ExternalReference::new_space_start(isolate).address(),
      "Heap::NewSpaceStart()");
  Add(ExternalReference::new_space_mask(isolate).address(),
      "Heap::NewSpaceMask()");
  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
      "Heap::NewSpaceAllocationTopAddress()");
  Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
      "Debug::step_in_fp_addr()");
  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
      "mod_two_doubles");
  // Keyed lookup cache.
  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
      "KeyedLookupCache::keys()");
  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
      "KeyedLookupCache::field_offsets()");
  Add(ExternalReference::handle_scope_next_address(isolate).address(),
      "HandleScope::next");
  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
      "HandleScope::limit");
  Add(ExternalReference::handle_scope_level_address(isolate).address(),
      "HandleScope::level");
  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
      "Deoptimizer::New()");
  Add(ExternalReference::compute_output_frames_function(isolate).address(),
      "Deoptimizer::ComputeOutputFrames()");
  Add(ExternalReference::address_of_min_int().address(),
      "LDoubleConstant::min_int");
  Add(ExternalReference::address_of_one_half().address(),
      "LDoubleConstant::one_half");
  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
  Add(ExternalReference::address_of_negative_infinity().address(),
      "LDoubleConstant::negative_infinity");
  Add(ExternalReference::power_double_double_function(isolate).address(),
      "power_double_double_function");
  Add(ExternalReference::power_double_int_function(isolate).address(),
      "power_double_int_function");
  Add(ExternalReference::math_log_double_function(isolate).address(),
      "std::log");
  Add(ExternalReference::store_buffer_top(isolate).address(),
      "store_buffer_top");
  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
  Add(ExternalReference::get_date_field_function(isolate).address(),
      "JSDate::GetField");
  Add(ExternalReference::date_cache_stamp(isolate).address(),
      "date_cache_stamp");
  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
      "address_of_pending_message_obj");
  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
      "address_of_has_pending_message");
  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
      "pending_message_script");
  Add(ExternalReference::get_make_code_young_function(isolate).address(),
      "Code::MakeCodeYoung");
  Add(ExternalReference::cpu_features().address(), "cpu_features");
  Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
      "Runtime::AllocateInNewSpace");
  Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
      "Runtime::AllocateInTargetSpace");
  Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
          .address(),
      "Heap::OldPointerSpaceAllocationTopAddress");
  Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate)
          .address(),
      "Heap::OldPointerSpaceAllocationLimitAddress");
  Add(ExternalReference::old_data_space_allocation_top_address(isolate)
          .address(),
      "Heap::OldDataSpaceAllocationTopAddress");
  Add(ExternalReference::old_data_space_allocation_limit_address(isolate)
          .address(),
      "Heap::OldDataSpaceAllocationLimitAddress");
  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
      "Heap::allocation_sites_list_address()");
  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
      "Code::MarkCodeAsExecuted");
  Add(ExternalReference::is_profiling_address(isolate).address(),
      "CpuProfiler::is_profiling");
  Add(ExternalReference::scheduled_exception_address(isolate).address(),
      "Isolate::scheduled_exception");
  Add(ExternalReference::invoke_function_callback(isolate).address(),
      "InvokeFunctionCallback");
  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
      "InvokeAccessorGetterCallback");
  Add(ExternalReference::flush_icache_function(isolate).address(),
      "CpuFeatures::FlushICache");
  Add(ExternalReference::log_enter_external_function(isolate).address(),
      "Logger::EnterExternal");
  Add(ExternalReference::log_leave_external_function(isolate).address(),
      "Logger::LeaveExternal");
  Add(ExternalReference::address_of_minus_one_half().address(),
      "double_constants.minus_one_half");
  Add(ExternalReference::stress_deopt_count(isolate).address(),
      "Isolate::stress_deopt_count_address()");
  Add(ExternalReference::incremental_marking_record_write_function(isolate)
          .address(),
      "IncrementalMarking::RecordWriteFromCode");

  // Debug addresses
  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
      "Debug::after_break_target_address()");
  Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
          .address(),
      "Debug::restarter_frame_function_pointer_address()");
  Add(ExternalReference::debug_is_active_address(isolate).address(),
      "Debug::is_active_address()");

#ifndef V8_INTERPRETED_REGEXP
  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
      "RegExpMacroAssembler*::CheckStackGuardState()");
  Add(ExternalReference::re_grow_stack(isolate).address(),
      "NativeRegExpMacroAssembler::GrowStack()");
  Add(ExternalReference::re_word_character_map().address(),
      "NativeRegExpMacroAssembler::word_character_map");
  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
      "RegExpStack::limit_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
          .address(),
      "RegExpStack::memory_address()");
  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
      "RegExpStack::memory_size()");
  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
      "OffsetsVector::static_offsets_vector");
#endif  // V8_INTERPRETED_REGEXP

  // The following populates all of the different types of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // cause code bloat very easily.  Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name, ignored) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name, ignored) \
  { BUILTIN, \
    Builtins::k##name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
  INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
  { RUNTIME_FUNCTION, \
    Runtime::kInlineOptimized##name, \
    "Runtime::" #name },

  INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
#undef INLINE_OPTIMIZED_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < arraysize(ref_table); ++i) {
    AddFromId(ref_table[i].type,
              ref_table[i].id,
              ref_table[i].name,
              isolate);
  }

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* (Counters::*counter)();
    uint16_t id;
    const char* name;
  };

  const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name,    \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  Counters* counters = isolate->counters();
  for (size_t i = 0; i < arraysize(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(GetInternalPointer(
            (counters->*(stats_ref_table[i].counter))())),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses

  const char* AddressNames[] = {
#define BUILD_NAME_LITERAL(CamelName, hacker_name)      \
    "Isolate::" #hacker_name "_address",
    FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
    NULL
#undef BUILD_NAME_LITERAL
  };

  for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
    Add(isolate->get_address_from_id((Isolate::AddressId)i),
        TOP_ADDRESS, i, AddressNames[i]);
  }

  // Accessors
#define ACCESSOR_INFO_DECLARATION(name)                          \
  Add(FUNCTION_ADDR(&Accessors::name##Getter), ACCESSOR_CODE,    \
      Accessors::k##name##Getter, "Accessors::" #name "Getter"); \
  Add(FUNCTION_ADDR(&Accessors::name##Setter), ACCESSOR_CODE,    \
      Accessors::k##name##Setter, "Accessors::" #name "Setter");
  ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION

  StubCache* stub_cache = isolate->stub_cache();

  // Stub cache tables
  Add(stub_cache->key_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE, 1, "StubCache::primary_->key");
  Add(stub_cache->value_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE, 2, "StubCache::primary_->value");
  Add(stub_cache->map_reference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE, 3, "StubCache::primary_->map");
  Add(stub_cache->key_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE, 4, "StubCache::secondary_->key");
  Add(stub_cache->value_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE, 5, "StubCache::secondary_->value");
  Add(stub_cache->map_reference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE, 6, "StubCache::secondary_->map");

  // Runtime entries
  Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
      RUNTIME_ENTRY, 1, "HandleScope::DeleteExtensions");
  Add(ExternalReference::incremental_marking_record_write_function(isolate)
          .address(),
      RUNTIME_ENTRY, 2, "IncrementalMarking::RecordWrite");
  Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
      RUNTIME_ENTRY, 3, "StoreBuffer::StoreBufferOverflow");

  // Add a small set of deopt entry addresses to the encoder without generating
  // the deopt table code, which isn't possible at deserialization time.
  HandleScope scope(isolate);
  for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
    Address address = Deoptimizer::GetDeoptimizationEntry(
        isolate,
        entry,
        Deoptimizer::LAZY,
        Deoptimizer::CALCULATE_ENTRY_ADDRESS);
    Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
  }
}


ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
    : encodings_(HashMap::PointersMatch),
      isolate_(isolate) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->address(i), i);
  }
}


uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  DCHECK(key == NULL || index >= 0);
  return index >= 0 ?
         ExternalReferenceTable::instance(isolate_)->code(index) : 0;
}
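
// (Editorial note.) A return value of 0 doubles as "no encoding" for a NULL
// key. This is unambiguous because Add() asserts DCHECK_NE(0u, entry.code),
// so no real external reference ever encodes to 0.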


const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  return index >= 0 ? ExternalReferenceTable::instance(isolate_)->name(index)
                    : "<unknown>";
}


int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  HashMap::Entry* entry =
      const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
  return entry == NULL
      ? -1
      : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
}


void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
  entry->value = reinterpret_cast<void*>(index);
}


ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
    : encodings_(NewArray<Address*>(kTypeCodeCount)),
      isolate_(isolate) {
  ExternalReferenceTable* external_references =
      ExternalReferenceTable::instance(isolate_);
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    int max = external_references->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < external_references->size(); ++i) {
    Put(external_references->code(i), external_references->address(i));
  }
}
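
// (Editorial note, a sketch of the intended layout.) encodings_ holds one
// array per type code, each sized from max_id(type), so a decode is two array
// lookups: encodings_[type-of-code][id-of-code]. Put() and Decode() are
// declared elsewhere (presumably in serialize.h); the indexing described here
// is inferred from the allocation above rather than quoted from that header.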


ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}


RootIndexMap::RootIndexMap(Isolate* isolate) {
  map_ = new HashMap(HashMap::PointersMatch);
  Object** root_array = isolate->heap()->roots_array_start();
  for (int i = 0; i < Heap::kStrongRootListLength; i++) {
    Object* root = root_array[i];
    if (root->IsHeapObject() && !isolate->heap()->InNewSpace(root)) {
      HeapObject* heap_object = HeapObject::cast(root);
      if (LookupEntry(map_, heap_object, false) != NULL) {
        // Some root values are initialized to the empty FixedArray().
        // Do not add them to the map.
        // TODO(yangguo): This assert is not true. Some roots like
        // instanceof_cache_answer can be e.g. null.
        // DCHECK_EQ(isolate->heap()->empty_fixed_array(), heap_object);
      } else {
        SetValue(LookupEntry(map_, heap_object, true), i);
      }
    }
  }
}
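
// (Editorial note.) This map lets the serializer turn a pointer to a strong
// root into its index in the root list (see root_index_map_ in the Serializer
// constructor below); the deserializer performs the inverse lookup in the
// kRootArray case of ReadData(), via
// isolate->heap()->roots_array_start()[root_id].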


class CodeAddressMap: public CodeEventLogger {
 public:
  explicit CodeAddressMap(Isolate* isolate)
      : isolate_(isolate) {
    isolate->logger()->addCodeEventListener(this);
  }

  virtual ~CodeAddressMap() {
    isolate_->logger()->removeCodeEventListener(this);
  }

  virtual void CodeMoveEvent(Address from, Address to) {
    address_to_name_map_.Move(from, to);
  }

  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
  }

  virtual void CodeDeleteEvent(Address from) {
    address_to_name_map_.Remove(from);
  }

  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }

 private:
  class NameMap {
   public:
    NameMap() : impl_(HashMap::PointersMatch) {}

    ~NameMap() {
      for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
        DeleteArray(static_cast<const char*>(p->value));
      }
    }

    void Insert(Address code_address, const char* name, int name_size) {
      HashMap::Entry* entry = FindOrCreateEntry(code_address);
      if (entry->value == NULL) {
        entry->value = CopyName(name, name_size);
      }
    }

    const char* Lookup(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
    }

    void Remove(Address code_address) {
      HashMap::Entry* entry = FindEntry(code_address);
      if (entry != NULL) {
        DeleteArray(static_cast<char*>(entry->value));
        RemoveEntry(entry);
      }
    }

    void Move(Address from, Address to) {
      if (from == to) return;
      HashMap::Entry* from_entry = FindEntry(from);
      DCHECK(from_entry != NULL);
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      HashMap::Entry* to_entry = FindOrCreateEntry(to);
      DCHECK(to_entry->value == NULL);
      to_entry->value = value;
    }

   private:
    static char* CopyName(const char* name, int name_size) {
      char* result = NewArray<char>(name_size + 1);
      for (int i = 0; i < name_size; ++i) {
        char c = name[i];
        if (c == '\0') c = ' ';
        result[i] = c;
      }
      result[name_size] = '\0';
      return result;
    }
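
    // (Editorial note.) CopyName replaces embedded NUL bytes with spaces so
    // that the stored copy is always a well-formed C string of exactly
    // name_size characters, which keeps Lookup() results printable.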

    HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
    }

    HashMap::Entry* FindEntry(Address code_address) {
      return impl_.Lookup(code_address,
                          ComputePointerHash(code_address),
                          false);
    }

    void RemoveEntry(HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }

    HashMap impl_;

    DISALLOW_COPY_AND_ASSIGN(NameMap);
  };

  virtual void LogRecordedBuffer(Code* code,
                                 SharedFunctionInfo*,
                                 const char* name,
                                 int length) {
    address_to_name_map_.Insert(code->address(), name, length);
  }

  NameMap address_to_name_map_;
  Isolate* isolate_;
};


void Deserializer::DecodeReservation(
    Vector<const SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
  STATIC_ASSERT(NEW_SPACE == 0);
  int current_space = NEW_SPACE;
  for (int i = 0; i < res.length(); i++) {
    SerializedData::Reservation r(0);
    memcpy(&r, res.start() + i, sizeof(r));
    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}
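
// (Editorial note, a sketch of the encoding this expects.) The reservation
// stream lists chunk sizes per space, in space order starting at NEW_SPACE,
// with the is_last() bit marking the final chunk of each space, e.g.
//   [new: 8K*] [old-pointer: 16K, 16K*] [old-data: 16K*] ...  (* = is_last)
// so the single pass above can rebuild reservations_ for every space. The
// sizes shown are made up for illustration.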


void Deserializer::FlushICacheForNewCodeObjects() {
  PageIterator it(isolate_->heap()->code_space());
  while (it.has_next()) {
    Page* p = it.next();
    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
  }
}


bool Deserializer::ReserveSpace() {
#ifdef DEBUG
  for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
    CHECK(reservations_[i].length() > 0);
  }
#endif  // DEBUG
  if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}


void Deserializer::Initialize(Isolate* isolate) {
  DCHECK_NULL(isolate_);
  DCHECK_NOT_NULL(isolate);
  isolate_ = isolate;
  DCHECK_NULL(external_reference_decoder_);
  external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
}


void Deserializer::Deserialize(Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
  // No active threads.
  DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
  // No active handles.
  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
  isolate_->heap()->IterateSmiRoots(this);
  isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
  isolate_->heap()->RepairFreeListsAfterDeserialization();
  isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);

  isolate_->heap()->set_native_contexts_list(
      isolate_->heap()->undefined_value());
  isolate_->heap()->set_array_buffers_list(
      isolate_->heap()->undefined_value());

  // The allocation site list is built during root iteration, but if no sites
  // were encountered then it needs to be initialized to undefined.
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    isolate_->heap()->set_allocation_sites_list(
        isolate_->heap()->undefined_value());
  }

  // Update data pointers to the external strings containing natives sources.
  for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
    Object* source = isolate_->heap()->natives_source_cache()->get(i);
    if (!source->IsUndefined()) {
      ExternalOneByteString::cast(source)->update_data_cache();
    }
  }

  FlushICacheForNewCodeObjects();

  // Issue code events for newly deserialized code objects.
  LOG_CODE_EVENT(isolate_, LogCodeObjects());
  LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
}


MaybeHandle<Object> Deserializer::DeserializePartial(
    Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
    Handle<FixedArray>* outdated_contexts_out) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    V8::FatalProcessOutOfMemory("deserialize context");
    return MaybeHandle<Object>();
  }

  Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
  attached_objects[kGlobalProxyReference] = global_proxy;
  SetAttachedObjects(attached_objects);

  DisallowHeapAllocation no_gc;
  // Keep track of the code space start and end pointers in case new
  // code objects were deserialized.
  OldSpace* code_space = isolate_->heap()->code_space();
  Address start_address = code_space->top();
  Object* root;
  Object* outdated_contexts;
  VisitPointer(&root);
  VisitPointer(&outdated_contexts);

  // No code is deserialized here. If this assert fires, that has changed and
  // logging should be added to notify the profiler et al. of the new code.
  CHECK_EQ(start_address, code_space->top());
  CHECK(outdated_contexts->IsFixedArray());
  *outdated_contexts_out =
      Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
  return Handle<Object>(root, isolate);
}


MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
    Isolate* isolate) {
  Initialize(isolate);
  if (!ReserveSpace()) {
    return Handle<SharedFunctionInfo>();
  } else {
    deserializing_user_code_ = true;
    DisallowHeapAllocation no_gc;
    Object* root;
    VisitPointer(&root);
    return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
  }
}


Deserializer::~Deserializer() {
  // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
  // DCHECK(source_.AtEOF());
  if (external_reference_decoder_) {
    delete external_reference_decoder_;
    external_reference_decoder_ = NULL;
  }
  attached_objects_.Dispose();
}


// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitPointers(Object** start, Object** end) {
  // The space must be new space.  Any other space would cause ReadData to try
  // to update the remembered set using NULL as the address.
  ReadData(start, end, NEW_SPACE, NULL);
}


void Deserializer::RelinkAllocationSite(AllocationSite* site) {
  if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
    site->set_weak_next(isolate_->heap()->undefined_value());
  } else {
    site->set_weak_next(isolate_->heap()->allocation_sites_list());
  }
  isolate_->heap()->set_allocation_sites_list(site);
}


// Used to insert a deserialized internalized string into the string table.
class StringTableInsertionKey : public HashTableKey {
 public:
  explicit StringTableInsertionKey(String* string)
      : string_(string), hash_(HashForObject(string)) {
    DCHECK(string->IsInternalizedString());
  }

  bool IsMatch(Object* string) OVERRIDE {
    // We know that all entries in a hash table had their hash keys created.
    // Use that knowledge to have fast failure.
    if (hash_ != HashForObject(string)) return false;
    // We want to compare the content of two internalized strings here.
    return string_->SlowEquals(String::cast(string));
  }

  uint32_t Hash() OVERRIDE { return hash_; }

  uint32_t HashForObject(Object* key) OVERRIDE {
    return String::cast(key)->Hash();
  }

  MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate)
      OVERRIDE {
    return handle(string_, isolate);
  }

  String* string_;
  uint32_t hash_;
};


HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
  if (obj->IsString()) {
    String* string = String::cast(obj);
    // Uninitialize hash field as the hash seed may have changed.
    string->set_hash_field(String::kEmptyHashField);
    if (string->IsInternalizedString()) {
      DisallowHeapAllocation no_gc;
      HandleScope scope(isolate_);
      StringTableInsertionKey key(string);
      String* canonical = *StringTable::LookupKey(isolate_, &key);
      string->SetForwardedInternalizedString(canonical);
      return canonical;
    }
  } else if (obj->IsScript()) {
    Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
  }
  return obj;
}
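
// (Editorial note.) The forwarding step above is what keeps internalized
// strings unique across deserialization: the freshly materialized copy is
// looked up in (or inserted into) the string table, and subsequent back
// references to it are redirected to the canonical string by
// GetBackReferencedObject() below.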


HeapObject* Deserializer::GetBackReferencedObject(int space) {
  HeapObject* obj;
  BackReference back_reference(source_.GetInt());
  if (space == LO_SPACE) {
    CHECK(back_reference.chunk_index() == 0);
    uint32_t index = back_reference.large_object_index();
    obj = deserialized_large_objects_[index];
  } else {
    DCHECK(space < kNumberOfPreallocatedSpaces);
    uint32_t chunk_index = back_reference.chunk_index();
    DCHECK_LE(chunk_index, current_chunk_[space]);
    uint32_t chunk_offset = back_reference.chunk_offset();
    obj = HeapObject::FromAddress(reservations_[space][chunk_index].start +
                                  chunk_offset);
  }
  if (deserializing_user_code() && obj->IsInternalizedString()) {
    obj = String::cast(obj)->GetForwardedInternalizedString();
  }
  hot_objects_.Add(obj);
  return obj;
}
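
// (Editorial note, a worked example of the back-reference encoding.) For a
// preallocated space, BackReference packs a chunk index and a byte offset
// into that chunk, so the referenced object is recovered as
//   reservations_[space][chunk_index].start + chunk_offset.
// Large objects cannot share a chunk, so they are instead referenced by their
// position in deserialized_large_objects_.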


// This routine writes the new object into the pointer provided rather than
// returning it. The reason for this strange interface is that otherwise the
// object would be written very late, which means the FreeSpace map is not set
// up by the time we need to use it to mark the space at the end of a page
// free.
void Deserializer::ReadObject(int space_number, Object** write_back) {
  Address address;
  HeapObject* obj;
  int next_int = source_.GetInt();

  bool double_align = false;
#ifndef V8_HOST_ARCH_64_BIT
  double_align = next_int == kDoubleAlignmentSentinel;
  if (double_align) next_int = source_.GetInt();
#endif

  DCHECK_NE(kDoubleAlignmentSentinel, next_int);
  int size = next_int << kObjectAlignmentBits;
  int reserved_size = size + (double_align ? kPointerSize : 0);
  address = Allocate(space_number, reserved_size);
  obj = HeapObject::FromAddress(address);
  if (double_align) {
    obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
    address = obj->address();
  }

  isolate_->heap()->OnAllocationEvent(obj, size);
  Object** current = reinterpret_cast<Object**>(address);
  Object** limit = current + (size >> kPointerSizeLog2);
  if (FLAG_log_snapshot_positions) {
    LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
  }
  ReadData(current, limit, space_number, address);

  // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
  // as a (weak) root. If this root is relocated correctly,
  // RelinkAllocationSite() isn't necessary.
  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));

  // Fix up strings from serialized user code.
  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);

  Object* write_back_obj = obj;
  UnalignedCopy(write_back, &write_back_obj);
#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
  } else {
    DCHECK(space_number != CODE_SPACE);
  }
#endif
#if V8_TARGET_ARCH_PPC && \
    (ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL)
  // If we're on a platform that uses function descriptors, these jump tables
  // make use of RelocInfo::INTERNAL_REFERENCE. As the V8 serialization code
  // doesn't handle that relocation type, we use this to fix up code that has
  // function descriptors.
  if (space_number == CODE_SPACE) {
    Code* code = reinterpret_cast<Code*>(HeapObject::FromAddress(address));
    for (RelocIterator it(code); !it.done(); it.next()) {
      RelocInfo::Mode rmode = it.rinfo()->rmode();
      if (rmode == RelocInfo::INTERNAL_REFERENCE) {
        Assembler::RelocateInternalReference(it.rinfo()->pc(), 0,
                                             code->instruction_start());
      }
    }
  }
#endif
}


// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address Deserializer::Allocate(int space_index, int size) {
  if (space_index == LO_SPACE) {
    AlwaysAllocateScope scope(isolate_);
    LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
    Executability exec = static_cast<Executability>(source_.Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
    deserialized_large_objects_.Add(obj);
    return obj->address();
  } else {
    DCHECK(space_index < kNumberOfPreallocatedSpaces);
    Address address = high_water_[space_index];
    DCHECK_NOT_NULL(address);
    high_water_[space_index] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space_index];
    int chunk_index = current_chunk_[space_index];
    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
#endif
    return address;
  }
}


void Deserializer::ReadData(Object** current, Object** limit, int source_space,
                            Address current_object_address) {
  Isolate* const isolate = isolate_;
  // Write barrier support costs around 1% in startup time.  In fact there
  // are no new space objects in current boot snapshots, so it's not needed,
  // but that may change.
  bool write_barrier_needed = (current_object_address != NULL &&
                               source_space != NEW_SPACE &&
                               source_space != CELL_SPACE &&
                               source_space != PROPERTY_CELL_SPACE &&
                               source_space != CODE_SPACE &&
                               source_space != OLD_DATA_SPACE);
  while (current < limit) {
    byte data = source_.Get();
    switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
  case where + how + within + space_number:              \
    STATIC_ASSERT((where & ~kPointedToMask) == 0);       \
    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);

#define CASE_BODY(where, how, within, space_number_if_any)                     \
  {                                                                            \
    bool emit_write_barrier = false;                                           \
    bool current_was_incremented = false;                                      \
    int space_number = space_number_if_any == kAnyOldSpace                     \
                           ? (data & kSpaceMask)                               \
                           : space_number_if_any;                              \
    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
      ReadObject(space_number, current);                                       \
      emit_write_barrier = (space_number == NEW_SPACE);                        \
    } else {                                                                   \
      Object* new_object = NULL; /* May not be a real Object pointer. */       \
      if (where == kNewObject) {                                               \
        ReadObject(space_number, &new_object);                                 \
      } else if (where == kRootArray) {                                        \
        int root_id = source_.GetInt();                                        \
        new_object = isolate->heap()->roots_array_start()[root_id];            \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kPartialSnapshotCache) {                             \
        int cache_index = source_.GetInt();                                    \
        new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else if (where == kExternalReference) {                                \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        int reference_id = source_.GetInt();                                   \
        Address address = external_reference_decoder_->Decode(reference_id);   \
        new_object = reinterpret_cast<Object*>(address);                       \
      } else if (where == kBackref) {                                          \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      } else if (where == kBuiltin) {                                          \
        DCHECK(deserializing_user_code());                                     \
        int builtin_id = source_.GetInt();                                     \
        DCHECK_LE(0, builtin_id);                                              \
        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
        new_object = isolate->builtins()->builtin(name);                       \
        emit_write_barrier = false;                                            \
      } else if (where == kAttachedReference) {                                \
        int index = source_.GetInt();                                          \
        DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
        new_object = *attached_objects_[index];                                \
        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
      } else {                                                                 \
        DCHECK(where == kBackrefWithSkip);                                     \
        int skip = source_.GetInt();                                           \
        current = reinterpret_cast<Object**>(                                  \
            reinterpret_cast<Address>(current) + skip);                        \
        emit_write_barrier = (space_number == NEW_SPACE);                      \
        new_object = GetBackReferencedObject(data & kSpaceMask);               \
      }                                                                        \
      if (within == kInnerPointer) {                                           \
        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
          new_object =                                                         \
              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
        } else {                                                               \
          DCHECK(space_number == CODE_SPACE);                                  \
          Cell* cell = Cell::cast(new_object);                                 \
          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
        }                                                                      \
      }                                                                        \
      if (how == kFromCode) {                                                  \
        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
        Assembler::deserialization_set_special_target_at(                      \
            location_of_branch_data,                                           \
            Code::cast(HeapObject::FromAddress(current_object_address)),       \
            reinterpret_cast<Address>(new_object));                            \
        location_of_branch_data += Assembler::kSpecialTargetSize;              \
        current = reinterpret_cast<Object**>(location_of_branch_data);         \
        current_was_incremented = true;                                        \
      } else {                                                                 \
        UnalignedCopy(current, &new_object);                                   \
      }                                                                        \
    }                                                                          \
    if (emit_write_barrier && write_barrier_needed) {                          \
      Address current_address = reinterpret_cast<Address>(current);            \
      isolate->heap()->RecordWrite(                                            \
          current_object_address,                                              \
          static_cast<int>(current_address - current_object_address));         \
    }                                                                          \
    if (!current_was_incremented) {                                            \
      current++;                                                               \
    }                                                                          \
    break;                                                                     \
  }
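
// (Editorial note.) An opcode byte is the arithmetic sum
// where + how + within + space_number. The STATIC_ASSERTs in CASE_STATEMENT
// guarantee that the four components occupy disjoint bit fields
// (kPointedToMask, kHowToCodeMask, kWhereToPointMask, kSpaceMask), so the sum
// behaves like a bit-wise OR and each component can be recovered from the
// opcode by masking, as CASE_BODY does for kAnyOldSpace with
// (data & kSpaceMask).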

// This generates a case and a body for the new space (which has to do extra
// write barrier handling) and handles the other spaces with seven fall-through
// cases and one body.
#define ALL_SPACES(where, how, within)                    \
  CASE_STATEMENT(where, how, within, NEW_SPACE)           \
  CASE_BODY(where, how, within, NEW_SPACE)                \
  CASE_STATEMENT(where, how, within, OLD_DATA_SPACE)      \
  CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE)   \
  CASE_STATEMENT(where, how, within, CODE_SPACE)          \
  CASE_STATEMENT(where, how, within, MAP_SPACE)           \
  CASE_STATEMENT(where, how, within, CELL_SPACE)          \
  CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
  CASE_STATEMENT(where, how, within, LO_SPACE)            \
  CASE_BODY(where, how, within, kAnyOldSpace)

#define FOUR_CASES(byte_code)             \
  case byte_code:                         \
  case byte_code + 1:                     \
  case byte_code + 2:                     \
  case byte_code + 3:

#define SIXTEEN_CASES(byte_code)          \
  FOUR_CASES(byte_code)                   \
  FOUR_CASES(byte_code + 4)               \
  FOUR_CASES(byte_code + 8)               \
  FOUR_CASES(byte_code + 12)

#define COMMON_RAW_LENGTHS(f)        \
  f(1)  \
  f(2)  \
  f(3)  \
  f(4)  \
  f(5)  \
  f(6)  \
  f(7)  \
  f(8)  \
  f(9)  \
  f(10) \
  f(11) \
  f(12) \
  f(13) \
  f(14) \
  f(15) \
  f(16) \
  f(17) \
  f(18) \
  f(19) \
  f(20) \
  f(21) \
  f(22) \
  f(23) \
  f(24) \
  f(25) \
  f(26) \
  f(27) \
  f(28) \
  f(29) \
  f(30) \
  f(31)

      // We generate 31 cases and bodies that process special tags that combine
      // the raw data tag and the length into one byte.
#define RAW_CASE(index)                                                        \
  case kRawData + index: {                                                     \
    byte* raw_data_out = reinterpret_cast<byte*>(current);                     \
    source_.CopyRaw(raw_data_out, index * kPointerSize);                       \
    current = reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
    break;                                                                     \
  }
      COMMON_RAW_LENGTHS(RAW_CASE)
#undef RAW_CASE

      // Deserialize a chunk of raw data that doesn't have one of the popular
      // lengths.
      case kRawData: {
        int size = source_.GetInt();
        byte* raw_data_out = reinterpret_cast<byte*>(current);
        source_.CopyRaw(raw_data_out, size);
        break;
      }

      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
        int root_id = RootArrayConstantFromByteCode(data);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
      SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
        int root_id = RootArrayConstantFromByteCode(data);
        int skip = source_.GetInt();
        current = reinterpret_cast<Object**>(
            reinterpret_cast<intptr_t>(current) + skip);
        Object* object = isolate->heap()->roots_array_start()[root_id];
        DCHECK(!isolate->heap()->InNewSpace(object));
        UnalignedCopy(current++, &object);
        break;
      }

      case kVariableRepeat: {
        int repeats = source_.GetInt();
        Object* object = current[-1];
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                    Heap::kOldSpaceRoots);
      STATIC_ASSERT(kMaxFixedRepeats == 15);
      FOUR_CASES(kFixedRepeat)
      FOUR_CASES(kFixedRepeat + 4)
      FOUR_CASES(kFixedRepeat + 8)
      case kFixedRepeat + 12:
      case kFixedRepeat + 13:
      case kFixedRepeat + 14: {
        int repeats = RepeatsForCode(data);
        Object* object;
        UnalignedCopy(&object, current - 1);
        DCHECK(!isolate->heap()->InNewSpace(object));
        for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
        break;
      }

      // Deserialize a new object and write a pointer to it to the current
      // object.
      ALL_SPACES(kNewObject, kPlain, kStartOfObject)
      // Support for direct instruction pointers in functions.  It's an inner
      // pointer because it points at the entry point, not at the start of the
      // code object.
      CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
      // Deserialize a new code object and write a pointer to its first
      // instruction to the current code object.
      ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
      // Find a recently deserialized object using its offset from the current
      // allocation point and write a pointer to it to the current object.
      ALL_SPACES(kBackref, kPlain, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
    defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
      // Deserialize a new object from pointer found in code and write
      // a pointer to it to the current object. Required only for MIPS, PPC or
      // ARM with ool constant pool, and omitted on the other architectures
      // because it is fully unrolled and would cause bloat.
      ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to it to the current
      // object. Required only for MIPS, PPC or ARM with ool constant pool.
      ALL_SPACES(kBackref, kFromCode, kStartOfObject)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
#endif
      // Find a recently deserialized code object using its offset from the
      // current allocation point and write a pointer to its first instruction
      // to the current code object or the instruction pointer in a function
      // object.
      ALL_SPACES(kBackref, kFromCode, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
      ALL_SPACES(kBackref, kPlain, kInnerPointer)
      ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
      // Find an object in the roots array and write a pointer to it to the
      // current object.
      CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
      CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
      // Find an object in the roots array and write a pointer to it into code.
      CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
      CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
#endif
      // Find an object in the partial snapshots cache and write a pointer to it
      // to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kStartOfObject,
                0)
      // Find a code entry in the partial snapshots cache and
      // write a pointer to it to the current object.
      CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
      CASE_BODY(kPartialSnapshotCache,
                kPlain,
                kInnerPointer,
                0)
1258       // Find an external reference and write a pointer to it to the current
1259       // object.
1260       CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
1261       CASE_BODY(kExternalReference,
1262                 kPlain,
1263                 kStartOfObject,
1264                 0)
1265       // Find an external reference and write a pointer to it in the current
1266       // code object.
1267       CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
1268       CASE_BODY(kExternalReference,
1269                 kFromCode,
1270                 kStartOfObject,
1271                 0)
1272       // Find a builtin and write a pointer to it to the current object.
1273       CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
1274       CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
1275       CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
1276       CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
1277       CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
1278       CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
1279       // Find an object in the attached references and write a pointer to it to
1280       // the current object.
1281       CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
1282       CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
1283       CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
1284       CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
1285       CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
1286       CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
1287
1288 #undef CASE_STATEMENT
1289 #undef CASE_BODY
1290 #undef ALL_SPACES
1291
1292       case kSkip: {
1293         int size = source_.GetInt();
1294         current = reinterpret_cast<Object**>(
1295             reinterpret_cast<intptr_t>(current) + size);
1296         break;
1297       }
1298
1299       case kNativesStringResource: {
1300         DCHECK(!isolate_->heap()->deserialization_complete());
1301         int index = source_.Get();
1302         Vector<const char> source_vector = Natives::GetScriptSource(index);
1303         NativesExternalStringResource* resource =
1304             new NativesExternalStringResource(source_vector.start(),
1305                                               source_vector.length());
1306         Object* resource_obj = reinterpret_cast<Object*>(resource);
1307         UnalignedCopy(current++, &resource_obj);
1308         break;
1309       }
1310
1311       case kNextChunk: {
1312         int space = source_.Get();
1313         DCHECK(space < kNumberOfPreallocatedSpaces);
1314         int chunk_index = current_chunk_[space];
1315         const Heap::Reservation& reservation = reservations_[space];
1316         // Make sure the current chunk is indeed exhausted.
1317         CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
1318         // Move to next reserved chunk.
1319         chunk_index = ++current_chunk_[space];
1320         CHECK_LT(chunk_index, reservation.length());
1321         high_water_[space] = reservation[chunk_index].start;
1322         break;
1323       }
1324
1325       FOUR_CASES(kHotObjectWithSkip)
1326       FOUR_CASES(kHotObjectWithSkip + 4) {
1327         int skip = source_.GetInt();
1328         current = reinterpret_cast<Object**>(
1329             reinterpret_cast<Address>(current) + skip);
1330         // Fall through.
1331       }
1332       FOUR_CASES(kHotObject)
1333       FOUR_CASES(kHotObject + 4) {
1334         int index = data & kHotObjectIndexMask;
1335         Object* hot_object = hot_objects_.Get(index);
1336         UnalignedCopy(current, &hot_object);
1337         if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
1338           Address current_address = reinterpret_cast<Address>(current);
1339           isolate->heap()->RecordWrite(
1340               current_object_address,
1341               static_cast<int>(current_address - current_object_address));
1342         }
1343         current++;
1344         break;
1345       }
1346
1347       case kSynchronize: {
1348         // If we get here, the number of GC roots must have differed between
1349         // serialization and deserialization.
1350         CHECK(false);
1351       }
1352
1353       default:
1354         CHECK(false);
1355     }
1356   }
1357   CHECK_EQ(limit, current);
1358 }
1359
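// A minimal sketch of the raw-byte cursor advance performed by the kSkip
// case above, factored into a hypothetical helper (illustrative only, not
// part of V8):
static inline Object** SkipRawBytes(Object** current, int size) {
  // The encoded skip distance is in bytes, not in Object** slots, so route
  // the arithmetic through an integral type.
  return reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(current) +
                                    size);
}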
1360
1361 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
1362     : isolate_(isolate),
1363       sink_(sink),
1364       external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
1365       root_index_map_(isolate),
1366       code_address_map_(NULL),
1367       large_objects_total_size_(0),
1368       seen_large_objects_index_(0) {
1369   // The serializer is meant to be used only to generate initial heap images
1370   // from a context in which there is only one isolate.
1371   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
1372     pending_chunk_[i] = 0;
1373     max_chunk_size_[i] = static_cast<uint32_t>(
1374         MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
1375   }
1376 }
1377
1378
1379 Serializer::~Serializer() {
1380   delete external_reference_encoder_;
1381   if (code_address_map_ != NULL) delete code_address_map_;
1382 }
1383
1384
1385 void StartupSerializer::SerializeStrongReferences() {
1386   Isolate* isolate = this->isolate();
1387   // No active threads.
1388   CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
1389   // No active or weak handles.
1390   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
1391   CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
1392   CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
1393   // We don't support serializing installed extensions.
1394   CHECK(!isolate->has_installed_extensions());
1395   isolate->heap()->IterateSmiRoots(this);
1396   isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
1397 }
1398
1399
1400 void StartupSerializer::VisitPointers(Object** start, Object** end) {
1401   for (Object** current = start; current < end; current++) {
1402     if (start == isolate()->heap()->roots_array_start()) {
1403       root_index_wave_front_ =
1404           Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
1405     }
1406     if (ShouldBeSkipped(current)) {
1407       sink_->Put(kSkip, "Skip");
1408       sink_->PutInt(kPointerSize, "SkipOneWord");
1409     } else if ((*current)->IsSmi()) {
1410       sink_->Put(kOnePointerRawData, "Smi");
1411       for (int i = 0; i < kPointerSize; i++) {
1412         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
1413       }
1414     } else {
1415       SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
1416     }
1417   }
1418 }
1419
1420
1421 void PartialSerializer::Serialize(Object** o) {
1422   if ((*o)->IsContext()) {
1423     Context* context = Context::cast(*o);
1424     global_object_ = context->global_object();
1425     back_reference_map()->AddGlobalProxy(context->global_proxy());
1426   }
1427   VisitPointer(o);
1428   SerializeOutdatedContextsAsFixedArray();
1429   Pad();
1430 }
1431
1432
1433 void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
1434   int length = outdated_contexts_.length();
1435   if (length == 0) {
1436     FixedArray* empty = isolate_->heap()->empty_fixed_array();
1437     SerializeObject(empty, kPlain, kStartOfObject, 0);
1438   } else {
1439     // Serialize an imaginary fixed array containing outdated contexts.
1440     int size = FixedArray::SizeFor(length);
1441     Allocate(NEW_SPACE, size);
1442     sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
1443     sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
1444     Map* map = isolate_->heap()->fixed_array_map();
1445     SerializeObject(map, kPlain, kStartOfObject, 0);
1446     Smi* length_smi = Smi::FromInt(length);
1447     sink_->Put(kOnePointerRawData, "Smi");
1448     for (int i = 0; i < kPointerSize; i++) {
1449       sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
1450     }
1451     for (int i = 0; i < length; i++) {
1452       BackReference back_ref = outdated_contexts_[i];
1453       DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
1454       sink_->Put(kBackref + back_ref.space(), "BackRef");
1455       sink_->PutInt(back_ref.reference(), "BackRefValue");
1456     }
1457   }
1458 }
1459
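// For reference, the stream emitted above for a non-empty array is,
// schematically (a sketch reconstructed from the Put calls, not a normative
// format description):
//
//   kNewObject + NEW_SPACE, <size in words>,
//   <fixed_array_map>, kOnePointerRawData + <raw Smi length bytes>,
//   then one (kBackref + space, <reference>) pair per outdated context.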
1460
1461 bool Serializer::ShouldBeSkipped(Object** current) {
1462   Object** roots = isolate()->heap()->roots_array_start();
1463   return current == &roots[Heap::kStoreBufferTopRootIndex]
1464       || current == &roots[Heap::kStackLimitRootIndex]
1465       || current == &roots[Heap::kRealStackLimitRootIndex];
1466 }
1467
1468
1469 void Serializer::VisitPointers(Object** start, Object** end) {
1470   for (Object** current = start; current < end; current++) {
1471     if ((*current)->IsSmi()) {
1472       sink_->Put(kOnePointerRawData, "Smi");
1473       for (int i = 0; i < kPointerSize; i++) {
1474         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
1475       }
1476     } else {
1477       SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
1478     }
1479   }
1480 }
1481
1482
1483 void Serializer::EncodeReservations(
1484     List<SerializedData::Reservation>* out) const {
1485   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
1486     for (int j = 0; j < completed_chunks_[i].length(); j++) {
1487       out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
1488     }
1489
1490     if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
1491       out->Add(SerializedData::Reservation(pending_chunk_[i]));
1492     }
1493     out->last().mark_as_last();
1494   }
1495
1496   out->Add(SerializedData::Reservation(large_objects_total_size_));
1497   out->last().mark_as_last();
1498 }
1499
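// A sketch of how a consumer can walk the reservations encoded above,
// assuming a Reservation::is_last() accessor that mirrors the mark_as_last()
// calls (illustrative only):
static int CountReservedSpaces(const List<SerializedData::Reservation>& in) {
  // Every space's chunk list ends with a last-marker, so counting markers
  // yields the number of encoded spaces: the preallocated spaces plus one
  // trailing entry for large objects.
  int spaces = 0;
  for (int i = 0; i < in.length(); i++) {
    if (in[i].is_last()) spaces++;
  }
  return spaces;
}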
1500
1501 // This ensures that the partial snapshot cache keeps things alive during GC and
1502 // tracks their movement.  When it is called during serialization of the startup
1503 // snapshot nothing happens.  When the partial (context) snapshot is created,
1504 // this array is populated with the pointers that the partial snapshot will
1505 // need. As that happens we emit serialized objects to the startup snapshot
1506 // that correspond to the elements of this cache array.  On deserialization we
1507 // therefore need to visit the cache array.  This fills it up with pointers to
1508 // deserialized objects.
1509 void SerializerDeserializer::Iterate(Isolate* isolate,
1510                                      ObjectVisitor* visitor) {
1511   if (isolate->serializer_enabled()) return;
1512   for (int i = 0; ; i++) {
1513     if (isolate->serialize_partial_snapshot_cache_length() <= i) {
1514       // Extend the array so it is ready to receive a value from the
1515       // visitor when deserializing.
1516       isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
1517     }
1518     Object** cache = isolate->serialize_partial_snapshot_cache();
1519     visitor->VisitPointers(&cache[i], &cache[i + 1]);
1520     // The sentinel is the undefined object, which is a root and will
1521     // therefore not normally be found in the cache.
1522     if (cache[i] == isolate->heap()->undefined_value()) {
1523       break;
1524     }
1525   }
1526 }
1527
1528
1529 int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
1530   Isolate* isolate = this->isolate();
1531
1532   for (int i = 0;
1533        i < isolate->serialize_partial_snapshot_cache_length();
1534        i++) {
1535     Object* entry = isolate->serialize_partial_snapshot_cache()[i];
1536     if (entry == heap_object) return i;
1537   }
1538
1539   // We didn't find the object in the cache.  So we add it to the cache and
1540   // then visit the pointer so that it becomes part of the startup snapshot
1541   // and we can refer to it from the partial snapshot.
1542   int length = isolate->serialize_partial_snapshot_cache_length();
1543   isolate->PushToPartialSnapshotCache(heap_object);
1544   startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
1545   // We don't recurse from the startup snapshot generator into the partial
1546   // snapshot generator.
1547   DCHECK(length == isolate->serialize_partial_snapshot_cache_length() - 1);
1548   return length;
1549 }
1550
1551
1552 #ifdef DEBUG
1553 bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
1554   DCHECK(reference.is_valid());
1555   DCHECK(!reference.is_source());
1556   DCHECK(!reference.is_global_proxy());
1557   AllocationSpace space = reference.space();
1558   int chunk_index = reference.chunk_index();
1559   if (space == LO_SPACE) {
1560     return chunk_index == 0 &&
1561            reference.large_object_index() < seen_large_objects_index_;
1562   } else if (chunk_index == completed_chunks_[space].length()) {
1563     return reference.chunk_offset() < pending_chunk_[space];
1564   } else {
1565     return chunk_index < completed_chunks_[space].length() &&
1566            reference.chunk_offset() < completed_chunks_[space][chunk_index];
1567   }
1568 }
1569 #endif  // DEBUG
1570
1571
1572 bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
1573                                       WhereToPoint where_to_point, int skip) {
1574   if (how_to_code == kPlain && where_to_point == kStartOfObject) {
1575     // Encode a reference to a hot object by its index in the working set.
1576     int index = hot_objects_.Find(obj);
1577     if (index != HotObjectsList::kNotFound) {
1578       DCHECK(index >= 0 && index <= kMaxHotObjectIndex);
1579       if (FLAG_trace_serializer) {
1580         PrintF(" Encoding hot object %d:", index);
1581         obj->ShortPrint();
1582         PrintF("\n");
1583       }
1584       if (skip != 0) {
1585         sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
1586         sink_->PutInt(skip, "HotObjectSkipDistance");
1587       } else {
1588         sink_->Put(kHotObject + index, "HotObject");
1589       }
1590       return true;
1591     }
1592   }
1593   BackReference back_reference = back_reference_map_.Lookup(obj);
1594   if (back_reference.is_valid()) {
1595     // Encode the location of an already deserialized object in order to write
1596     // its location into a later object.  We can encode the location as an
1597     // offset from the start of the deserialized objects or as an offset
1598     // backwards from the current allocation pointer.
1599     if (back_reference.is_source()) {
1600       FlushSkip(skip);
1601       if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
1602       DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
1603       sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
1604       sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
1605     } else if (back_reference.is_global_proxy()) {
1606       FlushSkip(skip);
1607       if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
1608       DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
1609       sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
1610       sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
1611     } else {
1612       if (FLAG_trace_serializer) {
1613         PrintF(" Encoding back reference to: ");
1614         obj->ShortPrint();
1615         PrintF("\n");
1616       }
1617
1618       AllocationSpace space = back_reference.space();
1619       if (skip == 0) {
1620         sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
1621       } else {
1622         sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
1623                    "BackRefWithSkip");
1624         sink_->PutInt(skip, "BackRefSkipDistance");
1625       }
1626       DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
1627       sink_->PutInt(back_reference.reference(), "BackRefValue");
1628
1629       hot_objects_.Add(obj);
1630     }
1631     return true;
1632   }
1633   return false;
1634 }
1635
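// A sketch of the hot-object opcode packing consumed by the deserializer's
// FOUR_CASES(kHotObject) blocks earlier in this file. The constants below
// are stand-ins (the real opcode values live in serialize.h):
static const int kSketchHotObjectCode = 0x38;  // hypothetical base opcode
static const int kSketchHotIndexMask = 0x07;   // low bits carry the index

static inline int SketchEncodeHotObject(int index) {
  return kSketchHotObjectCode + index;  // base opcode plus working-set index
}

static inline int SketchDecodeHotObject(int opcode) {
  return opcode & kSketchHotIndexMask;  // recover the working-set index
}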
1636
1637 void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
1638                                         WhereToPoint where_to_point, int skip) {
1639   DCHECK(!obj->IsJSFunction());
1640
1641   int root_index = root_index_map_.Lookup(obj);
1642   // We can only encode roots as such if they have already been serialized.
1643   // That applies to root indices below the wave front.
1644   if (root_index != RootIndexMap::kInvalidRootIndex &&
1645       root_index < root_index_wave_front_) {
1646     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1647     return;
1648   }
1649
1650   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
1651
1652   FlushSkip(skip);
1653
1654   // Object has not yet been serialized.  Serialize it here.
1655   ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
1656                                      where_to_point);
1657   object_serializer.Serialize();
1658 }
1659
1660
1661 void StartupSerializer::SerializeWeakReferences() {
1662   // This phase comes right after the serialization (of the snapshot).
1663   // After we have done the partial serialization the partial snapshot cache
1664   // will contain some references needed to decode the partial snapshot.  We
1665   // add one entry with 'undefined' which is the sentinel that the deserializer
1666   // uses to know it is done deserializing the array.
1667   Object* undefined = isolate()->heap()->undefined_value();
1668   VisitPointer(&undefined);
1669   isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
1670   Pad();
1671 }
1672
1673
1674 void Serializer::PutRoot(int root_index,
1675                          HeapObject* object,
1676                          SerializerDeserializer::HowToCode how_to_code,
1677                          SerializerDeserializer::WhereToPoint where_to_point,
1678                          int skip) {
1679   if (FLAG_trace_serializer) {
1680     PrintF(" Encoding root %d:", root_index);
1681     object->ShortPrint();
1682     PrintF("\n");
1683   }
1684
1685   if (how_to_code == kPlain &&
1686       where_to_point == kStartOfObject &&
1687       root_index < kRootArrayNumberOfConstantEncodings &&
1688       !isolate()->heap()->InNewSpace(object)) {
1689     if (skip == 0) {
1690       sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
1691                  "RootConstant");
1692     } else {
1693       sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
1694                  "RootConstant");
1695       sink_->PutInt(skip, "SkipInPutRoot");
1696     }
1697   } else {
1698     FlushSkip(skip);
1699     sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
1700     sink_->PutInt(root_index, "root_index");
1701   }
1702 }
1703
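// A sketch of the single-byte root constant encoding above, again with
// stand-in constants (kSketchRootConstants and kSketchHasSkip are
// hypothetical; the real values live in serialize.h):
static const int kSketchRootConstants = 0xa0;  // hypothetical base opcode
static const int kSketchHasSkip = 0x20;        // hypothetical skip-variant bit

static inline int SketchEncodeRootConstant(int root_index, bool has_skip) {
  // Small root indices fit into the opcode byte itself; the skip variant is
  // followed by a separate variable-length skip distance.
  return kSketchRootConstants + (has_skip ? kSketchHasSkip : 0) + root_index;
}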
1704
1705 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
1706                                         WhereToPoint where_to_point, int skip) {
1707   if (obj->IsMap()) {
1708     // The code-caches link to context-specific code objects, which
1709     // the startup and context serializers cannot currently handle.
1710     DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
1711   }
1712
1713   // Replace typed arrays by undefined.
1714   if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
1715
1716   int root_index = root_index_map_.Lookup(obj);
1717   if (root_index != RootIndexMap::kInvalidRootIndex) {
1718     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1719     return;
1720   }
1721
1722   if (ShouldBeInThePartialSnapshotCache(obj)) {
1723     FlushSkip(skip);
1724
1725     int cache_index = PartialSnapshotCacheIndex(obj);
1726     sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
1727                "PartialSnapshotCache");
1728     sink_->PutInt(cache_index, "partial_snapshot_cache_index");
1729     return;
1730   }
1731
1732   // Pointers from the partial snapshot to the objects in the startup snapshot
1733   // should go through the root array or through the partial snapshot cache.
1734   // If this is not the case, you may have to add something to the root array.
1735   DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
1736   // All the internalized strings that the partial snapshot needs should be
1737   // either in the root table or in the partial snapshot cache.
1738   DCHECK(!obj->IsInternalizedString());
1739
1740   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
1741
1742   FlushSkip(skip);
1743
1744   // Object has not yet been serialized.  Serialize it here.
1745   ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
1746   serializer.Serialize();
1747
1748   if (obj->IsContext() &&
1749       Context::cast(obj)->global_object() == global_object_) {
1750     // Context refers to the current global object. This reference will
1751     // become outdated after deserialization.
1752     BackReference back_reference = back_reference_map_.Lookup(obj);
1753     DCHECK(back_reference.is_valid());
1754     outdated_contexts_.Add(back_reference);
1755   }
1756 }
1757
1758
1759 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
1760                                                      int size, Map* map) {
1761   if (serializer_->code_address_map_) {
1762     const char* code_name =
1763         serializer_->code_address_map_->Lookup(object_->address());
1764     LOG(serializer_->isolate_,
1765         CodeNameEvent(object_->address(), sink_->Position(), code_name));
1766     LOG(serializer_->isolate_,
1767         SnapshotPositionEvent(object_->address(), sink_->Position()));
1768   }
1769
1770   BackReference back_reference;
1771   if (space == LO_SPACE) {
1772     sink_->Put(kNewObject + reference_representation_ + space,
1773                "NewLargeObject");
1774     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
1775     if (object_->IsCode()) {
1776       sink_->Put(EXECUTABLE, "executable large object");
1777     } else {
1778       sink_->Put(NOT_EXECUTABLE, "not executable large object");
1779     }
1780     back_reference = serializer_->AllocateLargeObject(size);
1781   } else {
1782     bool needs_double_align = false;
1783     if (object_->NeedsToEnsureDoubleAlignment()) {
1784       // Add wriggle room for double alignment padding.
1785       back_reference = serializer_->Allocate(space, size + kPointerSize);
1786       needs_double_align = true;
1787     } else {
1788       back_reference = serializer_->Allocate(space, size);
1789     }
1790     sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
1791     if (needs_double_align)
1792       sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
1793     int encoded_size = size >> kObjectAlignmentBits;
1794     DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
1795     sink_->PutInt(encoded_size, "ObjectSizeInWords");
1796   }
1797
1798   // Mark this object as already serialized.
1799   serializer_->back_reference_map()->Add(object_, back_reference);
1800
1801   // Serialize the map (first word of the object).
1802   serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
1803 }
1804
1805
1806 void Serializer::ObjectSerializer::SerializeExternalString() {
1807   // Instead of serializing this as an external string, we serialize
1808   // an imaginary sequential string with the same content.
1809   Isolate* isolate = serializer_->isolate();
1810   DCHECK(object_->IsExternalString());
1811   DCHECK(object_->map() != isolate->heap()->native_source_string_map());
1812   ExternalString* string = ExternalString::cast(object_);
1813   int length = string->length();
1814   Map* map;
1815   int content_size;
1816   int allocation_size;
1817   const byte* resource;
1818   // Find the map and size for the imaginary sequential string.
1819   bool internalized = object_->IsInternalizedString();
1820   if (object_->IsExternalOneByteString()) {
1821     map = internalized ? isolate->heap()->one_byte_internalized_string_map()
1822                        : isolate->heap()->one_byte_string_map();
1823     allocation_size = SeqOneByteString::SizeFor(length);
1824     content_size = length * kCharSize;
1825     resource = reinterpret_cast<const byte*>(
1826         ExternalOneByteString::cast(string)->resource()->data());
1827   } else {
1828     map = internalized ? isolate->heap()->internalized_string_map()
1829                        : isolate->heap()->string_map();
1830     allocation_size = SeqTwoByteString::SizeFor(length);
1831     content_size = length * kShortSize;
1832     resource = reinterpret_cast<const byte*>(
1833         ExternalTwoByteString::cast(string)->resource()->data());
1834   }
1835
1836   AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
1837                               ? LO_SPACE
1838                               : OLD_DATA_SPACE;
1839   SerializePrologue(space, allocation_size, map);
1840
1841   // Output the rest of the imaginary string.
1842   int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
1843
1844   // Output raw data header. Do not bother with common raw length cases here.
1845   sink_->Put(kRawData, "RawDataForString");
1846   sink_->PutInt(bytes_to_output, "length");
1847
1848   // Serialize string header (except for map).
1849   Address string_start = string->address();
1850   for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
1851     sink_->PutSection(string_start[i], "StringHeader");
1852   }
1853
1854   // Serialize string content.
1855   sink_->PutRaw(resource, content_size, "StringContent");
1856
1857   // Since the allocation size is rounded up to object alignment, there
1858   // may be left-over bytes that need to be padded.
1859   int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
1860   DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
1861   for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
1862
1863   sink_->Put(kSkip, "SkipAfterString");
1864   sink_->PutInt(bytes_to_output, "SkipDistance");
1865 }
1866
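// A self-contained sketch of the padding computation above, using a
// hypothetical 16-byte header (the real value is SeqString::kHeaderSize):
static inline int SketchStringPadding(int allocation_size, int content_size) {
  const int kSketchSeqStringHeaderSize = 16;  // stand-in for the real header
  // Whatever the alignment-rounded allocation does not use for header or
  // content must be written out as zero padding to keep the stream in sync.
  return allocation_size - kSketchSeqStringHeaderSize - content_size;
}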
1867
1868 void Serializer::ObjectSerializer::Serialize() {
1869   if (FLAG_trace_serializer) {
1870     PrintF(" Encoding heap object: ");
1871     object_->ShortPrint();
1872     PrintF("\n");
1873   }
1874
1875   // We cannot serialize typed array objects correctly.
1876   DCHECK(!object_->IsJSTypedArray());
1877
1878   if (object_->IsScript()) {
1879     // Clear cached line ends.
1880     Object* undefined = serializer_->isolate()->heap()->undefined_value();
1881     Script::cast(object_)->set_line_ends(undefined);
1882   }
1883
1884   if (object_->IsExternalString()) {
1885     Heap* heap = serializer_->isolate()->heap();
1886     if (object_->map() != heap->native_source_string_map()) {
1887       // Usually we cannot recreate resources for external strings. To work
1888       // around this, external strings are serialized to look like ordinary
1889       // sequential strings.
1890       // The exceptions are native source code strings, since we can recreate
1891       // their resources. In that case we fall through and leave it to
1892       // VisitExternalOneByteString further down.
1893       SerializeExternalString();
1894       return;
1895     }
1896   }
1897
1898   int size = object_->Size();
1899   Map* map = object_->map();
1900   SerializePrologue(Serializer::SpaceOfObject(object_), size, map);
1901
1902   // Serialize the rest of the object.
1903   CHECK_EQ(0, bytes_processed_so_far_);
1904   bytes_processed_so_far_ = kPointerSize;
1905
1906   object_->IterateBody(map->instance_type(), size, this);
1907   OutputRawData(object_->address() + size);
1908 }
1909
1910
1911 void Serializer::ObjectSerializer::VisitPointers(Object** start,
1912                                                  Object** end) {
1913   Object** current = start;
1914   while (current < end) {
1915     while (current < end && (*current)->IsSmi()) current++;
1916     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
1917
1918     while (current < end && !(*current)->IsSmi()) {
1919       HeapObject* current_contents = HeapObject::cast(*current);
1920       int root_index = serializer_->root_index_map()->Lookup(current_contents);
1921       // Repeats are not subject to the write barrier so we can only use
1922       // immortal immovable root members. They are never in new space.
1923       if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
1924           Heap::RootIsImmortalImmovable(root_index) &&
1925           current_contents == current[-1]) {
1926         DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
1927         int repeat_count = 1;
1928         while (&current[repeat_count] < end - 1 &&
1929                current[repeat_count] == current_contents) {
1930           repeat_count++;
1931         }
1932         current += repeat_count;
1933         bytes_processed_so_far_ += repeat_count * kPointerSize;
1934         if (repeat_count > kMaxFixedRepeats) {
1935           sink_->Put(kVariableRepeat, "SerializeRepeats");
1936           sink_->PutInt(repeat_count, "SerializeRepeats");
1937         } else {
1938           sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
1939         }
1940       } else {
1941         serializer_->SerializeObject(
1942                 current_contents, kPlain, kStartOfObject, 0);
1943         bytes_processed_so_far_ += kPointerSize;
1944         current++;
1945       }
1946     }
1947   }
1948 }
1949
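// A minimal sketch of the run-length scan above: count how many consecutive
// slots starting at |current| hold |value| (illustrative only):
static int CountRepeats(Object** current, Object** end, Object* value) {
  int repeat_count = 1;
  // Mirror the loop above: stay one slot short of |end| and keep counting
  // while the next slot repeats the same value.
  while (&current[repeat_count] < end - 1 &&
         current[repeat_count] == value) {
    repeat_count++;
  }
  return repeat_count;
}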
1950
1951 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
1952   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1953   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1954
1955   int skip = OutputRawData(rinfo->target_address_address(),
1956                            kCanReturnSkipInsteadOfSkipping);
1957   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1958   Object* object = rinfo->target_object();
1959   serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
1960                                kStartOfObject, skip);
1961   bytes_processed_so_far_ += rinfo->target_address_size();
1962 }
1963
1964
1965 void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
1966   int skip = OutputRawData(reinterpret_cast<Address>(p),
1967                            kCanReturnSkipInsteadOfSkipping);
1968   sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
1969   sink_->PutInt(skip, "SkipB4ExternalRef");
1970   Address target = *p;
1971   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1972   bytes_processed_so_far_ += kPointerSize;
1973 }
1974
1975
1976 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
1977   int skip = OutputRawData(rinfo->target_address_address(),
1978                            kCanReturnSkipInsteadOfSkipping);
1979   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1980   sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1981   sink_->PutInt(skip, "SkipB4ExternalRef");
1982   Address target = rinfo->target_reference();
1983   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1984   bytes_processed_so_far_ += rinfo->target_address_size();
1985 }
1986
1987
1988 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
1989   int skip = OutputRawData(rinfo->target_address_address(),
1990                            kCanReturnSkipInsteadOfSkipping);
1991   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1992   sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1993   sink_->PutInt(skip, "SkipB4ExternalRef");
1994   Address target = rinfo->target_address();
1995   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1996   bytes_processed_so_far_ += rinfo->target_address_size();
1997 }
1998
1999
2000 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
2001   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
2002   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
2003
2004   int skip = OutputRawData(rinfo->target_address_address(),
2005                            kCanReturnSkipInsteadOfSkipping);
2006   Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
2007   serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
2008   bytes_processed_so_far_ += rinfo->target_address_size();
2009 }
2010
2011
2012 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
2013   int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
2014   Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
2015   serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
2016   bytes_processed_so_far_ += kPointerSize;
2017 }
2018
2019
2020 void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
2021   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
2022   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
2023
2024   int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
2025   Cell* object = Cell::cast(rinfo->target_cell());
2026   serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
2027   bytes_processed_so_far_ += kPointerSize;
2028 }
2029
2030
2031 void Serializer::ObjectSerializer::VisitExternalOneByteString(
2032     v8::String::ExternalOneByteStringResource** resource_pointer) {
2033   Address references_start = reinterpret_cast<Address>(resource_pointer);
2034   OutputRawData(references_start);
2035   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
2036     Object* source =
2037         serializer_->isolate()->heap()->natives_source_cache()->get(i);
2038     if (!source->IsUndefined()) {
2039       ExternalOneByteString* string = ExternalOneByteString::cast(source);
2040       typedef v8::String::ExternalOneByteStringResource Resource;
2041       const Resource* resource = string->resource();
2042       if (resource == *resource_pointer) {
2043         sink_->Put(kNativesStringResource, "NativesStringResource");
2044         sink_->PutSection(i, "NativesStringResourceEnd");
2045         bytes_processed_so_far_ += sizeof(resource);
2046         return;
2047       }
2048     }
2049   }
2050   // One of the strings in the natives cache should match the resource.  We
2051   // don't expect any other kinds of external strings here.
2052   UNREACHABLE();
2053 }
2054
2055
2056 static Code* CloneCodeObject(HeapObject* code) {
2057   Address copy = new byte[code->Size()];
2058   MemCopy(copy, code->address(), code->Size());
2059   return Code::cast(HeapObject::FromAddress(copy));
2060 }
2061
2062
2063 static void WipeOutRelocations(Code* code) {
2064   int mode_mask =
2065       RelocInfo::kCodeTargetMask |
2066       RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
2067       RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
2068       RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
2069   for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
2070     if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
2071       it.rinfo()->WipeOut();
2072     }
2073   }
2074 }
2075
2076
2077 int Serializer::ObjectSerializer::OutputRawData(
2078     Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
2079   Address object_start = object_->address();
2080   int base = bytes_processed_so_far_;
2081   int up_to_offset = static_cast<int>(up_to - object_start);
2082   int to_skip = up_to_offset - bytes_processed_so_far_;
2083   int bytes_to_output = to_skip;
2084   bytes_processed_so_far_ += to_skip;
2085   // This assert will fail if the reloc info gives us the target_address_address
2086   // locations in non-ascending order.  Luckily that doesn't happen.
2087   DCHECK(to_skip >= 0);
2088   bool outputting_code = false;
2089   if (to_skip != 0 && code_object_ && !code_has_been_output_) {
2090     // Output the code all at once and fix later.
2091     bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
2092     outputting_code = true;
2093     code_has_been_output_ = true;
2094   }
2095   if (bytes_to_output != 0 &&
2096       (!code_object_ || outputting_code)) {
2097 #define RAW_CASE(index)                                                        \
2098     if (!outputting_code && bytes_to_output == index * kPointerSize &&         \
2099         index * kPointerSize == to_skip) {                                     \
2100       sink_->PutSection(kRawData + index, "RawDataFixed");                     \
2101       to_skip = 0;  /* This insn already skips. */                             \
2102     } else  /* NOLINT */
2103     COMMON_RAW_LENGTHS(RAW_CASE)
2104 #undef RAW_CASE
2105     {  /* NOLINT */
2106       // We always end up here if we are outputting the code of a code object.
2107       sink_->Put(kRawData, "RawData");
2108       sink_->PutInt(bytes_to_output, "length");
2109     }
2110
2111     // To make snapshots reproducible, we need to wipe out all pointers in code.
2112     if (code_object_) {
2113       Code* code = CloneCodeObject(object_);
2114       // Code age headers are not serializable.
2115       code->MakeYoung(serializer_->isolate());
2116       WipeOutRelocations(code);
2117       // We need to wipe out the header fields *after* wiping out the
2118       // relocations, because some of these fields are needed for the latter.
2119       code->WipeOutHeader();
2120       object_start = code->address();
2121     }
2122
2123     const char* description = code_object_ ? "Code" : "Byte";
2124 #ifdef MEMORY_SANITIZER
2125     // Object sizes are usually rounded up with uninitialized padding space.
2126     MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
2127 #endif  // MEMORY_SANITIZER
2128     sink_->PutRaw(object_start + base, bytes_to_output, description);
2129     if (code_object_) delete[] object_start;
2130   }
2131   if (to_skip != 0 && return_skip == kIgnoringReturn) {
2132     sink_->Put(kSkip, "Skip");
2133     sink_->PutInt(to_skip, "SkipDistance");
2134     to_skip = 0;
2135   }
2136   return to_skip;
2137 }
2138
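// Note on the return value above (a sketch of the calling convention, not
// normative documentation): callers that pass kCanReturnSkipInsteadOfSkipping
// fold the remaining skip distance into the next opcode they emit (e.g. the
// "SkipB4ExternalRef" integer), whereas with kIgnoringReturn an explicit
// kSkip instruction has already been written and 0 is returned.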
2139
2140 AllocationSpace Serializer::SpaceOfObject(HeapObject* object) {
2141   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
2142     AllocationSpace s = static_cast<AllocationSpace>(i);
2143     if (object->GetHeap()->InSpace(object, s)) {
2144       DCHECK(i < kNumberOfSpaces);
2145       return s;
2146     }
2147   }
2148   UNREACHABLE();
2149   return FIRST_SPACE;
2150 }
2151
2152
2153 BackReference Serializer::AllocateLargeObject(int size) {
2154   // Large objects are allocated one-by-one when deserializing. We do not
2155   // have to keep track of multiple chunks.
2156   large_objects_total_size_ += size;
2157   return BackReference::LargeObjectReference(seen_large_objects_index_++);
2158 }
2159
2160
2161 BackReference Serializer::Allocate(AllocationSpace space, int size) {
2162   DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
2163   DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
2164   uint32_t new_chunk_size = pending_chunk_[space] + size;
2165   if (new_chunk_size > max_chunk_size(space)) {
2166     // The new chunk size would not fit onto a single page. Complete the
2167     // current chunk and start a new one.
2168     sink_->Put(kNextChunk, "NextChunk");
2169     sink_->Put(space, "NextChunkSpace");
2170     completed_chunks_[space].Add(pending_chunk_[space]);
2171     DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
2172     pending_chunk_[space] = 0;
2173     new_chunk_size = size;
2174   }
2175   uint32_t offset = pending_chunk_[space];
2176   pending_chunk_[space] = new_chunk_size;
2177   return BackReference::Reference(space, completed_chunks_[space].length(),
2178                                   offset);
2179 }
2180
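// A sketch of the rollover test above: a pending chunk must be completed
// when the next object no longer fits on the same page (illustrative only):
static inline bool SketchNeedsNewChunk(uint32_t pending, uint32_t size,
                                       uint32_t max_chunk_size) {
  // E.g. pending == 16000, size == 500, max_chunk_size == 16384 -> true,
  // which triggers the kNextChunk bytecode and resets the offset to 0.
  return pending + size > max_chunk_size;
}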
2181
2182 void Serializer::Pad() {
2183   // The non-branching GetInt will read up to 3 bytes too far, so we need
2184   // to pad the snapshot to make sure we don't read over the end.
2185   for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
2186     sink_->Put(kNop, "Padding");
2187   }
2188   // Pad up to pointer size for checksum.
2189   while (!IsAligned(sink_->Position(), kPointerAlignment)) {
2190     sink_->Put(kNop, "Padding");
2191   }
2192 }
2193
2194
2195 void Serializer::InitializeCodeAddressMap() {
2196   isolate_->InitializeLoggingAndCounters();
2197   code_address_map_ = new CodeAddressMap(isolate_);
2198 }
2199
2200
2201 ScriptData* CodeSerializer::Serialize(Isolate* isolate,
2202                                       Handle<SharedFunctionInfo> info,
2203                                       Handle<String> source) {
2204   base::ElapsedTimer timer;
2205   if (FLAG_profile_deserialization) timer.Start();
2206   if (FLAG_trace_serializer) {
2207     PrintF("[Serializing from");
2208     Object* script = info->script();
2209     if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
2210     PrintF("]\n");
2211   }
2212
2213   // Serialize code object.
2214   SnapshotByteSink sink(info->code()->CodeSize() * 2);
2215   CodeSerializer cs(isolate, &sink, *source, info->code());
2216   DisallowHeapAllocation no_gc;
2217   Object** location = Handle<Object>::cast(info).location();
2218   cs.VisitPointer(location);
2219   cs.Pad();
2220
2221   SerializedCodeData data(sink.data(), cs);
2222   ScriptData* script_data = data.GetScriptData();
2223
2224   if (FLAG_profile_deserialization) {
2225     double ms = timer.Elapsed().InMillisecondsF();
2226     int length = script_data->length();
2227     PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
2228   }
2229
2230   return script_data;
2231 }
2232
2233
2234 void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
2235                                      WhereToPoint where_to_point, int skip) {
2236   int root_index = root_index_map_.Lookup(obj);
2237   if (root_index != RootIndexMap::kInvalidRootIndex) {
2238     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
2239     return;
2240   }
2241
2242   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
2243
2244   FlushSkip(skip);
2245
2246   if (obj->IsCode()) {
2247     Code* code_object = Code::cast(obj);
2248     switch (code_object->kind()) {
2249       case Code::OPTIMIZED_FUNCTION:  // No optimized code compiled yet.
2250       case Code::HANDLER:             // No handlers patched in yet.
2251       case Code::REGEXP:              // No regexp literals initialized yet.
2252       case Code::NUMBER_OF_KINDS:     // Pseudo enum value.
2253         CHECK(false);
2254       case Code::BUILTIN:
2255         SerializeBuiltin(code_object->builtin_index(), how_to_code,
2256                          where_to_point);
2257         return;
2258       case Code::STUB:
2259         SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
2260         return;
2261 #define IC_KIND_CASE(KIND) case Code::KIND:
2262         IC_KIND_LIST(IC_KIND_CASE)
2263 #undef IC_KIND_CASE
2264         SerializeIC(code_object, how_to_code, where_to_point);
2265         return;
2266       case Code::FUNCTION:
2267         DCHECK(code_object->has_reloc_info_for_serialization());
2268         // Only serialize the code for the toplevel function unless specified
2269         // by flag. Replace code of inner functions by the lazy compile builtin.
2270         // This is safe, as checked in Compiler::BuildFunctionInfo.
2271         if (code_object != main_code_ && !FLAG_serialize_inner) {
2272           SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
2273         } else {
2274           SerializeGeneric(code_object, how_to_code, where_to_point);
2275         }
2276         return;
2277     }
2278     UNREACHABLE();
2279   }
2280
2281   // Past this point we should not see any (context-specific) maps anymore.
2282   CHECK(!obj->IsMap());
2283   // There should be no references to the global object embedded.
2284   CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
2285   // There should be no hash table embedded. They would require rehashing.
2286   CHECK(!obj->IsHashTable());
2287   // We expect no instantiated function objects or contexts.
2288   CHECK(!obj->IsJSFunction() && !obj->IsContext());
2289
2290   SerializeGeneric(obj, how_to_code, where_to_point);
2291 }
2292
2293
2294 void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
2295                                       HowToCode how_to_code,
2296                                       WhereToPoint where_to_point) {
2297   if (heap_object->IsInternalizedString()) num_internalized_strings_++;
2298
2299   // Object has not yet been serialized.  Serialize it here.
2300   ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
2301                               where_to_point);
2302   serializer.Serialize();
2303 }
2304
2305
2306 void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
2307                                       WhereToPoint where_to_point) {
2308   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
2309          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
2310          (how_to_code == kFromCode && where_to_point == kInnerPointer));
2311   DCHECK_LT(builtin_index, Builtins::builtin_count);
2312   DCHECK_LE(0, builtin_index);
2313
2314   if (FLAG_trace_serializer) {
2315     PrintF(" Encoding builtin: %s\n",
2316            isolate()->builtins()->name(builtin_index));
2317   }
2318
2319   sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
2320   sink_->PutInt(builtin_index, "builtin_index");
2321 }
2322
2323
2324 void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
2325                                        WhereToPoint where_to_point) {
2326   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
2327          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
2328          (how_to_code == kFromCode && where_to_point == kInnerPointer));
2329   DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
2330   DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
2331
2332   int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
2333
2334   if (FLAG_trace_serializer) {
2335     PrintF(" Encoding code stub %s as %d\n",
2336            CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
2337            index);
2338   }
2339
2340   sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
2341   sink_->PutInt(index, "CodeStub key");
2342 }
2343
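// A sketch of the index mapping above (illustrative only): stub N in the
// serialized key list is referenced as attached object
// kCodeStubsBaseIndex + N, which is where CodeSerializer::Deserialize
// (below) installs the corresponding code objects.
static inline int SketchAttachedIndexForStub(int stub_list_position,
                                             int code_stubs_base_index) {
  return code_stubs_base_index + stub_list_position;
}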
2344
2345 void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
2346                                  WhereToPoint where_to_point) {
2347   // The IC may be implemented as a stub.
2348   uint32_t stub_key = ic->stub_key();
2349   if (stub_key != CodeStub::NoCacheKey()) {
2350     if (FLAG_trace_serializer) {
2351       PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
2352     }
2353     SerializeCodeStub(stub_key, how_to_code, where_to_point);
2354     return;
2355   }
2356   // The IC may be implemented as a builtin. Only real builtins have an
2357   // actual builtin_index value attached (otherwise it's just garbage).
2358   // Compare to make sure we are really dealing with a builtin.
2359   int builtin_index = ic->builtin_index();
2360   if (builtin_index < Builtins::builtin_count) {
2361     Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
2362     Code* builtin = isolate()->builtins()->builtin(name);
2363     if (builtin == ic) {
2364       if (FLAG_trace_serializer) {
2365         PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
2366       }
2367       DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
2368              ic->kind() == Code::KEYED_STORE_IC);
2369       SerializeBuiltin(builtin_index, how_to_code, where_to_point);
2370       return;
2371     }
2372   }
2373   // The IC may also just be a piece of code kept in the non_monomorphic_cache.
2374   // In that case, just serialize as a normal code object.
2375   if (FLAG_trace_serializer) {
2376     PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
2377   }
2378   DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
2379   SerializeGeneric(ic, how_to_code, where_to_point);
2380 }
2381
2382
2383 int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
2384   // TODO(yangguo): Maybe we need a hash table for a faster lookup than O(n^2).
2385   int index = 0;
2386   while (index < stub_keys_.length()) {
2387     if (stub_keys_[index] == stub_key) return index;
2388     index++;
2389   }
2390   stub_keys_.Add(stub_key);
2391   return index;
2392 }
2393
2394
2395 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
2396     Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
2397   base::ElapsedTimer timer;
2398   if (FLAG_profile_deserialization) timer.Start();
2399
2400   HandleScope scope(isolate);
2401
2402   SmartPointer<SerializedCodeData> scd(
2403       SerializedCodeData::FromCachedData(cached_data, *source));
2404   if (scd.is_empty()) {
2405     if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
2406     DCHECK(cached_data->rejected());
2407     return MaybeHandle<SharedFunctionInfo>();
2408   }
2409
2410   // Eagerly expand string table to avoid allocations during deserialization.
2411   StringTable::EnsureCapacityForDeserialization(isolate,
2412                                                 scd->NumInternalizedStrings());
2413
2414   // Prepare and register list of attached objects.
2415   Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
2416   Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
2417       code_stub_keys.length() + kCodeStubsBaseIndex);
2418   attached_objects[kSourceObjectIndex] = source;
2419   for (int i = 0; i < code_stub_keys.length(); i++) {
2420     attached_objects[i + kCodeStubsBaseIndex] =
2421         CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
2422   }
2423
2424   Deserializer deserializer(scd.get());
2425   deserializer.SetAttachedObjects(attached_objects);
2426
2427   // Deserialize.
2428   Handle<SharedFunctionInfo> result;
2429   if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
2430     // Deserializing may fail if the reservations cannot be fulfilled.
2431     if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
2432     return MaybeHandle<SharedFunctionInfo>();
2433   }
2434   deserializer.FlushICacheForNewCodeObjects();
2435
2436   if (FLAG_profile_deserialization) {
2437     double ms = timer.Elapsed().InMillisecondsF();
2438     int length = cached_data->length();
2439     PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
2440   }
2441   result->set_deserialized(true);
2442
2443   if (isolate->logger()->is_logging_code_events() ||
2444       isolate->cpu_profiler()->is_profiling()) {
2445     String* name = isolate->heap()->empty_string();
2446     if (result->script()->IsScript()) {
2447       Script* script = Script::cast(result->script());
2448       if (script->name()->IsString()) name = String::cast(script->name());
2449     }
2450     isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
2451                                        *result, NULL, name);
2452   }
2453
2454   return scope.CloseAndEscape(result);
2455 }
2456
2457
2458 void SerializedData::AllocateData(int size) {
2459   DCHECK(!owns_data_);
2460   data_ = NewArray<byte>(size);
2461   size_ = size;
2462   owns_data_ = true;
2463   DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
2464 }
2465
2466
2467 SnapshotData::SnapshotData(const Serializer& ser) {
2468   DisallowHeapAllocation no_gc;
2469   List<Reservation> reservations;
2470   ser.EncodeReservations(&reservations);
2471   const List<byte>& payload = ser.sink()->data();
2472
2473   // Calculate sizes.
2474   int reservation_size = reservations.length() * kInt32Size;
2475   int size = kHeaderSize + reservation_size + payload.length();
2476
2477   // Allocate backing store and create result data.
2478   AllocateData(size);
2479
2480   // Set header values.
2481   SetHeaderValue(kCheckSumOffset, Version::Hash());
2482   SetHeaderValue(kNumReservationsOffset, reservations.length());
2483   SetHeaderValue(kPayloadLengthOffset, payload.length());
2484
2485   // Copy reservation chunk sizes.
2486   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
2487             reservation_size);
2488
2489   // Copy serialized data.
2490   CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
2491             static_cast<size_t>(payload.length()));
2492 }
2493
2494
2495 bool SnapshotData::IsSane() {
2496   return GetHeaderValue(kCheckSumOffset) == Version::Hash();
2497 }
2498
2499
2500 Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
2501   return Vector<const Reservation>(
2502       reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2503       GetHeaderValue(kNumReservationsOffset));
2504 }
2505
2506
2507 Vector<const byte> SnapshotData::Payload() const {
2508   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2509   const byte* payload = data_ + kHeaderSize + reservations_size;
2510   int length = GetHeaderValue(kPayloadLengthOffset);
2511   DCHECK_EQ(data_ + size_, payload + length);
2512   return Vector<const byte>(payload, length);
2513 }
2514
2515
2516 class Checksum {
2517  public:
2518   explicit Checksum(Vector<const byte> payload) {
2519     // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
2520     uintptr_t a = 1;
2521     uintptr_t b = 0;
2522     const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
2523     DCHECK(IsAligned(payload.length(), kIntptrSize));
2524     const uintptr_t* end = cur + payload.length() / kIntptrSize;
2525     while (cur < end) {
2526       // Unsigned overflow expected and intended.
2527       a += *cur++;
2528       b += a;
2529     }
2530 #if V8_HOST_ARCH_64_BIT
2531     a ^= a >> 32;
2532     b ^= b >> 32;
2533 #endif  // V8_HOST_ARCH_64_BIT
2534     a_ = static_cast<uint32_t>(a);
2535     b_ = static_cast<uint32_t>(b);
2536   }
2537
2538   bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
2539
2540   uint32_t a() const { return a_; }
2541   uint32_t b() const { return b_; }
2542
2543  private:
2544   uint32_t a_;
2545   uint32_t b_;
2546
2547   DISALLOW_COPY_AND_ASSIGN(Checksum);
2548 };
2549
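// A minimal usage sketch for the Checksum class above (SketchVerifyPayload
// is a hypothetical helper, illustrative only): recompute the Fletcher sums
// over a payload and compare them with previously stored values.
static inline bool SketchVerifyPayload(Vector<const byte> payload,
                                       uint32_t expected_a,
                                       uint32_t expected_b) {
  Checksum checksum(payload);  // runs the Fletcher loop above
  return checksum.Check(expected_a, expected_b);
}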
2550
2551 SerializedCodeData::SerializedCodeData(const List<byte>& payload,
2552                                        const CodeSerializer& cs) {
2553   DisallowHeapAllocation no_gc;
2554   const List<uint32_t>* stub_keys = cs.stub_keys();
2555
2556   List<Reservation> reservations;
2557   cs.EncodeReservations(&reservations);
2558
2559   // Calculate sizes.
2560   int reservation_size = reservations.length() * kInt32Size;
2561   int num_stub_keys = stub_keys->length();
2562   int stub_keys_size = stub_keys->length() * kInt32Size;
2563   int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
2564   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
2565   int size = padded_payload_offset + payload.length();
2566
2567   // Allocate backing store and create result data.
2568   AllocateData(size);
2569
2570   // Set header values.
2571   SetHeaderValue(kMagicNumberOffset, kMagicNumber);
2572   SetHeaderValue(kVersionHashOffset, Version::Hash());
2573   SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
2574   SetHeaderValue(kCpuFeaturesOffset,
2575                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
2576   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
2577   SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
2578   SetHeaderValue(kNumReservationsOffset, reservations.length());
2579   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
2580   SetHeaderValue(kPayloadLengthOffset, payload.length());
2581
2582   Checksum checksum(payload.ToConstVector());
2583   SetHeaderValue(kChecksum1Offset, checksum.a());
2584   SetHeaderValue(kChecksum2Offset, checksum.b());
2585
2586   // Copy reservation chunk sizes.
2587   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
2588             reservation_size);
2589
2590   // Copy code stub keys.
2591   CopyBytes(data_ + kHeaderSize + reservation_size,
2592             reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
2593
2594   memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
2595
2596   // Copy serialized data.
2597   CopyBytes(data_ + padded_payload_offset, payload.begin(),
2598             static_cast<size_t>(payload.length()));
2599 }
2600
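// For reference, the backing store written above is laid out as (sketch):
//
//   [ header | reservation sizes | code stub keys | padding | payload ]
//
// The padding rounds the payload offset up to pointer alignment, which
// SerializedCodeData::Payload() below verifies with a DCHECK.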
2601
2602 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
2603     String* source) const {
2604   uint32_t magic_number = GetHeaderValue(kMagicNumberOffset);
2605   uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
2606   uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
2607   uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
2608   uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
2609   uint32_t c1 = GetHeaderValue(kChecksum1Offset);
2610   uint32_t c2 = GetHeaderValue(kChecksum2Offset);
2611   if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
2612   if (version_hash != Version::Hash()) return VERSION_MISMATCH;
2613   if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
2614   if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
2615     return CPU_FEATURES_MISMATCH;
2616   }
2617   if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
2618   if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
2619   return CHECK_SUCCESS;
2620 }
2621
2622
2623 // Return the ScriptData object and relinquish ownership over it to the caller.
2624 ScriptData* SerializedCodeData::GetScriptData() {
2625   DCHECK(owns_data_);
2626   ScriptData* result = new ScriptData(data_, size_);
2627   result->AcquireDataOwnership();
2628   owns_data_ = false;
2629   data_ = NULL;
2630   return result;
2631 }
2632
2633
2634 Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
2635     const {
2636   return Vector<const Reservation>(
2637       reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2638       GetHeaderValue(kNumReservationsOffset));
2639 }
2640
2641
2642 Vector<const byte> SerializedCodeData::Payload() const {
2643   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2644   int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
2645   int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
2646   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
2647   const byte* payload = data_ + padded_payload_offset;
2648   DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
2649   int length = GetHeaderValue(kPayloadLengthOffset);
2650   DCHECK_EQ(data_ + size_, payload + length);
2651   return Vector<const byte>(payload, length);
2652 }
2653
2654
2655 int SerializedCodeData::NumInternalizedStrings() const {
2656   return GetHeaderValue(kNumInternalizedStringsOffset);
2657 }
2658
2659 Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
2660   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2661   const byte* start = data_ + kHeaderSize + reservations_size;
2662   return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
2663                                 GetHeaderValue(kNumCodeStubKeysOffset));
2664 }
2665
2666
2667 SerializedCodeData::SerializedCodeData(ScriptData* data)
2668     : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
2669
2670
2671 SerializedCodeData* SerializedCodeData::FromCachedData(ScriptData* cached_data,
2672                                                        String* source) {
2673   DisallowHeapAllocation no_gc;
2674   SerializedCodeData* scd = new SerializedCodeData(cached_data);
2675   SanityCheckResult r = scd->SanityCheck(source);
2676   if (r == CHECK_SUCCESS) return scd;
2677   cached_data->Reject();
2678   source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
2679   delete scd;
2680   return NULL;
2681 }
2682 } }  // namespace v8::internal