test: remove obsolete harmony flags
[platform/upstream/nodejs.git] deps/v8/src/snapshot/serialize.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/platform/platform.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/cpu-profiler.h"
13 #include "src/deoptimizer.h"
14 #include "src/execution.h"
15 #include "src/global-handles.h"
16 #include "src/ic/ic.h"
17 #include "src/ic/stub-cache.h"
18 #include "src/objects.h"
19 #include "src/parser.h"
20 #include "src/runtime/runtime.h"
21 #include "src/snapshot/natives.h"
22 #include "src/snapshot/serialize.h"
23 #include "src/snapshot/snapshot.h"
24 #include "src/snapshot/snapshot-source-sink.h"
25 #include "src/v8threads.h"
26 #include "src/version.h"
27
28 namespace v8 {
29 namespace internal {
30
31
32 // -----------------------------------------------------------------------------
33 // Coding of external references.
34
35
36 ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
37   ExternalReferenceTable* external_reference_table =
38       isolate->external_reference_table();
39   if (external_reference_table == NULL) {
40     external_reference_table = new ExternalReferenceTable(isolate);
41     isolate->set_external_reference_table(external_reference_table);
42   }
43   return external_reference_table;
44 }
45
46
47 ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
48   // Miscellaneous
49   Add(ExternalReference::roots_array_start(isolate).address(),
50       "Heap::roots_array_start()");
51   Add(ExternalReference::address_of_stack_limit(isolate).address(),
52       "StackGuard::address_of_jslimit()");
53   Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
54       "StackGuard::address_of_real_jslimit()");
55   Add(ExternalReference::new_space_start(isolate).address(),
56       "Heap::NewSpaceStart()");
57   Add(ExternalReference::new_space_mask(isolate).address(),
58       "Heap::NewSpaceMask()");
59   Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
60       "Heap::NewSpaceAllocationLimitAddress()");
61   Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
62       "Heap::NewSpaceAllocationTopAddress()");
63   Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
64   Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
65       "Debug::step_in_fp_addr()");
66   Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
67       "mod_two_doubles");
68   // Keyed lookup cache.
69   Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
70       "KeyedLookupCache::keys()");
71   Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
72       "KeyedLookupCache::field_offsets()");
73   Add(ExternalReference::handle_scope_next_address(isolate).address(),
74       "HandleScope::next");
75   Add(ExternalReference::handle_scope_limit_address(isolate).address(),
76       "HandleScope::limit");
77   Add(ExternalReference::handle_scope_level_address(isolate).address(),
78       "HandleScope::level");
79   Add(ExternalReference::new_deoptimizer_function(isolate).address(),
80       "Deoptimizer::New()");
81   Add(ExternalReference::compute_output_frames_function(isolate).address(),
82       "Deoptimizer::ComputeOutputFrames()");
83   Add(ExternalReference::address_of_min_int().address(),
84       "LDoubleConstant::min_int");
85   Add(ExternalReference::address_of_one_half().address(),
86       "LDoubleConstant::one_half");
87   Add(ExternalReference::isolate_address(isolate).address(), "isolate");
88   Add(ExternalReference::address_of_negative_infinity().address(),
89       "LDoubleConstant::negative_infinity");
90   Add(ExternalReference::power_double_double_function(isolate).address(),
91       "power_double_double_function");
92   Add(ExternalReference::power_double_int_function(isolate).address(),
93       "power_double_int_function");
94   Add(ExternalReference::math_log_double_function(isolate).address(),
95       "std::log");
96   Add(ExternalReference::store_buffer_top(isolate).address(),
97       "store_buffer_top");
98   Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
99   Add(ExternalReference::get_date_field_function(isolate).address(),
100       "JSDate::GetField");
101   Add(ExternalReference::date_cache_stamp(isolate).address(),
102       "date_cache_stamp");
103   Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
104       "address_of_pending_message_obj");
105   Add(ExternalReference::get_make_code_young_function(isolate).address(),
106       "Code::MakeCodeYoung");
107   Add(ExternalReference::cpu_features().address(), "cpu_features");
108   Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
109           .address(),
110       "Heap::OldPointerSpaceAllocationTopAddress");
111   Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate)
112           .address(),
113       "Heap::OldPointerSpaceAllocationLimitAddress");
114   Add(ExternalReference::old_data_space_allocation_top_address(isolate)
115           .address(),
116       "Heap::OldDataSpaceAllocationTopAddress");
117   Add(ExternalReference::old_data_space_allocation_limit_address(isolate)
118           .address(),
119       "Heap::OldDataSpaceAllocationLimitAddress");
120   Add(ExternalReference::allocation_sites_list_address(isolate).address(),
121       "Heap::allocation_sites_list_address()");
122   Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
123   Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
124       "Code::MarkCodeAsExecuted");
125   Add(ExternalReference::is_profiling_address(isolate).address(),
126       "CpuProfiler::is_profiling");
127   Add(ExternalReference::scheduled_exception_address(isolate).address(),
128       "Isolate::scheduled_exception");
129   Add(ExternalReference::invoke_function_callback(isolate).address(),
130       "InvokeFunctionCallback");
131   Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
132       "InvokeAccessorGetterCallback");
133   Add(ExternalReference::flush_icache_function(isolate).address(),
134       "CpuFeatures::FlushICache");
135   Add(ExternalReference::log_enter_external_function(isolate).address(),
136       "Logger::EnterExternal");
137   Add(ExternalReference::log_leave_external_function(isolate).address(),
138       "Logger::LeaveExternal");
139   Add(ExternalReference::address_of_minus_one_half().address(),
140       "double_constants.minus_one_half");
141   Add(ExternalReference::stress_deopt_count(isolate).address(),
142       "Isolate::stress_deopt_count_address()");
143
144   // Debug addresses
145   Add(ExternalReference::debug_after_break_target_address(isolate).address(),
146       "Debug::after_break_target_address()");
147   Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
148           .address(),
149       "Debug::restarter_frame_function_pointer_address()");
150   Add(ExternalReference::debug_is_active_address(isolate).address(),
151       "Debug::is_active_address()");
152
153 #ifndef V8_INTERPRETED_REGEXP
154   Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
155       "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
156   Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
157       "RegExpMacroAssembler*::CheckStackGuardState()");
158   Add(ExternalReference::re_grow_stack(isolate).address(),
159       "NativeRegExpMacroAssembler::GrowStack()");
160   Add(ExternalReference::re_word_character_map().address(),
161       "NativeRegExpMacroAssembler::word_character_map");
162   Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
163       "RegExpStack::limit_address()");
164   Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
165           .address(),
166       "RegExpStack::memory_address()");
167   Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
168       "RegExpStack::memory_size()");
169   Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
170       "OffsetsVector::static_offsets_vector");
171 #endif  // V8_INTERPRETED_REGEXP
172
173   // The following populates all of the different types of external references
174   // into the ExternalReferenceTable.
175   //
176   // NOTE: This function was originally 100k of code.  It has since been
177   // rewritten to be mostly table driven, as the callback macro style tends to
178   // very easily cause code bloat.  Please be careful in the future when adding
179   // new references.
180
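  // For illustration: each list macro stamps out one initializer per entry.
  // Assuming BUILTIN_LIST_C includes an entry for HandleApiCall (as in
  // builtins.h), DEF_ENTRY_C expands it to roughly
  //
  //   { Builtins::c_HandleApiCall, "Builtins::HandleApiCall" },
  //
  // so each table below is a flat array of {id, name} pairs built entirely by
  // the preprocessor, and the loops that follow just walk those arrays.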
181   struct RefTableEntry {
182     uint16_t id;
183     const char* name;
184   };
185
186   static const RefTableEntry c_builtins[] = {
187 #define DEF_ENTRY_C(name, ignored)           \
188   { Builtins::c_##name, "Builtins::" #name } \
189   ,
190       BUILTIN_LIST_C(DEF_ENTRY_C)
191 #undef DEF_ENTRY_C
192   };
193
194   for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
195     ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
196                           isolate);
197     Add(ref.address(), c_builtins[i].name);
198   }
199
200   static const RefTableEntry builtins[] = {
201 #define DEF_ENTRY_C(name, ignored)          \
202   { Builtins::k##name, "Builtins::" #name } \
203   ,
204 #define DEF_ENTRY_A(name, i1, i2, i3)       \
205   { Builtins::k##name, "Builtins::" #name } \
206   ,
207       BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
208           BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
209 #undef DEF_ENTRY_C
210 #undef DEF_ENTRY_A
211   };
212
213   for (unsigned i = 0; i < arraysize(builtins); ++i) {
214     ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
215     Add(ref.address(), builtins[i].name);
216   }
217
218   static const RefTableEntry runtime_functions[] = {
219 #define RUNTIME_ENTRY(name, i1, i2)       \
220   { Runtime::k##name, "Runtime::" #name } \
221   ,
222       FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
223 #undef RUNTIME_ENTRY
224   };
225
226   for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
227     ExternalReference ref(
228         static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
229     Add(ref.address(), runtime_functions[i].name);
230   }
231
232   static const RefTableEntry inline_caches[] = {
233 #define IC_ENTRY(name)          \
234   { IC::k##name, "IC::" #name } \
235   ,
236       IC_UTIL_LIST(IC_ENTRY)
237 #undef IC_ENTRY
238   };
239
240   for (unsigned i = 0; i < arraysize(inline_caches); ++i) {
241     ExternalReference ref(
242         IC_Utility(static_cast<IC::UtilityId>(inline_caches[i].id)), isolate);
243     Add(ref.address(), inline_caches[i].name);
244   }
245
246   // Stat counters
247   struct StatsRefTableEntry {
248     StatsCounter* (Counters::*counter)();
249     const char* name;
250   };
251
252   static const StatsRefTableEntry stats_ref_table[] = {
253 #define COUNTER_ENTRY(name, caption)      \
254   { &Counters::name, "Counters::" #name } \
255   ,
256       STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
257 #undef COUNTER_ENTRY
258   };
259
260   Counters* counters = isolate->counters();
261   for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
262     // To make sure the indices are not dependent on whether counters are
263     // enabled, use a dummy address as filler.
264     Address address = NotAvailable();
265     StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
266     if (counter->Enabled()) {
267       address = reinterpret_cast<Address>(counter->GetInternalPointer());
268     }
269     Add(address, stats_ref_table[i].name);
270   }
271
272   // Top addresses
273   static const char* address_names[] = {
274 #define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
275       FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
276 #undef BUILD_NAME_LITERAL
277   };
278
279   for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
280     Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
281         address_names[i]);
282   }
283
284   // Accessors
285   struct AccessorRefTable {
286     Address address;
287     const char* name;
288   };
289
290   static const AccessorRefTable accessors[] = {
291 #define ACCESSOR_INFO_DECLARATION(name)                                     \
292   { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
293   , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"},
294       ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
295 #undef ACCESSOR_INFO_DECLARATION
296   };
297
298   for (unsigned i = 0; i < arraysize(accessors); ++i) {
299     Add(accessors[i].address, accessors[i].name);
300   }
301
302   StubCache* stub_cache = isolate->stub_cache();
303
304   // Stub cache tables
305   Add(stub_cache->key_reference(StubCache::kPrimary).address(),
306       "StubCache::primary_->key");
307   Add(stub_cache->value_reference(StubCache::kPrimary).address(),
308       "StubCache::primary_->value");
309   Add(stub_cache->map_reference(StubCache::kPrimary).address(),
310       "StubCache::primary_->map");
311   Add(stub_cache->key_reference(StubCache::kSecondary).address(),
312       "StubCache::secondary_->key");
313   Add(stub_cache->value_reference(StubCache::kSecondary).address(),
314       "StubCache::secondary_->value");
315   Add(stub_cache->map_reference(StubCache::kSecondary).address(),
316       "StubCache::secondary_->map");
317
318   // Runtime entries
319   Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
320       "HandleScope::DeleteExtensions");
321   Add(ExternalReference::incremental_marking_record_write_function(isolate)
322           .address(),
323       "IncrementalMarking::RecordWrite");
324   Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
325       "StoreBuffer::StoreBufferOverflow");
326
327   // Add a small set of deopt entry addresses to encoder without generating the
328   // deopt table code, which isn't possible at deserialization time.
329   HandleScope scope(isolate);
330   for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) {
331     Address address = Deoptimizer::GetDeoptimizationEntry(
332         isolate,
333         entry,
334         Deoptimizer::LAZY,
335         Deoptimizer::CALCULATE_ENTRY_ADDRESS);
336     Add(address, "lazy_deopt");
337   }
338 }
339
340
341 ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
342   map_ = isolate->external_reference_map();
343   if (map_ != NULL) return;
344   map_ = new HashMap(HashMap::PointersMatch);
345   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
346   for (int i = 0; i < table->size(); ++i) {
347     Address addr = table->address(i);
348     if (addr == ExternalReferenceTable::NotAvailable()) continue;
349     // We expect no duplicate external reference entries in the table.
350     DCHECK_NULL(map_->Lookup(addr, Hash(addr), false));
351     map_->Lookup(addr, Hash(addr), true)->value = reinterpret_cast<void*>(i);
352   }
353   isolate->set_external_reference_map(map_);
354 }
355
356
357 uint32_t ExternalReferenceEncoder::Encode(Address address) const {
358   DCHECK_NOT_NULL(address);
359   HashMap::Entry* entry =
360       const_cast<HashMap*>(map_)->Lookup(address, Hash(address), false);
361   DCHECK_NOT_NULL(entry);
362   return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
363 }
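// A rough sketch of the round trip (assuming addr was registered in the
// table and is not the NotAvailable() filler):
//
//   ExternalReferenceEncoder encoder(isolate);
//   uint32_t index = encoder.Encode(addr);
//   DCHECK_EQ(addr, ExternalReferenceTable::instance(isolate)->address(index));
//
// Encode() maps an address back to its table index, which is what actually
// gets written into the snapshot.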
364
365
366 const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
367                                                     Address address) const {
368   HashMap::Entry* entry =
369       const_cast<HashMap*>(map_)->Lookup(address, Hash(address), false);
370   if (entry == NULL) return "<unknown>";
371   uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
372   return ExternalReferenceTable::instance(isolate)->name(i);
373 }
374
375
376 RootIndexMap::RootIndexMap(Isolate* isolate) {
377   map_ = isolate->root_index_map();
378   if (map_ != NULL) return;
379   map_ = new HashMap(HashMap::PointersMatch);
380   Object** root_array = isolate->heap()->roots_array_start();
381   for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
382     Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
383     Object* root = root_array[root_index];
384     // Omit root entries that can be written after initialization. They must
385     // not be referenced through the root list in the snapshot.
386     if (root->IsHeapObject() &&
387         isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
388       HeapObject* heap_object = HeapObject::cast(root);
389       HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
390       if (entry != NULL) {
391         // Some are initialized to a previous value in the root list.
392         DCHECK_LT(GetValue(entry), i);
393       } else {
394         SetValue(LookupEntry(map_, heap_object, true), i);
395       }
396     }
397   }
398   isolate->set_root_index_map(map_);
399 }
400
401
402 class CodeAddressMap: public CodeEventLogger {
403  public:
404   explicit CodeAddressMap(Isolate* isolate)
405       : isolate_(isolate) {
406     isolate->logger()->addCodeEventListener(this);
407   }
408
409   virtual ~CodeAddressMap() {
410     isolate_->logger()->removeCodeEventListener(this);
411   }
412
413   virtual void CodeMoveEvent(Address from, Address to) {
414     address_to_name_map_.Move(from, to);
415   }
416
417   virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
418   }
419
420   virtual void CodeDeleteEvent(Address from) {
421     address_to_name_map_.Remove(from);
422   }
423
424   const char* Lookup(Address address) {
425     return address_to_name_map_.Lookup(address);
426   }
427
428  private:
429   class NameMap {
430    public:
431     NameMap() : impl_(HashMap::PointersMatch) {}
432
433     ~NameMap() {
434       for (HashMap::Entry* p = impl_.Start(); p != NULL; p = impl_.Next(p)) {
435         DeleteArray(static_cast<const char*>(p->value));
436       }
437     }
438
439     void Insert(Address code_address, const char* name, int name_size) {
440       HashMap::Entry* entry = FindOrCreateEntry(code_address);
441       if (entry->value == NULL) {
442         entry->value = CopyName(name, name_size);
443       }
444     }
445
446     const char* Lookup(Address code_address) {
447       HashMap::Entry* entry = FindEntry(code_address);
448       return (entry != NULL) ? static_cast<const char*>(entry->value) : NULL;
449     }
450
451     void Remove(Address code_address) {
452       HashMap::Entry* entry = FindEntry(code_address);
453       if (entry != NULL) {
454         DeleteArray(static_cast<char*>(entry->value));
455         RemoveEntry(entry);
456       }
457     }
458
459     void Move(Address from, Address to) {
460       if (from == to) return;
461       HashMap::Entry* from_entry = FindEntry(from);
462       DCHECK(from_entry != NULL);
463       void* value = from_entry->value;
464       RemoveEntry(from_entry);
465       HashMap::Entry* to_entry = FindOrCreateEntry(to);
466       DCHECK(to_entry->value == NULL);
467       to_entry->value = value;
468     }
469
470    private:
471     static char* CopyName(const char* name, int name_size) {
472       char* result = NewArray<char>(name_size + 1);
473       for (int i = 0; i < name_size; ++i) {
474         char c = name[i];
475         if (c == '\0') c = ' ';
476         result[i] = c;
477       }
478       result[name_size] = '\0';
479       return result;
480     }
481
482     HashMap::Entry* FindOrCreateEntry(Address code_address) {
483       return impl_.Lookup(code_address, ComputePointerHash(code_address), true);
484     }
485
486     HashMap::Entry* FindEntry(Address code_address) {
487       return impl_.Lookup(code_address,
488                           ComputePointerHash(code_address),
489                           false);
490     }
491
492     void RemoveEntry(HashMap::Entry* entry) {
493       impl_.Remove(entry->key, entry->hash);
494     }
495
496     HashMap impl_;
497
498     DISALLOW_COPY_AND_ASSIGN(NameMap);
499   };
500
501   virtual void LogRecordedBuffer(Code* code,
502                                  SharedFunctionInfo*,
503                                  const char* name,
504                                  int length) {
505     address_to_name_map_.Insert(code->address(), name, length);
506   }
507
508   NameMap address_to_name_map_;
509   Isolate* isolate_;
510 };
511
512
513 void Deserializer::DecodeReservation(
514     Vector<const SerializedData::Reservation> res) {
515   DCHECK_EQ(0, reservations_[NEW_SPACE].length());
516   STATIC_ASSERT(NEW_SPACE == 0);
517   int current_space = NEW_SPACE;
518   for (auto& r : res) {
519     reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
520     if (r.is_last()) current_space++;
521   }
522   DCHECK_EQ(kNumberOfSpaces, current_space);
523   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
524 }
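// A sketch of the decoding above: the flat reservation stream uses is_last()
// as a space separator. Assuming three chunks, e.g.
//
//   [100 | last] [80] [40 | last] ...
//
// reservations_[NEW_SPACE] becomes {100}, the next space in the enum gets
// {80, 40}, and so on until every space has consumed its chunks.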
525
526
527 void Deserializer::FlushICacheForNewCodeObjects() {
528   PageIterator it(isolate_->heap()->code_space());
529   while (it.has_next()) {
530     Page* p = it.next();
531     CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
532   }
533 }
534
535
536 bool Deserializer::ReserveSpace() {
537 #ifdef DEBUG
538   for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
539     CHECK(reservations_[i].length() > 0);
540   }
541 #endif  // DEBUG
542   if (!isolate_->heap()->ReserveSpace(reservations_)) return false;
543   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
544     high_water_[i] = reservations_[i][0].start;
545   }
546   return true;
547 }
548
549
550 void Deserializer::Initialize(Isolate* isolate) {
551   DCHECK_NULL(isolate_);
552   DCHECK_NOT_NULL(isolate);
553   isolate_ = isolate;
554   DCHECK_NULL(external_reference_table_);
555   external_reference_table_ = ExternalReferenceTable::instance(isolate);
556   CHECK_EQ(magic_number_,
557            SerializedData::ComputeMagicNumber(external_reference_table_));
558 }
559
560
561 void Deserializer::Deserialize(Isolate* isolate) {
562   Initialize(isolate);
563   if (!ReserveSpace()) V8::FatalProcessOutOfMemory("deserializing context");
564   // No active threads.
565   DCHECK_NULL(isolate_->thread_manager()->FirstThreadStateInUse());
566   // No active handles.
567   DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
568   isolate_->heap()->IterateSmiRoots(this);
569   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
570   isolate_->heap()->RepairFreeListsAfterDeserialization();
571   isolate_->heap()->IterateWeakRoots(this, VISIT_ALL);
572
573   isolate_->heap()->set_native_contexts_list(
574       isolate_->heap()->undefined_value());
575   isolate_->heap()->set_array_buffers_list(
576       isolate_->heap()->undefined_value());
577   isolate_->heap()->set_new_array_buffer_views_list(
578       isolate_->heap()->undefined_value());
579
580   // The allocation site list is built during root iteration, but if no sites
581   // were encountered then it needs to be initialized to undefined.
582   if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
583     isolate_->heap()->set_allocation_sites_list(
584         isolate_->heap()->undefined_value());
585   }
586
587   // Update data pointers to the external strings containing natives sources.
588   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
589     Object* source = isolate_->heap()->natives_source_cache()->get(i);
590     if (!source->IsUndefined()) {
591       ExternalOneByteString::cast(source)->update_data_cache();
592     }
593   }
594
595   FlushICacheForNewCodeObjects();
596
597   // Issue code events for newly deserialized code objects.
598   LOG_CODE_EVENT(isolate_, LogCodeObjects());
599   LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
600 }
601
602
603 MaybeHandle<Object> Deserializer::DeserializePartial(
604     Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
605     Handle<FixedArray>* outdated_contexts_out) {
606   Initialize(isolate);
607   if (!ReserveSpace()) {
608     V8::FatalProcessOutOfMemory("deserialize context");
609     return MaybeHandle<Object>();
610   }
611
612   Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(1);
613   attached_objects[kGlobalProxyReference] = global_proxy;
614   SetAttachedObjects(attached_objects);
615
616   DisallowHeapAllocation no_gc;
617   // Record the code space allocation top so that we can check below that
618   // no new code objects were deserialized.
619   OldSpace* code_space = isolate_->heap()->code_space();
620   Address start_address = code_space->top();
621   Object* root;
622   Object* outdated_contexts;
623   VisitPointer(&root);
624   VisitPointer(&outdated_contexts);
625
626   // No code is deserialized here. If this check fires, that has changed
627   // and logging should be added to notify the profiler et al. of the
628   // new code.
629   CHECK_EQ(start_address, code_space->top());
630   CHECK(outdated_contexts->IsFixedArray());
631   *outdated_contexts_out =
632       Handle<FixedArray>(FixedArray::cast(outdated_contexts), isolate);
633   return Handle<Object>(root, isolate);
634 }
635
636
637 MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
638     Isolate* isolate) {
639   Initialize(isolate);
640   if (!ReserveSpace()) {
641     return Handle<SharedFunctionInfo>();
642   } else {
643     deserializing_user_code_ = true;
644     DisallowHeapAllocation no_gc;
645     Object* root;
646     VisitPointer(&root);
647     return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
648   }
649 }
650
651
652 Deserializer::~Deserializer() {
653   // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
654   // DCHECK(source_.AtEOF());
655   attached_objects_.Dispose();
656 }
657
658
659 // This is called on the roots.  It is the driver of the deserialization
660 // process.  It is also called on the body of each function.
661 void Deserializer::VisitPointers(Object** start, Object** end) {
662   // The space must be new space.  Any other space would cause ReadChunk to try
663   // to update the remembered set using NULL as the address.
664   ReadData(start, end, NEW_SPACE, NULL);
665 }
666
667
668 void Deserializer::RelinkAllocationSite(AllocationSite* site) {
669   if (isolate_->heap()->allocation_sites_list() == Smi::FromInt(0)) {
670     site->set_weak_next(isolate_->heap()->undefined_value());
671   } else {
672     site->set_weak_next(isolate_->heap()->allocation_sites_list());
673   }
674   isolate_->heap()->set_allocation_sites_list(site);
675 }
676
677
678 // Used to insert a deserialized internalized string into the string table.
679 class StringTableInsertionKey : public HashTableKey {
680  public:
681   explicit StringTableInsertionKey(String* string)
682       : string_(string), hash_(HashForObject(string)) {
683     DCHECK(string->IsInternalizedString());
684   }
685
686   bool IsMatch(Object* string) OVERRIDE {
687     // We know that every entry in the string table has its hash computed.
688     // Use that knowledge for a fast failure path.
689     if (hash_ != HashForObject(string)) return false;
690     // We want to compare the content of two internalized strings here.
691     return string_->SlowEquals(String::cast(string));
692   }
693
694   uint32_t Hash() OVERRIDE { return hash_; }
695
696   uint32_t HashForObject(Object* key) OVERRIDE {
697     return String::cast(key)->Hash();
698   }
699
700   MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate)
701       OVERRIDE {
702     return handle(string_, isolate);
703   }
704
705   String* string_;
706   uint32_t hash_;
707 };
708
709
710 HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
711   if (obj->IsString()) {
712     String* string = String::cast(obj);
713     // Uninitialize hash field as the hash seed may have changed.
714     string->set_hash_field(String::kEmptyHashField);
715     if (string->IsInternalizedString()) {
716       DisallowHeapAllocation no_gc;
717       HandleScope scope(isolate_);
718       StringTableInsertionKey key(string);
719       String* canonical = *StringTable::LookupKey(isolate_, &key);
720       string->SetForwardedInternalizedString(canonical);
721       return canonical;
722     }
723   } else if (obj->IsScript()) {
724     Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
725   }
726   return obj;
727 }
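// Note the pairing with GetBackReferencedObject() below: the forwarding
// pointer installed here lets later back references to the duplicate string
// be resolved to the canonical string table entry.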
728
729
730 HeapObject* Deserializer::GetBackReferencedObject(int space) {
731   HeapObject* obj;
732   BackReference back_reference(source_.GetInt());
733   if (space == LO_SPACE) {
734     CHECK(back_reference.chunk_index() == 0);
735     uint32_t index = back_reference.large_object_index();
736     obj = deserialized_large_objects_[index];
737   } else {
738     DCHECK(space < kNumberOfPreallocatedSpaces);
739     uint32_t chunk_index = back_reference.chunk_index();
740     DCHECK_LE(chunk_index, current_chunk_[space]);
741     uint32_t chunk_offset = back_reference.chunk_offset();
742     obj = HeapObject::FromAddress(reservations_[space][chunk_index].start +
743                                   chunk_offset);
744   }
745   if (deserializing_user_code() && obj->IsInternalizedString()) {
746     obj = String::cast(obj)->GetForwardedInternalizedString();
747   }
748   hot_objects_.Add(obj);
749   return obj;
750 }
751
752
753 // This routine writes the new object into the pointer provided. The
754 // object is allocated and read eagerly rather than having the write
755 // deferred, because otherwise the object would be written very late,
756 // which means the FreeSpace map is not set up by the time we need to
757 // use it to mark the space at the end of a page free.
758 void Deserializer::ReadObject(int space_number, Object** write_back) {
759   Address address;
760   HeapObject* obj;
761   int next_int = source_.GetInt();
762
763   bool double_align = false;
764 #ifndef V8_HOST_ARCH_64_BIT
765   double_align = next_int == kDoubleAlignmentSentinel;
766   if (double_align) next_int = source_.GetInt();
767 #endif
768
769   DCHECK_NE(kDoubleAlignmentSentinel, next_int);
770   int size = next_int << kObjectAlignmentBits;
771   int reserved_size = size + (double_align ? kPointerSize : 0);
772   address = Allocate(space_number, reserved_size);
773   obj = HeapObject::FromAddress(address);
774   if (double_align) {
775     obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
776     address = obj->address();
777   }
778
779   isolate_->heap()->OnAllocationEvent(obj, size);
780   Object** current = reinterpret_cast<Object**>(address);
781   Object** limit = current + (size >> kPointerSizeLog2);
782   if (FLAG_log_snapshot_positions) {
783     LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
784   }
785   ReadData(current, limit, space_number, address);
786
787   // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
788   // as a (weak) root. If this root is relocated correctly,
789   // RelinkAllocationSite() isn't necessary.
790   if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
791
792   // Fix up strings from serialized user code.
793   if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
794
795   Object* write_back_obj = obj;
796   UnalignedCopy(write_back, &write_back_obj);
797 #ifdef DEBUG
798   if (obj->IsCode()) {
799     DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
800 #ifdef VERIFY_HEAP
801     obj->ObjectVerify();
802 #endif  // VERIFY_HEAP
803   } else {
804     DCHECK(space_number != CODE_SPACE);
805   }
806 #endif  // DEBUG
807 }
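// A worked example of the size encoding consumed above: sizes are stored in
// words, and kObjectAlignmentBits equals kPointerSizeLog2, so an encoded
// value of 12 means 12 << kObjectAlignmentBits bytes (48 bytes on a 32-bit
// target, 96 on a 64-bit one). On 32-bit hosts a kDoubleAlignmentSentinel
// prefix instead requests double alignment, and the real size follows it.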
808
809
810 // We know the space requirements before deserialization and can
811 // pre-allocate that reserved space. During deserialization, all we need
812 // to do is to bump up the pointer for each space in the reserved
813 // space. This is also used for fixing back references.
814 // We may have to split up the pre-allocation into several chunks
815 // because it would not fit onto a single page. We do not have to keep
816 // track of when to move to the next chunk. An opcode will signal this.
817 // Since multiple large objects cannot be folded into one large object
818 // space allocation, we have to do an actual allocation when deserializing
819 // each large object. Instead of tracking offset for back references, we
820 // reference large objects by index.
821 Address Deserializer::Allocate(int space_index, int size) {
822   if (space_index == LO_SPACE) {
823     AlwaysAllocateScope scope(isolate_);
824     LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
825     Executability exec = static_cast<Executability>(source_.Get());
826     AllocationResult result = lo_space->AllocateRaw(size, exec);
827     HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
828     deserialized_large_objects_.Add(obj);
829     return obj->address();
830   } else {
831     DCHECK(space_index < kNumberOfPreallocatedSpaces);
832     Address address = high_water_[space_index];
833     DCHECK_NOT_NULL(address);
834     high_water_[space_index] += size;
835 #ifdef DEBUG
836     // Assert that the current reserved chunk is still big enough.
837     const Heap::Reservation& reservation = reservations_[space_index];
838     int chunk_index = current_chunk_[space_index];
839     CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
840 #endif
841     return address;
842   }
843 }
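// A sketch of how back references later name these allocations: for a
// preallocated space, the pair (chunk_index, chunk_offset) identifies the
// object, roughly
//
//   obj = HeapObject::FromAddress(reservations_[space][chunk].start + offset);
//
// while large objects are instead recovered by their allocation order, i.e.
// deserialized_large_objects_[index], as GetBackReferencedObject() shows.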
844
845
846 void Deserializer::ReadData(Object** current, Object** limit, int source_space,
847                             Address current_object_address) {
848   Isolate* const isolate = isolate_;
849   // Write barrier support costs around 1% in startup time.  In fact there
850   // are no new space objects in current boot snapshots, so it's not needed,
851   // but that may change.
852   bool write_barrier_needed =
853       (current_object_address != NULL && source_space != NEW_SPACE &&
854        source_space != CELL_SPACE && source_space != CODE_SPACE &&
855        source_space != OLD_DATA_SPACE);
856   while (current < limit) {
857     byte data = source_.Get();
858     switch (data) {
859 #define CASE_STATEMENT(where, how, within, space_number) \
860   case where + how + within + space_number:              \
861     STATIC_ASSERT((where & ~kWhereMask) == 0);           \
862     STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
863     STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
864     STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
865
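// For example, CASE_STATEMENT(kNewObject, kPlain, kStartOfObject, NEW_SPACE)
// expands to "case kNewObject + kPlain + kStartOfObject + NEW_SPACE:" plus
// static checks that each component stays inside its bit field, so every
// opcode byte decodes to a unique (where, how, within, space) combination.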
866 #define CASE_BODY(where, how, within, space_number_if_any)                     \
867   {                                                                            \
868     bool emit_write_barrier = false;                                           \
869     bool current_was_incremented = false;                                      \
870     int space_number = space_number_if_any == kAnyOldSpace                     \
871                            ? (data & kSpaceMask)                               \
872                            : space_number_if_any;                              \
873     if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
874       ReadObject(space_number, current);                                       \
875       emit_write_barrier = (space_number == NEW_SPACE);                        \
876     } else {                                                                   \
877       Object* new_object = NULL; /* May not be a real Object pointer. */       \
878       if (where == kNewObject) {                                               \
879         ReadObject(space_number, &new_object);                                 \
880       } else if (where == kBackref) {                                          \
881         emit_write_barrier = (space_number == NEW_SPACE);                      \
882         new_object = GetBackReferencedObject(data & kSpaceMask);               \
883       } else if (where == kBackrefWithSkip) {                                  \
884         int skip = source_.GetInt();                                           \
885         current = reinterpret_cast<Object**>(                                  \
886             reinterpret_cast<Address>(current) + skip);                        \
887         emit_write_barrier = (space_number == NEW_SPACE);                      \
888         new_object = GetBackReferencedObject(data & kSpaceMask);               \
889       } else if (where == kRootArray) {                                        \
890         int root_id = source_.GetInt();                                        \
891         new_object = isolate->heap()->roots_array_start()[root_id];            \
892         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
893       } else if (where == kPartialSnapshotCache) {                             \
894         int cache_index = source_.GetInt();                                    \
895         new_object = isolate->partial_snapshot_cache()->at(cache_index);       \
896         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
897       } else if (where == kExternalReference) {                                \
898         int skip = source_.GetInt();                                           \
899         current = reinterpret_cast<Object**>(                                  \
900             reinterpret_cast<Address>(current) + skip);                        \
901         int reference_id = source_.GetInt();                                   \
902         Address address = external_reference_table_->address(reference_id);    \
903         new_object = reinterpret_cast<Object*>(address);                       \
904       } else if (where == kAttachedReference) {                                \
905         int index = source_.GetInt();                                          \
906         DCHECK(deserializing_user_code() || index == kGlobalProxyReference);   \
907         new_object = *attached_objects_[index];                                \
908         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
909       } else {                                                                 \
910         DCHECK(where == kBuiltin);                                             \
911         DCHECK(deserializing_user_code());                                     \
912         int builtin_id = source_.GetInt();                                     \
913         DCHECK_LE(0, builtin_id);                                              \
914         DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
915         Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
916         new_object = isolate->builtins()->builtin(name);                       \
917         emit_write_barrier = false;                                            \
918       }                                                                        \
919       if (within == kInnerPointer) {                                           \
920         if (space_number != CODE_SPACE || new_object->IsCode()) {              \
921           Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
922           new_object =                                                         \
923               reinterpret_cast<Object*>(new_code_object->instruction_start()); \
924         } else {                                                               \
925           DCHECK(space_number == CODE_SPACE);                                  \
926           Cell* cell = Cell::cast(new_object);                                 \
927           new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
928         }                                                                      \
929       }                                                                        \
930       if (how == kFromCode) {                                                  \
931         Address location_of_branch_data = reinterpret_cast<Address>(current);  \
932         Assembler::deserialization_set_special_target_at(                      \
933             location_of_branch_data,                                           \
934             Code::cast(HeapObject::FromAddress(current_object_address)),       \
935             reinterpret_cast<Address>(new_object));                            \
936         location_of_branch_data += Assembler::kSpecialTargetSize;              \
937         current = reinterpret_cast<Object**>(location_of_branch_data);         \
938         current_was_incremented = true;                                        \
939       } else {                                                                 \
940         UnalignedCopy(current, &new_object);                                   \
941       }                                                                        \
942     }                                                                          \
943     if (emit_write_barrier && write_barrier_needed) {                          \
944       Address current_address = reinterpret_cast<Address>(current);            \
945       isolate->heap()->RecordWrite(                                            \
946           current_object_address,                                              \
947           static_cast<int>(current_address - current_object_address));         \
948     }                                                                          \
949     if (!current_was_incremented) {                                            \
950       current++;                                                               \
951     }                                                                          \
952     break;                                                                     \
953   }
954
955 // This generates a case and a body for the new space (which has to do extra
956 // write barrier handling) and handles the other spaces with fall-through cases
957 // and one body.
958 #define ALL_SPACES(where, how, within)                  \
959   CASE_STATEMENT(where, how, within, NEW_SPACE)         \
960   CASE_BODY(where, how, within, NEW_SPACE)              \
961   CASE_STATEMENT(where, how, within, OLD_DATA_SPACE)    \
962   CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
963   CASE_STATEMENT(where, how, within, CODE_SPACE)        \
964   CASE_STATEMENT(where, how, within, MAP_SPACE)         \
965   CASE_STATEMENT(where, how, within, CELL_SPACE)        \
966   CASE_STATEMENT(where, how, within, LO_SPACE)          \
967   CASE_BODY(where, how, within, kAnyOldSpace)
968
969 #define FOUR_CASES(byte_code)             \
970   case byte_code:                         \
971   case byte_code + 1:                     \
972   case byte_code + 2:                     \
973   case byte_code + 3:
974
975 #define SIXTEEN_CASES(byte_code)          \
976   FOUR_CASES(byte_code)                   \
977   FOUR_CASES(byte_code + 4)               \
978   FOUR_CASES(byte_code + 8)               \
979   FOUR_CASES(byte_code + 12)
980
981       // Deserialize a new object and write a pointer to it to the current
982       // object.
983       ALL_SPACES(kNewObject, kPlain, kStartOfObject)
984       // Support for direct instruction pointers in functions.  It's an inner
985       // pointer because it points at the entry point, not at the start of the
986       // code object.
987       CASE_STATEMENT(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
988       CASE_BODY(kNewObject, kPlain, kInnerPointer, CODE_SPACE)
989       // Deserialize a new code object and write a pointer to its first
990       // instruction to the current code object.
991       ALL_SPACES(kNewObject, kFromCode, kInnerPointer)
992       // Find a recently deserialized object using its offset from the current
993       // allocation point and write a pointer to it to the current object.
994       ALL_SPACES(kBackref, kPlain, kStartOfObject)
995       ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
996 #if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
997     defined(V8_TARGET_ARCH_PPC) || V8_OOL_CONSTANT_POOL
998       // Deserialize a new object from pointer found in code and write
999       // a pointer to it to the current object. Required only for MIPS, PPC or
1000       // ARM with ool constant pool, and omitted on the other architectures
1001       // because it is fully unrolled and would cause bloat.
1002       ALL_SPACES(kNewObject, kFromCode, kStartOfObject)
1003       // Find a recently deserialized code object using its offset from the
1004       // current allocation point and write a pointer to it to the current
1005       // object. Required only for MIPS, PPC or ARM with ool constant pool.
1006       ALL_SPACES(kBackref, kFromCode, kStartOfObject)
1007       ALL_SPACES(kBackrefWithSkip, kFromCode, kStartOfObject)
1008 #endif
1009       // Find a recently deserialized code object using its offset from the
1010       // current allocation point and write a pointer to its first instruction
1011       // to the current code object or the instruction pointer in a function
1012       // object.
1013       ALL_SPACES(kBackref, kFromCode, kInnerPointer)
1014       ALL_SPACES(kBackrefWithSkip, kFromCode, kInnerPointer)
1015       ALL_SPACES(kBackref, kPlain, kInnerPointer)
1016       ALL_SPACES(kBackrefWithSkip, kPlain, kInnerPointer)
1017       // Find an object in the roots array and write a pointer to it to the
1018       // current object.
1019       CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
1020       CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
1021 #if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
1022     defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC)
1023       // Find an object in the roots array and write a pointer to it in code.
1024       CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
1025       CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
1026 #endif
1027       // Find an object in the partial snapshot cache and write a pointer to it
1028       // to the current object.
1029       CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
1030       CASE_BODY(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
1031       // Find a code entry in the partial snapshot cache and
1032       // write a pointer to it to the current object.
1033       CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
1034       CASE_BODY(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
1035       // Find an external reference and write a pointer to it to the current
1036       // object.
1037       CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
1038       CASE_BODY(kExternalReference, kPlain, kStartOfObject, 0)
1039       // Find an external reference and write a pointer to it in the current
1040       // code object.
1041       CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
1042       CASE_BODY(kExternalReference, kFromCode, kStartOfObject, 0)
1043       // Find an object in the attached references and write a pointer to it to
1044       // the current object.
1045       CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
1046       CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
1047       CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
1048       CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
1049       CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
1050       CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
1051       // Find a builtin and write a pointer to it to the current object.
1052       CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
1053       CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
1054       CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
1055       CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
1056       CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
1057       CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
1058
1059 #undef CASE_STATEMENT
1060 #undef CASE_BODY
1061 #undef ALL_SPACES
1062
1063       case kSkip: {
1064         int size = source_.GetInt();
1065         current = reinterpret_cast<Object**>(
1066             reinterpret_cast<intptr_t>(current) + size);
1067         break;
1068       }
1069
1070       case kInternalReferenceEncoded:
1071       case kInternalReference: {
1072         // Internal reference address is not encoded via skip, but by offset
1073         // from code entry.
1074         int pc_offset = source_.GetInt();
1075         int target_offset = source_.GetInt();
1076         Code* code =
1077             Code::cast(HeapObject::FromAddress(current_object_address));
1078         DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
1079         DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
1080         Address pc = code->entry() + pc_offset;
1081         Address target = code->entry() + target_offset;
1082         Assembler::deserialization_set_target_internal_reference_at(
1083             pc, target, data == kInternalReference
1084                             ? RelocInfo::INTERNAL_REFERENCE
1085                             : RelocInfo::INTERNAL_REFERENCE_ENCODED);
1086         break;
1087       }
1088
1089       case kNop:
1090         break;
1091
1092       case kNextChunk: {
1093         int space = source_.Get();
1094         DCHECK(space < kNumberOfPreallocatedSpaces);
1095         int chunk_index = current_chunk_[space];
1096         const Heap::Reservation& reservation = reservations_[space];
1097         // Make sure the current chunk is indeed exhausted.
1098         CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
1099         // Move to next reserved chunk.
1100         chunk_index = ++current_chunk_[space];
1101         CHECK_LT(chunk_index, reservation.length());
1102         high_water_[space] = reservation[chunk_index].start;
1103         break;
1104       }
1105
1106       case kSynchronize:
1107         // Reaching this point indicates a mismatch between the number of
1108         // GC roots when serializing and when deserializing.
1109         CHECK(false);
1110         break;
1111
1112       case kNativesStringResource: {
1113         DCHECK(!isolate_->heap()->deserialization_complete());
1114         int index = source_.Get();
1115         Vector<const char> source_vector = Natives::GetScriptSource(index);
1116         NativesExternalStringResource* resource =
1117             new NativesExternalStringResource(source_vector.start(),
1118                                               source_vector.length());
1119         Object* resource_obj = reinterpret_cast<Object*>(resource);
1120         UnalignedCopy(current++, &resource_obj);
1121         break;
1122       }
1123
1124       // Deserialize raw data of variable length.
1125       case kVariableRawData: {
1126         int size_in_bytes = source_.GetInt();
1127         byte* raw_data_out = reinterpret_cast<byte*>(current);
1128         source_.CopyRaw(raw_data_out, size_in_bytes);
1129         break;
1130       }
1131
1132       case kVariableRepeat: {
1133         int repeats = source_.GetInt();
1134         Object* object = current[-1];
1135         DCHECK(!isolate->heap()->InNewSpace(object));
1136         for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
1137         break;
1138       }
1139
1140       STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
1141       STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
1142       SIXTEEN_CASES(kRootArrayConstantsWithSkip)
1143       SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
1144         int skip = source_.GetInt();
1145         current = reinterpret_cast<Object**>(
1146             reinterpret_cast<intptr_t>(current) + skip);
1147         // Fall through.
1148       }
1149
1150       SIXTEEN_CASES(kRootArrayConstants)
1151       SIXTEEN_CASES(kRootArrayConstants + 16) {
1152         int root_id = data & kRootArrayConstantsMask;
1153         Object* object = isolate->heap()->roots_array_start()[root_id];
1154         DCHECK(!isolate->heap()->InNewSpace(object));
1155         UnalignedCopy(current++, &object);
1156         break;
1157       }
1158
1159       STATIC_ASSERT(kNumberOfHotObjects == 8);
1160       FOUR_CASES(kHotObjectWithSkip)
1161       FOUR_CASES(kHotObjectWithSkip + 4) {
1162         int skip = source_.GetInt();
1163         current = reinterpret_cast<Object**>(
1164             reinterpret_cast<Address>(current) + skip);
1165         // Fall through.
1166       }
1167
1168       FOUR_CASES(kHotObject)
1169       FOUR_CASES(kHotObject + 4) {
1170         int index = data & kHotObjectMask;
1171         Object* hot_object = hot_objects_.Get(index);
1172         UnalignedCopy(current, &hot_object);
1173         if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
1174           Address current_address = reinterpret_cast<Address>(current);
1175           isolate->heap()->RecordWrite(
1176               current_object_address,
1177               static_cast<int>(current_address - current_object_address));
1178         }
1179         current++;
1180         break;
1181       }
1182
1183       // Deserialize raw data of fixed length from 1 to 32 words.
1184       STATIC_ASSERT(kNumberOfFixedRawData == 32);
1185       SIXTEEN_CASES(kFixedRawData)
1186       SIXTEEN_CASES(kFixedRawData + 16) {
1187         byte* raw_data_out = reinterpret_cast<byte*>(current);
1188         int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
1189         source_.CopyRaw(raw_data_out, size_in_bytes);
1190         current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
1191         break;
1192       }
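      // A worked example of the fixed-size decode above: the opcodes are laid
      // out so that data - kFixedRawDataStart is the length in words, so an
      // opcode encoding 3 words copies 3 << kPointerSizeLog2 bytes (12 on a
      // 32-bit target, 24 on 64-bit) straight into the object body.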
1193
1194       STATIC_ASSERT(kNumberOfFixedRepeat == 16);
1195       SIXTEEN_CASES(kFixedRepeat) {
1196         int repeats = data - kFixedRepeatStart;
1197         Object* object;
1198         UnalignedCopy(&object, current - 1);
1199         DCHECK(!isolate->heap()->InNewSpace(object));
1200         for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
1201         break;
1202       }
1203
1204 #undef SIXTEEN_CASES
1205 #undef FOUR_CASES
1206
1207       default:
1208         CHECK(false);
1209     }
1210   }
1211   CHECK_EQ(limit, current);
1212 }
1213
1214
1215 Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
1216     : isolate_(isolate),
1217       sink_(sink),
1218       external_reference_encoder_(isolate),
1219       root_index_map_(isolate),
1220       code_address_map_(NULL),
1221       large_objects_total_size_(0),
1222       seen_large_objects_index_(0) {
1223   // The serializer is meant to be used only to generate initial heap images
1224   // from a context in which there is only one isolate.
1225   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
1226     pending_chunk_[i] = 0;
1227     max_chunk_size_[i] = static_cast<uint32_t>(
1228         MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
1229   }
1230 }
1231
1232
1233 Serializer::~Serializer() {
1234   if (code_address_map_ != NULL) delete code_address_map_;
1235 }
1236
1237
1238 void StartupSerializer::SerializeStrongReferences() {
1239   Isolate* isolate = this->isolate();
1240   // No active threads.
1241   CHECK_NULL(isolate->thread_manager()->FirstThreadStateInUse());
1242   // No active or weak handles.
1243   CHECK(isolate->handle_scope_implementer()->blocks()->is_empty());
1244   CHECK_EQ(0, isolate->global_handles()->NumberOfWeakHandles());
1245   CHECK_EQ(0, isolate->eternal_handles()->NumberOfHandles());
1246   // We don't support serializing installed extensions.
1247   CHECK(!isolate->has_installed_extensions());
1248   isolate->heap()->IterateSmiRoots(this);
1249   isolate->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
1250 }
1251
1252
1253 void StartupSerializer::VisitPointers(Object** start, Object** end) {
1254   for (Object** current = start; current < end; current++) {
1255     if (start == isolate()->heap()->roots_array_start()) {
1256       root_index_wave_front_ =
1257           Max(root_index_wave_front_, static_cast<intptr_t>(current - start));
1258     }
1259     if (ShouldBeSkipped(current)) {
1260       sink_->Put(kSkip, "Skip");
1261       sink_->PutInt(kPointerSize, "SkipOneWord");
1262     } else if ((*current)->IsSmi()) {
1263       sink_->Put(kOnePointerRawData, "Smi");
1264       for (int i = 0; i < kPointerSize; i++) {
1265         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
1266       }
1267     } else {
1268       SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
1269     }
1270   }
1271 }
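
// Stream sketch for the Smi fast path above (illustrative; assuming a 64-bit
// build where kPointerSize == 8): a Smi costs one opcode byte plus the raw
// word, nine bytes in total,
//
//   [kOnePointerRawData][b0][b1][b2][b3][b4][b5][b6][b7]
//
// The deserializer copies the word back verbatim; no relocation is needed
// because Smis are immediate values rather than heap pointers.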
1272
1273
1274 void PartialSerializer::Serialize(Object** o) {
1275   if ((*o)->IsContext()) {
1276     Context* context = Context::cast(*o);
1277     global_object_ = context->global_object();
1278     back_reference_map()->AddGlobalProxy(context->global_proxy());
1279   }
1280   VisitPointer(o);
1281   SerializeOutdatedContextsAsFixedArray();
1282   Pad();
1283 }
1284
1285
1286 void PartialSerializer::SerializeOutdatedContextsAsFixedArray() {
1287   int length = outdated_contexts_.length();
1288   if (length == 0) {
1289     FixedArray* empty = isolate_->heap()->empty_fixed_array();
1290     SerializeObject(empty, kPlain, kStartOfObject, 0);
1291   } else {
1292     // Serialize an imaginary fixed array containing outdated contexts.
1293     int size = FixedArray::SizeFor(length);
1294     Allocate(NEW_SPACE, size);
1295     sink_->Put(kNewObject + NEW_SPACE, "emulated FixedArray");
1296     sink_->PutInt(size >> kObjectAlignmentBits, "FixedArray size in words");
1297     Map* map = isolate_->heap()->fixed_array_map();
1298     SerializeObject(map, kPlain, kStartOfObject, 0);
1299     Smi* length_smi = Smi::FromInt(length);
1300     sink_->Put(kOnePointerRawData, "Smi");
1301     for (int i = 0; i < kPointerSize; i++) {
1302       sink_->Put(reinterpret_cast<byte*>(&length_smi)[i], "Byte");
1303     }
1304     for (int i = 0; i < length; i++) {
1305       BackReference back_ref = outdated_contexts_[i];
1306       DCHECK(BackReferenceIsAlreadyAllocated(back_ref));
1307       sink_->Put(kBackref + back_ref.space(), "BackRef");
1308       sink_->PutInt(back_ref.reference(), "BackRefValue");
1309     }
1310   }
1311 }
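
// Stream sketch of the emulated FixedArray written above (illustrative):
//
//   [kNewObject + NEW_SPACE][size in words]    // allocation
//   <fixed array map>                          // via SerializeObject()
//   [kOnePointerRawData]<length Smi>           // length field
//   [kBackref + space][back ref]  x length     // one per outdated context
//
// On deserialization this materializes as an ordinary FixedArray whose
// elements are the contexts recorded in outdated_contexts_.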
1312
1313
1314 bool Serializer::ShouldBeSkipped(Object** current) {
1315   Object** roots = isolate()->heap()->roots_array_start();
1316   return current == &roots[Heap::kStoreBufferTopRootIndex]
1317       || current == &roots[Heap::kStackLimitRootIndex]
1318       || current == &roots[Heap::kRealStackLimitRootIndex];
1319 }
1320
1321
1322 void Serializer::VisitPointers(Object** start, Object** end) {
1323   for (Object** current = start; current < end; current++) {
1324     if ((*current)->IsSmi()) {
1325       sink_->Put(kOnePointerRawData, "Smi");
1326       for (int i = 0; i < kPointerSize; i++) {
1327         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
1328       }
1329     } else {
1330       SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
1331     }
1332   }
1333 }
1334
1335
1336 void Serializer::EncodeReservations(
1337     List<SerializedData::Reservation>* out) const {
1338   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
1339     for (int j = 0; j < completed_chunks_[i].length(); j++) {
1340       out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
1341     }
1342
1343     if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
1344       out->Add(SerializedData::Reservation(pending_chunk_[i]));
1345     }
1346     out->last().mark_as_last();
1347   }
1348
1349   out->Add(SerializedData::Reservation(large_objects_total_size_));
1350   out->last().mark_as_last();
1351 }
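
// Worked example (assumed sizes): a space with completed chunks of 16384 and
// 8192 bytes plus a 4096-byte pending chunk is encoded as the reservation
// list [16384, 8192, 4096*], where * marks the last entry for that space.
// The deserializer reserves exactly these chunk sizes up front, so every
// BackReference (chunk index, offset) resolves without further bookkeeping.
// The final entry covers the large object space as one total size.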
1352
1353
1354 // This ensures that the partial snapshot cache keeps things alive during GC and
1355 // tracks their movement.  When it is called during serialization of the startup
1356 // snapshot nothing happens.  When the partial (context) snapshot is created,
1357 // this array is populated with the pointers that the partial snapshot will
1358 // need. As that happens we emit serialized objects to the startup snapshot
1359 // that correspond to the elements of this cache array.  On deserialization we
1360 // therefore need to visit the cache array.  This fills it up with pointers to
1361 // deserialized objects.
1362 void SerializerDeserializer::Iterate(Isolate* isolate,
1363                                      ObjectVisitor* visitor) {
1364   if (isolate->serializer_enabled()) return;
1365   List<Object*>* cache = isolate->partial_snapshot_cache();
1366   for (int i = 0;; ++i) {
1367     // Extend the array so it is ready to receive a value when deserializing.
1368     if (cache->length() <= i) cache->Add(Smi::FromInt(0));
1369     visitor->VisitPointer(&cache->at(i));
1370     // The sentinel is the undefined object, which is a root and so will
1371     // not normally be found in the cache.
1372     if (cache->at(i)->IsUndefined()) break;
1373   }
1374 }
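
// Cache-shape sketch (added for illustration): the partial snapshot cache is
// a flat list terminated by the undefined sentinel,
//
//   [obj_0, obj_1, ..., obj_n, undefined]
//
// PartialSnapshotCacheIndex() below appends entries while serializing; the
// loop above walks and grows the list while deserializing.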
1375
1376
1377 int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
1378   Isolate* isolate = this->isolate();
1379   List<Object*>* cache = isolate->partial_snapshot_cache();
1380   int new_index = cache->length();
1381
1382   int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index);
1383   if (index == PartialCacheIndexMap::kInvalidIndex) {
1384     // We didn't find the object in the cache.  So we add it to the cache and
1385     // then visit the pointer so that it becomes part of the startup snapshot
1386     // and we can refer to it from the partial snapshot.
1387     cache->Add(heap_object);
1388     startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
1389     // We don't recurse from the startup snapshot generator into the partial
1390     // snapshot generator.
1391     return new_index;
1392   }
1393   return index;
1394 }
1395
1396
1397 #ifdef DEBUG
1398 bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
1399   DCHECK(reference.is_valid());
1400   DCHECK(!reference.is_source());
1401   DCHECK(!reference.is_global_proxy());
1402   AllocationSpace space = reference.space();
1403   int chunk_index = reference.chunk_index();
1404   if (space == LO_SPACE) {
1405     return chunk_index == 0 &&
1406            reference.large_object_index() < seen_large_objects_index_;
1407   } else if (chunk_index == completed_chunks_[space].length()) {
1408     return reference.chunk_offset() < pending_chunk_[space];
1409   } else {
1410     return chunk_index < completed_chunks_[space].length() &&
1411            reference.chunk_offset() < completed_chunks_[space][chunk_index];
1412   }
1413 }
1414 #endif  // DEBUG
1415
1416
1417 bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
1418                                       WhereToPoint where_to_point, int skip) {
1419   if (how_to_code == kPlain && where_to_point == kStartOfObject) {
1420     // Encode a reference to a hot object by its index in the working set.
1421     int index = hot_objects_.Find(obj);
1422     if (index != HotObjectsList::kNotFound) {
1423       DCHECK(index >= 0 && index < kNumberOfHotObjects);
1424       if (FLAG_trace_serializer) {
1425         PrintF(" Encoding hot object %d:", index);
1426         obj->ShortPrint();
1427         PrintF("\n");
1428       }
1429       if (skip != 0) {
1430         sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
1431         sink_->PutInt(skip, "HotObjectSkipDistance");
1432       } else {
1433         sink_->Put(kHotObject + index, "HotObject");
1434       }
1435       return true;
1436     }
1437   }
1438   BackReference back_reference = back_reference_map_.Lookup(obj);
1439   if (back_reference.is_valid()) {
1440     // Encode the location of an already deserialized object in order to write
1441     // its location into a later object.  We can encode the location as an
1442     // offset from the start of the deserialized objects or as an offset
1443     // backwards from the current allocation pointer.
1444     if (back_reference.is_source()) {
1445       FlushSkip(skip);
1446       if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
1447       DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
1448       sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Source");
1449       sink_->PutInt(kSourceObjectReference, "kSourceObjectReference");
1450     } else if (back_reference.is_global_proxy()) {
1451       FlushSkip(skip);
1452       if (FLAG_trace_serializer) PrintF(" Encoding global proxy\n");
1453       DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
1454       sink_->Put(kAttachedReference + kPlain + kStartOfObject, "Global Proxy");
1455       sink_->PutInt(kGlobalProxyReference, "kGlobalProxyReference");
1456     } else {
1457       if (FLAG_trace_serializer) {
1458         PrintF(" Encoding back reference to: ");
1459         obj->ShortPrint();
1460         PrintF("\n");
1461       }
1462
1463       AllocationSpace space = back_reference.space();
1464       if (skip == 0) {
1465         sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
1466       } else {
1467         sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
1468                    "BackRefWithSkip");
1469         sink_->PutInt(skip, "BackRefSkipDistance");
1470       }
1471       DCHECK(BackReferenceIsAlreadyAllocated(back_reference));
1472       sink_->PutInt(back_reference.reference(), "BackRefValue");
1473
1474       hot_objects_.Add(obj);
1475     }
1476     return true;
1477   }
1478   return false;
1479 }
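
// Byte-stream sketch for the encodings above (illustrative):
//
//   hot object:             [kHotObject + index]
//   hot object with skip:   [kHotObjectWithSkip + index][skip]
//   back reference:         [kBackref + how + where + space][reference]
//   back ref with skip:     [kBackrefWithSkip + how + where + space][skip][ref]
//
// A hot-object hit costs a single byte, which is why recently serialized
// objects are kept in the small working set that is consulted first.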
1480
1481
1482 void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
1483                                         WhereToPoint where_to_point, int skip) {
1484   DCHECK(!obj->IsJSFunction());
1485
1486   int root_index = root_index_map_.Lookup(obj);
1487   // We can only encode a root as such if it has already been serialized.
1488   // That applies to root indices below the wave front.
1489   if (root_index != RootIndexMap::kInvalidRootIndex &&
1490       root_index < root_index_wave_front_) {
1491     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1492     return;
1493   }
1494
1495   if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
1496     obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
1497   }
1498
1499   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
1500
1501   FlushSkip(skip);
1502
1503   // Object has not yet been serialized.  Serialize it here.
1504   ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
1505                                      where_to_point);
1506   object_serializer.Serialize();
1507 }
1508
1509
1510 void StartupSerializer::SerializeWeakReferences() {
1511   // This phase comes right after the serialization (of the snapshot).
1512   // After we have done the partial serialization the partial snapshot cache
1513   // will contain some references needed to decode the partial snapshot.  We
1514   // add one entry with 'undefined' which is the sentinel that the deserializer
1515   // uses to know it is done deserializing the array.
1516   Object* undefined = isolate()->heap()->undefined_value();
1517   VisitPointer(&undefined);
1518   isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
1519   Pad();
1520 }
1521
1522
1523 void Serializer::PutRoot(int root_index,
1524                          HeapObject* object,
1525                          SerializerDeserializer::HowToCode how_to_code,
1526                          SerializerDeserializer::WhereToPoint where_to_point,
1527                          int skip) {
1528   if (FLAG_trace_serializer) {
1529     PrintF(" Encoding root %d:", root_index);
1530     object->ShortPrint();
1531     PrintF("\n");
1532   }
1533
1534   if (how_to_code == kPlain && where_to_point == kStartOfObject &&
1535       root_index < kNumberOfRootArrayConstants &&
1536       !isolate()->heap()->InNewSpace(object)) {
1537     if (skip == 0) {
1538       sink_->Put(kRootArrayConstants + root_index, "RootConstant");
1539     } else {
1540       sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
1541       sink_->PutInt(skip, "SkipInPutRoot");
1542     }
1543   } else {
1544     FlushSkip(skip);
1545     sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
1546     sink_->PutInt(root_index, "root_index");
1547   }
1548 }
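
// Encoding sketch for the two cases above: frequently used roots with an
// index below kNumberOfRootArrayConstants that are not in new space compress
// to a single opcode byte, [kRootArrayConstants + root_index] (or
// [kRootArrayConstantsWithSkip + root_index][skip] when a skip is pending);
// everything else costs an opcode plus a variable-length index,
// [kRootArray + how_to_code + where_to_point][root_index].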
1549
1550
1551 void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
1552                                         WhereToPoint where_to_point, int skip) {
1553   if (obj->IsMap()) {
1554     // The code-caches link to context-specific code objects, which
1555     // the startup and context serializers cannot currently handle.
1556     DCHECK(Map::cast(obj)->code_cache() == obj->GetHeap()->empty_fixed_array());
1557   }
1558
1559   // Replace typed arrays by undefined.
1560   if (obj->IsJSTypedArray()) obj = isolate_->heap()->undefined_value();
1561
1562   int root_index = root_index_map_.Lookup(obj);
1563   if (root_index != RootIndexMap::kInvalidRootIndex) {
1564     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
1565     return;
1566   }
1567
1568   if (ShouldBeInThePartialSnapshotCache(obj)) {
1569     FlushSkip(skip);
1570
1571     int cache_index = PartialSnapshotCacheIndex(obj);
1572     sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
1573                "PartialSnapshotCache");
1574     sink_->PutInt(cache_index, "partial_snapshot_cache_index");
1575     return;
1576   }
1577
1578   // Pointers from the partial snapshot to the objects in the startup snapshot
1579   // should go through the root array or through the partial snapshot cache.
1580   // If this is not the case you may have to add something to the root array.
1581   DCHECK(!startup_serializer_->back_reference_map()->Lookup(obj).is_valid());
1582   // All the internalized strings that the partial snapshot needs should be
1583   // either in the root table or in the partial snapshot cache.
1584   DCHECK(!obj->IsInternalizedString());
1585
1586   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
1587
1588   FlushSkip(skip);
1589
1590   // Object has not yet been serialized.  Serialize it here.
1591   ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
1592   serializer.Serialize();
1593
1594   if (obj->IsContext() &&
1595       Context::cast(obj)->global_object() == global_object_) {
1596     // Context refers to the current global object. This reference will
1597     // become outdated after deserialization.
1598     BackReference back_reference = back_reference_map_.Lookup(obj);
1599     DCHECK(back_reference.is_valid());
1600     outdated_contexts_.Add(back_reference);
1601   }
1602 }
1603
1604
1605 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
1606                                                      int size, Map* map) {
1607   if (serializer_->code_address_map_) {
1608     const char* code_name =
1609         serializer_->code_address_map_->Lookup(object_->address());
1610     LOG(serializer_->isolate_,
1611         CodeNameEvent(object_->address(), sink_->Position(), code_name));
1612     LOG(serializer_->isolate_,
1613         SnapshotPositionEvent(object_->address(), sink_->Position()));
1614   }
1615
1616   BackReference back_reference;
1617   if (space == LO_SPACE) {
1618     sink_->Put(kNewObject + reference_representation_ + space,
1619                "NewLargeObject");
1620     sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
1621     if (object_->IsCode()) {
1622       sink_->Put(EXECUTABLE, "executable large object");
1623     } else {
1624       sink_->Put(NOT_EXECUTABLE, "not executable large object");
1625     }
1626     back_reference = serializer_->AllocateLargeObject(size);
1627   } else {
1628     bool needs_double_align = false;
1629     if (object_->NeedsToEnsureDoubleAlignment()) {
1630       // Add wriggle room for double alignment padding.
1631       back_reference = serializer_->Allocate(space, size + kPointerSize);
1632       needs_double_align = true;
1633     } else {
1634       back_reference = serializer_->Allocate(space, size);
1635     }
1636     sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
1637     if (needs_double_align)
1638       sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
1639     int encoded_size = size >> kObjectAlignmentBits;
1640     DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
1641     sink_->PutInt(encoded_size, "ObjectSizeInWords");
1642   }
1643
1644   // Mark this object as already serialized.
1645   serializer_->back_reference_map()->Add(object_, back_reference);
1646
1647   // Serialize the map (first word of the object).
1648   serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
1649 }
1650
1651
1652 void Serializer::ObjectSerializer::SerializeExternalString() {
1653   // Instead of serializing this as an external string, we serialize
1654   // an imaginary sequential string with the same content.
1655   Isolate* isolate = serializer_->isolate();
1656   DCHECK(object_->IsExternalString());
1657   DCHECK(object_->map() != isolate->heap()->native_source_string_map());
1658   ExternalString* string = ExternalString::cast(object_);
1659   int length = string->length();
1660   Map* map;
1661   int content_size;
1662   int allocation_size;
1663   const byte* resource;
1664   // Find the map and size for the imaginary sequential string.
1665   bool internalized = object_->IsInternalizedString();
1666   if (object_->IsExternalOneByteString()) {
1667     map = internalized ? isolate->heap()->one_byte_internalized_string_map()
1668                        : isolate->heap()->one_byte_string_map();
1669     allocation_size = SeqOneByteString::SizeFor(length);
1670     content_size = length * kCharSize;
1671     resource = reinterpret_cast<const byte*>(
1672         ExternalOneByteString::cast(string)->resource()->data());
1673   } else {
1674     map = internalized ? isolate->heap()->internalized_string_map()
1675                        : isolate->heap()->string_map();
1676     allocation_size = SeqTwoByteString::SizeFor(length);
1677     content_size = length * kShortSize;
1678     resource = reinterpret_cast<const byte*>(
1679         ExternalTwoByteString::cast(string)->resource()->data());
1680   }
1681
1682   AllocationSpace space = (allocation_size > Page::kMaxRegularHeapObjectSize)
1683                               ? LO_SPACE
1684                               : OLD_DATA_SPACE;
1685   SerializePrologue(space, allocation_size, map);
1686
1687   // Output the rest of the imaginary string.
1688   int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
1689
1690   // Output raw data header. Do not bother with common raw length cases here.
1691   sink_->Put(kVariableRawData, "RawDataForString");
1692   sink_->PutInt(bytes_to_output, "length");
1693
1694   // Serialize string header (except for map).
1695   Address string_start = string->address();
1696   for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
1697     sink_->PutSection(string_start[i], "StringHeader");
1698   }
1699
1700   // Serialize string content.
1701   sink_->PutRaw(resource, content_size, "StringContent");
1702
1703   // Since the allocation size is rounded up to object alignment, there
1704   // may be left-over bytes that need to be padded.
1705   int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
1706   DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
1707   for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");
1708
1709   sink_->Put(kSkip, "SkipAfterString");
1710   sink_->PutInt(bytes_to_output, "SkipDistance");
1711 }
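
// Layout sketch of the imaginary sequential string emitted above
// (illustrative):
//
//   <map>                        via SerializePrologue()
//   [kVariableRawData][length]   raw-data header
//   [string header bytes]        HeapObject::kHeaderSize up to
//                                SeqString::kHeaderSize (length, hash)
//   [content]                    raw characters from the external resource
//   [padding]                    zeros up to object alignment
//
// followed by a kSkip whose distance equals the raw bytes just output.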
1712
1713
1714 void Serializer::ObjectSerializer::Serialize() {
1715   if (FLAG_trace_serializer) {
1716     PrintF(" Encoding heap object: ");
1717     object_->ShortPrint();
1718     PrintF("\n");
1719   }
1720
1721   // We cannot serialize typed array objects correctly.
1722   DCHECK(!object_->IsJSTypedArray());
1723
1724   if (object_->IsScript()) {
1725     // Clear cached line ends.
1726     Object* undefined = serializer_->isolate()->heap()->undefined_value();
1727     Script::cast(object_)->set_line_ends(undefined);
1728   }
1729
1730   if (object_->IsExternalString()) {
1731     Heap* heap = serializer_->isolate()->heap();
1732     if (object_->map() != heap->native_source_string_map()) {
1733       // Usually we cannot recreate resources for external strings. To work
1734       // around this, external strings are serialized to look like ordinary
1735       // sequential strings.
1736       // The exceptions are native source code strings, since we can recreate
1737       // their resources. In that case we fall through and leave it to
1738       // VisitExternalOneByteString further down.
1739       SerializeExternalString();
1740       return;
1741     }
1742   }
1743
1744   int size = object_->Size();
1745   Map* map = object_->map();
1746   AllocationSpace space =
1747       MemoryChunk::FromAddress(object_->address())->owner()->identity();
1748   SerializePrologue(space, size, map);
1749
1750   // Serialize the rest of the object.
1751   CHECK_EQ(0, bytes_processed_so_far_);
1752   bytes_processed_so_far_ = kPointerSize;
1753
1754   object_->IterateBody(map->instance_type(), size, this);
1755   OutputRawData(object_->address() + size);
1756 }
1757
1758
1759 void Serializer::ObjectSerializer::VisitPointers(Object** start,
1760                                                  Object** end) {
1761   Object** current = start;
1762   while (current < end) {
1763     while (current < end && (*current)->IsSmi()) current++;
1764     if (current < end) OutputRawData(reinterpret_cast<Address>(current));
1765
1766     while (current < end && !(*current)->IsSmi()) {
1767       HeapObject* current_contents = HeapObject::cast(*current);
1768       int root_index = serializer_->root_index_map()->Lookup(current_contents);
1769       // Repeats are not subject to the write barrier so we can only use
1770       // immortal immovable root members. They are never in new space.
1771       if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
1772           Heap::RootIsImmortalImmovable(root_index) &&
1773           current_contents == current[-1]) {
1774         DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
1775         int repeat_count = 1;
1776         while (&current[repeat_count] < end - 1 &&
1777                current[repeat_count] == current_contents) {
1778           repeat_count++;
1779         }
1780         current += repeat_count;
1781         bytes_processed_so_far_ += repeat_count * kPointerSize;
1782         if (repeat_count > kNumberOfFixedRepeat) {
1783           sink_->Put(kVariableRepeat, "VariableRepeat");
1784           sink_->PutInt(repeat_count, "repeat count");
1785         } else {
1786           sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
1787         }
1788       } else {
1789         serializer_->SerializeObject(
1790                 current_contents, kPlain, kStartOfObject, 0);
1791         bytes_processed_so_far_ += kPointerSize;
1792         current++;
1793       }
1794     }
1795   }
1796 }
1797
1798
1799 void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
1800   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1801   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1802
1803   int skip = OutputRawData(rinfo->target_address_address(),
1804                            kCanReturnSkipInsteadOfSkipping);
1805   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1806   Object* object = rinfo->target_object();
1807   serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
1808                                kStartOfObject, skip);
1809   bytes_processed_so_far_ += rinfo->target_address_size();
1810 }
1811
1812
1813 void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
1814   int skip = OutputRawData(reinterpret_cast<Address>(p),
1815                            kCanReturnSkipInsteadOfSkipping);
1816   sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
1817   sink_->PutInt(skip, "SkipB4ExternalRef");
1818   Address target = *p;
1819   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1820   bytes_processed_so_far_ += kPointerSize;
1821 }
1822
1823
1824 void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
1825   int skip = OutputRawData(rinfo->target_address_address(),
1826                            kCanReturnSkipInsteadOfSkipping);
1827   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1828   sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1829   sink_->PutInt(skip, "SkipB4ExternalRef");
1830   Address target = rinfo->target_external_reference();
1831   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1832   bytes_processed_so_far_ += rinfo->target_address_size();
1833 }
1834
1835
1836 void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
1837   // We can only reference internal references of code that has been output.
1838   DCHECK(is_code_object_ && code_has_been_output_);
1839   // We do not use skip from last patched pc to find the pc to patch, since
1840   // target_address_address may not return addresses in ascending order when
1841   // used for internal references. External references may be stored at the
1842   // end of the code in the constant pool, whereas internal references are
1843   // inline. That would cause the skip to be negative. Instead, we store the
1844   // offset from code entry.
1845   Address entry = Code::cast(object_)->entry();
1846   intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
1847   intptr_t target_offset = rinfo->target_internal_reference() - entry;
1848   DCHECK(0 <= pc_offset &&
1849          pc_offset <= Code::cast(object_)->instruction_size());
1850   DCHECK(0 <= target_offset &&
1851          target_offset <= Code::cast(object_)->instruction_size());
1852   sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
1853                  ? kInternalReference
1854                  : kInternalReferenceEncoded,
1855              "InternalRef");
1856   sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
1857   sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
1858 }
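
// Worked example for the offsets above (hypothetical addresses): if the code
// entry is at 0x1000, the reference is stored at pc 0x1010 and points to
// 0x1024, then pc_offset == 0x10 and target_offset == 0x24. Both values are
// relative to the entry, so they remain valid when the code object moves
// during deserialization.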
1859
1860
1861 void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
1862   int skip = OutputRawData(rinfo->target_address_address(),
1863                            kCanReturnSkipInsteadOfSkipping);
1864   HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
1865   sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
1866   sink_->PutInt(skip, "SkipB4ExternalRef");
1867   Address target = rinfo->target_address();
1868   sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
1869   bytes_processed_so_far_ += rinfo->target_address_size();
1870 }
1871
1872
1873 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
1874   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1875   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1876
1877   int skip = OutputRawData(rinfo->target_address_address(),
1878                            kCanReturnSkipInsteadOfSkipping);
1879   Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
1880   serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
1881   bytes_processed_so_far_ += rinfo->target_address_size();
1882 }
1883
1884
1885 void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
1886   int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
1887   Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1888   serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1889   bytes_processed_so_far_ += kPointerSize;
1890 }
1891
1892
1893 void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
1894   // Out-of-line constant pool entries will be visited by the ConstantPoolArray.
1895   if (FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool()) return;
1896
1897   int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
1898   Cell* object = Cell::cast(rinfo->target_cell());
1899   serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
1900   bytes_processed_so_far_ += kPointerSize;
1901 }
1902
1903
1904 void Serializer::ObjectSerializer::VisitExternalOneByteString(
1905     v8::String::ExternalOneByteStringResource** resource_pointer) {
1906   Address references_start = reinterpret_cast<Address>(resource_pointer);
1907   OutputRawData(references_start);
1908   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
1909     Object* source =
1910         serializer_->isolate()->heap()->natives_source_cache()->get(i);
1911     if (!source->IsUndefined()) {
1912       ExternalOneByteString* string = ExternalOneByteString::cast(source);
1913       typedef v8::String::ExternalOneByteStringResource Resource;
1914       const Resource* resource = string->resource();
1915       if (resource == *resource_pointer) {
1916         sink_->Put(kNativesStringResource, "NativesStringResource");
1917         sink_->PutSection(i, "NativesStringResourceEnd");
1918         bytes_processed_so_far_ += sizeof(resource);
1919         return;
1920       }
1921     }
1922   }
1923   // One of the strings in the natives cache should match the resource.  We
1924   // don't expect any other kinds of external strings here.
1925   UNREACHABLE();
1926 }
1927
1928
1929 Address Serializer::ObjectSerializer::PrepareCode() {
1930   // To make snapshots reproducible, we make a copy of the code object
1931   // and wipe all pointers in the copy, which we then serialize.
1932   Code* original = Code::cast(object_);
1933   Code* code = serializer_->CopyCode(original);
1934   // Code age headers are not serializable.
1935   code->MakeYoung(serializer_->isolate());
1936   int mode_mask = RelocInfo::kCodeTargetMask |
1937                   RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
1938                   RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
1939                   RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
1940                   RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
1941                   RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
1942   for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
1943     RelocInfo* rinfo = it.rinfo();
1944     if (!(FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool())) {
1945       rinfo->WipeOut();
1946     }
1947   }
1948   // We need to wipe out the header fields *after* wiping out the
1949   // relocations, because some of these fields are needed for the latter.
1950   code->WipeOutHeader();
1951   return code->address();
1952 }
1953
1954
1955 int Serializer::ObjectSerializer::OutputRawData(
1956     Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
1957   Address object_start = object_->address();
1958   int base = bytes_processed_so_far_;
1959   int up_to_offset = static_cast<int>(up_to - object_start);
1960   int to_skip = up_to_offset - bytes_processed_so_far_;
1961   int bytes_to_output = to_skip;
1962   bytes_processed_so_far_ += to_skip;
1963   // This assert will fail if the reloc info gives us the target_address_address
1964   // locations in non-ascending order.  Luckily that doesn't happen.
1965   DCHECK(to_skip >= 0);
1966   bool outputting_code = false;
1967   if (to_skip != 0 && is_code_object_ && !code_has_been_output_) {
1968     // Output the code all at once and fix later.
1969     bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
1970     outputting_code = true;
1971     code_has_been_output_ = true;
1972   }
1973   if (bytes_to_output != 0 && (!is_code_object_ || outputting_code)) {
1974     if (!outputting_code && bytes_to_output == to_skip &&
1975         IsAligned(bytes_to_output, kPointerAlignment) &&
1976         bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
1977       int size_in_words = bytes_to_output >> kPointerSizeLog2;
1978       sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
1979       to_skip = 0;  // This instruction includes skip.
1980     } else {
1981       // We always end up here if we are outputting the code of a code object.
1982       sink_->Put(kVariableRawData, "VariableRawData");
1983       sink_->PutInt(bytes_to_output, "length");
1984     }
1985
1986     if (is_code_object_) object_start = PrepareCode();
1987
1988     const char* description = is_code_object_ ? "Code" : "Byte";
1989 #ifdef MEMORY_SANITIZER
1990     // Object sizes are usually rounded up with uninitialized padding space.
1991     MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
1992 #endif  // MEMORY_SANITIZER
1993     sink_->PutRaw(object_start + base, bytes_to_output, description);
1994   }
1995   if (to_skip != 0 && return_skip == kIgnoringReturn) {
1996     sink_->Put(kSkip, "Skip");
1997     sink_->PutInt(to_skip, "SkipDistance");
1998     to_skip = 0;
1999   }
2000   return to_skip;
2001 }
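
// Encoding sketch for the raw-data fast path above: pointer-aligned runs of
// at most kNumberOfFixedRawData words fold the length into the opcode,
//
//   [kFixedRawDataStart + size_in_words][raw bytes ...]
//
// e.g. 24 bytes on a 64-bit build (kPointerSize == 8, an assumption) encode
// as kFixedRawDataStart + 3. Longer or unaligned runs, and code object
// bodies, fall back to [kVariableRawData][length][raw bytes ...].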
2002
2003
2004 BackReference Serializer::AllocateLargeObject(int size) {
2005   // Large objects are allocated one-by-one when deserializing. We do not
2006   // have to keep track of multiple chunks.
2007   large_objects_total_size_ += size;
2008   return BackReference::LargeObjectReference(seen_large_objects_index_++);
2009 }
2010
2011
2012 BackReference Serializer::Allocate(AllocationSpace space, int size) {
2013   DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
2014   DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
2015   uint32_t new_chunk_size = pending_chunk_[space] + size;
2016   if (new_chunk_size > max_chunk_size(space)) {
2017     // The new chunk size would not fit onto a single page. Complete the
2018     // current chunk and start a new one.
2019     sink_->Put(kNextChunk, "NextChunk");
2020     sink_->Put(space, "NextChunkSpace");
2021     completed_chunks_[space].Add(pending_chunk_[space]);
2022     DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
2023     pending_chunk_[space] = 0;
2024     new_chunk_size = size;
2025   }
2026   uint32_t offset = pending_chunk_[space];
2027   pending_chunk_[space] = new_chunk_size;
2028   return BackReference::Reference(space, completed_chunks_[space].length(),
2029                                   offset);
2030 }
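
// Worked example (assumed numbers): with max_chunk_size(space) == 16384,
// pending_chunk_[space] == 15000 and a 4096-byte request, the sum exceeds
// the page budget, so a kNextChunk record is emitted, the 15000-byte chunk
// moves to completed_chunks_, and the object is placed at offset 0 of a
// fresh chunk. The returned (space, chunk_index, offset) triple is what
// BackReferenceIsAlreadyAllocated() validates in debug builds.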
2031
2032
2033 void Serializer::Pad() {
2034   // The non-branching GetInt will read up to 3 bytes too far, so we need
2035   // to pad the snapshot to make sure we don't read over the end.
2036   for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
2037     sink_->Put(kNop, "Padding");
2038   }
2039   // Pad up to pointer size for checksum.
2040   while (!IsAligned(sink_->Position(), kPointerAlignment)) {
2041     sink_->Put(kNop, "Padding");
2042   }
2043 }
2044
2045
2046 void Serializer::InitializeCodeAddressMap() {
2047   isolate_->InitializeLoggingAndCounters();
2048   code_address_map_ = new CodeAddressMap(isolate_);
2049 }
2050
2051
2052 Code* Serializer::CopyCode(Code* code) {
2053   code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
2054   int size = code->CodeSize();
2055   code_buffer_.AddAll(Vector<byte>(code->address(), size));
2056   return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
2057 }
2058
2059
2060 ScriptData* CodeSerializer::Serialize(Isolate* isolate,
2061                                       Handle<SharedFunctionInfo> info,
2062                                       Handle<String> source) {
2063   base::ElapsedTimer timer;
2064   if (FLAG_profile_deserialization) timer.Start();
2065   if (FLAG_trace_serializer) {
2066     PrintF("[Serializing from");
2067     Object* script = info->script();
2068     if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
2069     PrintF("]\n");
2070   }
2071
2072   // Serialize code object.
2073   SnapshotByteSink sink(info->code()->CodeSize() * 2);
2074   CodeSerializer cs(isolate, &sink, *source, info->code());
2075   DisallowHeapAllocation no_gc;
2076   Object** location = Handle<Object>::cast(info).location();
2077   cs.VisitPointer(location);
2078   cs.Pad();
2079
2080   SerializedCodeData data(sink.data(), cs);
2081   ScriptData* script_data = data.GetScriptData();
2082
2083   if (FLAG_profile_deserialization) {
2084     double ms = timer.Elapsed().InMillisecondsF();
2085     int length = script_data->length();
2086     PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
2087   }
2088
2089   return script_data;
2090 }
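
// Illustrative round trip using the two entry points in this file (a sketch;
// error handling elided):
//
//   ScriptData* data = CodeSerializer::Serialize(isolate, info, source);
//   ...
//   MaybeHandle<SharedFunctionInfo> shared =
//       CodeSerializer::Deserialize(isolate, data, source);
//
// Deserialize() re-validates version, source, CPU features and flags via
// SerializedCodeData::SanityCheck() and returns an empty handle on mismatch.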
2091
2092
2093 void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
2094                                      WhereToPoint where_to_point, int skip) {
2095   int root_index = root_index_map_.Lookup(obj);
2096   if (root_index != RootIndexMap::kInvalidRootIndex) {
2097     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
2098     return;
2099   }
2100
2101   if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
2102
2103   FlushSkip(skip);
2104
2105   if (obj->IsCode()) {
2106     Code* code_object = Code::cast(obj);
2107     switch (code_object->kind()) {
2108       case Code::OPTIMIZED_FUNCTION:  // No optimized code compiled yet.
2109       case Code::HANDLER:             // No handlers patched in yet.
2110       case Code::REGEXP:              // No regexp literals initialized yet.
2111       case Code::NUMBER_OF_KINDS:     // Pseudo enum value.
2112         CHECK(false);
2113       case Code::BUILTIN:
2114         SerializeBuiltin(code_object->builtin_index(), how_to_code,
2115                          where_to_point);
2116         return;
2117       case Code::STUB:
2118         SerializeCodeStub(code_object->stub_key(), how_to_code, where_to_point);
2119         return;
2120 #define IC_KIND_CASE(KIND) case Code::KIND:
2121         IC_KIND_LIST(IC_KIND_CASE)
2122 #undef IC_KIND_CASE
2123         SerializeIC(code_object, how_to_code, where_to_point);
2124         return;
2125       case Code::FUNCTION:
2126         DCHECK(code_object->has_reloc_info_for_serialization());
2127         // Only serialize the code for the toplevel function unless specified
2128         // by flag. Replace code of inner functions by the lazy compile builtin.
2129         // This is safe, as checked in Compiler::BuildFunctionInfo.
2130         if (code_object != main_code_ && !FLAG_serialize_inner) {
2131           SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
2132         } else {
2133           SerializeGeneric(code_object, how_to_code, where_to_point);
2134         }
2135         return;
2136     }
2137     UNREACHABLE();
2138   }
2139
2140   // Past this point we should not see any (context-specific) maps anymore.
2141   CHECK(!obj->IsMap());
2142   // There should be no references to the global object embedded.
2143   CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
2144   // There should be no hash tables embedded. They would require rehashing.
2145   CHECK(!obj->IsHashTable());
2146   // We expect no instantiated function objects or contexts.
2147   CHECK(!obj->IsJSFunction() && !obj->IsContext());
2148
2149   SerializeGeneric(obj, how_to_code, where_to_point);
2150 }
2151
2152
2153 void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
2154                                       HowToCode how_to_code,
2155                                       WhereToPoint where_to_point) {
2156   if (heap_object->IsInternalizedString()) num_internalized_strings_++;
2157
2158   // Object has not yet been serialized.  Serialize it here.
2159   ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
2160                               where_to_point);
2161   serializer.Serialize();
2162 }
2163
2164
2165 void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
2166                                       WhereToPoint where_to_point) {
2167   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
2168          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
2169          (how_to_code == kFromCode && where_to_point == kInnerPointer));
2170   DCHECK_LT(builtin_index, Builtins::builtin_count);
2171   DCHECK_LE(0, builtin_index);
2172
2173   if (FLAG_trace_serializer) {
2174     PrintF(" Encoding builtin: %s\n",
2175            isolate()->builtins()->name(builtin_index));
2176   }
2177
2178   sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
2179   sink_->PutInt(builtin_index, "builtin_index");
2180 }
2181
2182
2183 void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
2184                                        WhereToPoint where_to_point) {
2185   DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
2186          (how_to_code == kPlain && where_to_point == kInnerPointer) ||
2187          (how_to_code == kFromCode && where_to_point == kInnerPointer));
2188   DCHECK(CodeStub::MajorKeyFromKey(stub_key) != CodeStub::NoCache);
2189   DCHECK(!CodeStub::GetCode(isolate(), stub_key).is_null());
2190
2191   int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
2192
2193   if (FLAG_trace_serializer) {
2194     PrintF(" Encoding code stub %s as %d\n",
2195            CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
2196            index);
2197   }
2198
2199   sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
2200   sink_->PutInt(index, "CodeStub key");
2201 }
2202
2203
2204 void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
2205                                  WhereToPoint where_to_point) {
2206   // The IC may be implemented as a stub.
2207   uint32_t stub_key = ic->stub_key();
2208   if (stub_key != CodeStub::NoCacheKey()) {
2209     if (FLAG_trace_serializer) {
2210       PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
2211     }
2212     SerializeCodeStub(stub_key, how_to_code, where_to_point);
2213     return;
2214   }
2215   // The IC may be implemented as a builtin. Only real builtins have an
2216   // actual builtin_index value attached (otherwise it's just garbage).
2217   // Compare to make sure we are really dealing with a builtin.
2218   int builtin_index = ic->builtin_index();
2219   if (builtin_index < Builtins::builtin_count) {
2220     Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
2221     Code* builtin = isolate()->builtins()->builtin(name);
2222     if (builtin == ic) {
2223       if (FLAG_trace_serializer) {
2224         PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
2225       }
2226       DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
2227              ic->kind() == Code::KEYED_STORE_IC);
2228       SerializeBuiltin(builtin_index, how_to_code, where_to_point);
2229       return;
2230     }
2231   }
2232   // The IC may also just be a piece of code kept in the non_monomorphic_cache.
2233   // In that case, just serialize as a normal code object.
2234   if (FLAG_trace_serializer) {
2235     PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
2236   }
2237   DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
2238   SerializeGeneric(ic, how_to_code, where_to_point);
2239 }
2240
2241
2242 int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
2243   // TODO(yangguo): Maybe we need a hash table; repeated linear scans are O(n^2) overall.
2244   int index = 0;
2245   while (index < stub_keys_.length()) {
2246     if (stub_keys_[index] == stub_key) return index;
2247     index++;
2248   }
2249   stub_keys_.Add(stub_key);
2250   return index;
2251 }
2252
2253
2254 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
2255     Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
2256   base::ElapsedTimer timer;
2257   if (FLAG_profile_deserialization) timer.Start();
2258
2259   HandleScope scope(isolate);
2260
2261   SmartPointer<SerializedCodeData> scd(
2262       SerializedCodeData::FromCachedData(isolate, cached_data, *source));
2263   if (scd.is_empty()) {
2264     if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
2265     DCHECK(cached_data->rejected());
2266     return MaybeHandle<SharedFunctionInfo>();
2267   }
2268
2269   // Eagerly expand string table to avoid allocations during deserialization.
2270   StringTable::EnsureCapacityForDeserialization(isolate,
2271                                                 scd->NumInternalizedStrings());
2272
2273   // Prepare and register list of attached objects.
2274   Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
2275   Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
2276       code_stub_keys.length() + kCodeStubsBaseIndex);
2277   attached_objects[kSourceObjectIndex] = source;
2278   for (int i = 0; i < code_stub_keys.length(); i++) {
2279     attached_objects[i + kCodeStubsBaseIndex] =
2280         CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
2281   }
2282
2283   Deserializer deserializer(scd.get());
2284   deserializer.SetAttachedObjects(attached_objects);
2285
2286   // Deserialize.
2287   Handle<SharedFunctionInfo> result;
2288   if (!deserializer.DeserializeCode(isolate).ToHandle(&result)) {
2289     // Deserializing may fail if the reservations cannot be fulfilled.
2290     if (FLAG_profile_deserialization) PrintF("[Deserializing failed]\n");
2291     return MaybeHandle<SharedFunctionInfo>();
2292   }
2293   deserializer.FlushICacheForNewCodeObjects();
2294
2295   if (FLAG_profile_deserialization) {
2296     double ms = timer.Elapsed().InMillisecondsF();
2297     int length = cached_data->length();
2298     PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
2299   }
2300   result->set_deserialized(true);
2301
2302   if (isolate->logger()->is_logging_code_events() ||
2303       isolate->cpu_profiler()->is_profiling()) {
2304     String* name = isolate->heap()->empty_string();
2305     if (result->script()->IsScript()) {
2306       Script* script = Script::cast(result->script());
2307       if (script->name()->IsString()) name = String::cast(script->name());
2308     }
2309     isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
2310                                        *result, NULL, name);
2311   }
2312   return scope.CloseAndEscape(result);
2313 }
2314
2315
2316 void SerializedData::AllocateData(int size) {
2317   DCHECK(!owns_data_);
2318   data_ = NewArray<byte>(size);
2319   size_ = size;
2320   owns_data_ = true;
2321   DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
2322 }
2323
2324
2325 SnapshotData::SnapshotData(const Serializer& ser) {
2326   DisallowHeapAllocation no_gc;
2327   List<Reservation> reservations;
2328   ser.EncodeReservations(&reservations);
2329   const List<byte>& payload = ser.sink()->data();
2330
2331   // Calculate sizes.
2332   int reservation_size = reservations.length() * kInt32Size;
2333   int size = kHeaderSize + reservation_size + payload.length();
2334
2335   // Allocate backing store and create result data.
2336   AllocateData(size);
2337
2338   // Set header values.
2339   SetMagicNumber(ser.isolate());
2340   SetHeaderValue(kCheckSumOffset, Version::Hash());
2341   SetHeaderValue(kNumReservationsOffset, reservations.length());
2342   SetHeaderValue(kPayloadLengthOffset, payload.length());
2343
2344   // Copy reservation chunk sizes.
2345   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
2346             reservation_size);
2347
2348   // Copy serialized data.
2349   CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
2350             static_cast<size_t>(payload.length()));
2351 }
2352
2353
2354 bool SnapshotData::IsSane() {
2355   return GetHeaderValue(kCheckSumOffset) == Version::Hash();
2356 }
2357
2358
2359 Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
2360   return Vector<const Reservation>(
2361       reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2362       GetHeaderValue(kNumReservationsOffset));
2363 }
2364
2365
2366 Vector<const byte> SnapshotData::Payload() const {
2367   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2368   const byte* payload = data_ + kHeaderSize + reservations_size;
2369   int length = GetHeaderValue(kPayloadLengthOffset);
2370   DCHECK_EQ(data_ + size_, payload + length);
2371   return Vector<const byte>(payload, length);
2372 }
2373
2374
2375 class Checksum {
2376  public:
2377   explicit Checksum(Vector<const byte> payload) {
2378     // Fletcher's checksum. Modified to reduce 64-bit sums to 32-bit.
2379     uintptr_t a = 1;
2380     uintptr_t b = 0;
2381     const uintptr_t* cur = reinterpret_cast<const uintptr_t*>(payload.start());
2382     DCHECK(IsAligned(payload.length(), kIntptrSize));
2383     const uintptr_t* end = cur + payload.length() / kIntptrSize;
2384     while (cur < end) {
2385       // Unsigned overflow expected and intended.
2386       a += *cur++;
2387       b += a;
2388     }
2389 #if V8_HOST_ARCH_64_BIT
2390     a ^= a >> 32;
2391     b ^= b >> 32;
2392 #endif  // V8_HOST_ARCH_64_BIT
2393     a_ = static_cast<uint32_t>(a);
2394     b_ = static_cast<uint32_t>(b);
2395   }
2396
2397   bool Check(uint32_t a, uint32_t b) const { return a == a_ && b == b_; }
2398
2399   uint32_t a() const { return a_; }
2400   uint32_t b() const { return b_; }
2401
2402  private:
2403   uint32_t a_;
2404   uint32_t b_;
2405
2406   DISALLOW_COPY_AND_ASSIGN(Checksum);
2407 };
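
// Worked example (illustrative): for a payload of two words {1, 2} the loop
// above computes a = 1 + 1 = 2, b = 0 + 2 = 2 after the first word, then
// a = 2 + 2 = 4, b = 2 + 4 = 6, so Check(4, 6) succeeds. Because b sums the
// running values of a, reordered or corrupted words perturb b even when the
// plain sum a is unchanged.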
2408
2409
2410 SerializedCodeData::SerializedCodeData(const List<byte>& payload,
2411                                        const CodeSerializer& cs) {
2412   DisallowHeapAllocation no_gc;
2413   const List<uint32_t>* stub_keys = cs.stub_keys();
2414
2415   List<Reservation> reservations;
2416   cs.EncodeReservations(&reservations);
2417
2418   // Calculate sizes.
2419   int reservation_size = reservations.length() * kInt32Size;
2420   int num_stub_keys = stub_keys->length();
2421   int stub_keys_size = stub_keys->length() * kInt32Size;
2422   int payload_offset = kHeaderSize + reservation_size + stub_keys_size;
2423   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
2424   int size = padded_payload_offset + payload.length();
2425
2426   // Allocate backing store and create result data.
2427   AllocateData(size);
2428
2429   // Set header values.
2430   SetMagicNumber(cs.isolate());
2431   SetHeaderValue(kVersionHashOffset, Version::Hash());
2432   SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
2433   SetHeaderValue(kCpuFeaturesOffset,
2434                  static_cast<uint32_t>(CpuFeatures::SupportedFeatures()));
2435   SetHeaderValue(kFlagHashOffset, FlagList::Hash());
2436   SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
2437   SetHeaderValue(kNumReservationsOffset, reservations.length());
2438   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
2439   SetHeaderValue(kPayloadLengthOffset, payload.length());
2440
2441   Checksum checksum(payload.ToConstVector());
2442   SetHeaderValue(kChecksum1Offset, checksum.a());
2443   SetHeaderValue(kChecksum2Offset, checksum.b());
2444
2445   // Copy reservation chunk sizes.
2446   CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
2447             reservation_size);
2448
2449   // Copy code stub keys.
2450   CopyBytes(data_ + kHeaderSize + reservation_size,
2451             reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
2452
2453   memset(data_ + payload_offset, 0, padded_payload_offset - payload_offset);
2454
2455   // Copy serialized data.
2456   CopyBytes(data_ + padded_payload_offset, payload.begin(),
2457             static_cast<size_t>(payload.length()));
2458 }
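
// Resulting layout (sketch; field order per the writes above):
//
//   [header: magic number, version hash, source hash, cpu features,
//            flag hash, counts, payload length, checksum a/b]
//   [reservations:  num_reservations * kInt32Size]
//   [stub keys:     num_stub_keys * kInt32Size]
//   [zero padding to pointer alignment]
//   [payload]
//
// SerializedCodeData::Payload() below recomputes the same offsets to locate
// the payload again when reading the cache back.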
2459
2460
2461 SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
2462     Isolate* isolate, String* source) const {
2463   uint32_t magic_number = GetMagicNumber();
2464   uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
2465   uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
2466   uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
2467   uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
2468   uint32_t c1 = GetHeaderValue(kChecksum1Offset);
2469   uint32_t c2 = GetHeaderValue(kChecksum2Offset);
2470   if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
2471   if (version_hash != Version::Hash()) return VERSION_MISMATCH;
2472   if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
2473   if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
2474     return CPU_FEATURES_MISMATCH;
2475   }
2476   if (flags_hash != FlagList::Hash()) return FLAGS_MISMATCH;
2477   if (!Checksum(Payload()).Check(c1, c2)) return CHECKSUM_MISMATCH;
2478   return CHECK_SUCCESS;
2479 }
2480
2481
2482 // Return the ScriptData object and relinquish ownership of it to the caller.
2483 ScriptData* SerializedCodeData::GetScriptData() {
2484   DCHECK(owns_data_);
2485   ScriptData* result = new ScriptData(data_, size_);
2486   result->AcquireDataOwnership();
2487   owns_data_ = false;
2488   data_ = NULL;
2489   return result;
2490 }
2491
2492
2493 Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
2494     const {
2495   return Vector<const Reservation>(
2496       reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
2497       GetHeaderValue(kNumReservationsOffset));
2498 }
2499
2500
2501 Vector<const byte> SerializedCodeData::Payload() const {
2502   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2503   int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
2504   int payload_offset = kHeaderSize + reservations_size + code_stubs_size;
2505   int padded_payload_offset = POINTER_SIZE_ALIGN(payload_offset);
2506   const byte* payload = data_ + padded_payload_offset;
2507   DCHECK(IsAligned(reinterpret_cast<intptr_t>(payload), kPointerAlignment));
2508   int length = GetHeaderValue(kPayloadLengthOffset);
2509   DCHECK_EQ(data_ + size_, payload + length);
2510   return Vector<const byte>(payload, length);
2511 }
2512
2513
2514 int SerializedCodeData::NumInternalizedStrings() const {
2515   return GetHeaderValue(kNumInternalizedStringsOffset);
2516 }
2517
2518 Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
2519   int reservations_size = GetHeaderValue(kNumReservationsOffset) * kInt32Size;
2520   const byte* start = data_ + kHeaderSize + reservations_size;
2521   return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
2522                                 GetHeaderValue(kNumCodeStubKeysOffset));
2523 }
2524
2525
2526 SerializedCodeData::SerializedCodeData(ScriptData* data)
2527     : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
2528
2529
2530 SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
2531                                                        ScriptData* cached_data,
2532                                                        String* source) {
2533   DisallowHeapAllocation no_gc;
2534   SerializedCodeData* scd = new SerializedCodeData(cached_data);
2535   SanityCheckResult r = scd->SanityCheck(isolate, source);
2536   if (r == CHECK_SUCCESS) return scd;
2537   cached_data->Reject();
2538   source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
2539   delete scd;
2540   return NULL;
2541 }
2542 } }  // namespace v8::internal