deps/v8/src/heap/heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/serialize.h"
32 #include "src/snapshot.h"
33 #include "src/utils.h"
34 #include "src/v8threads.h"
35 #include "src/vm-state-inl.h"
36
37 #if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
38 #include "src/regexp-macro-assembler.h"          // NOLINT
39 #include "src/ppc/regexp-macro-assembler-ppc.h"  // NOLINT
40 #endif
41 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
42 #include "src/regexp-macro-assembler.h"          // NOLINT
43 #include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
44 #endif
45 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
46 #include "src/regexp-macro-assembler.h"            // NOLINT
47 #include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
48 #endif
49 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
50 #include "src/regexp-macro-assembler.h"
51 #include "src/mips64/regexp-macro-assembler-mips64.h"
52 #endif
53
54 namespace v8 {
55 namespace internal {
56
57
58 Heap::Heap()
59     : amount_of_external_allocated_memory_(0),
60       amount_of_external_allocated_memory_at_last_global_gc_(0),
61       isolate_(NULL),
62       code_range_size_(0),
63       // semispace_size_ should be a power of 2 and old_generation_size_ should
64       // be a multiple of Page::kPageSize.
65       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
66       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
67       initial_semispace_size_(Page::kPageSize),
68       target_semispace_size_(Page::kPageSize),
69       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
70       initial_old_generation_size_(max_old_generation_size_ /
71                                    kInitalOldGenerationLimitFactor),
72       old_generation_size_configured_(false),
73       max_executable_size_(256ul * (kPointerSize / 4) * MB),
74       // Variables set based on semispace_size_ and old_generation_size_ in
75       // ConfigureHeap.
76       // Will be 4 * reserved_semispace_size_ to ensure that the young
77       // generation can be aligned to its size.
78       maximum_committed_(0),
79       survived_since_last_expansion_(0),
80       survived_last_scavenge_(0),
81       sweep_generation_(0),
82       always_allocate_scope_depth_(0),
83       contexts_disposed_(0),
84       global_ic_age_(0),
85       scan_on_scavenge_pages_(0),
86       new_space_(this),
87       old_pointer_space_(NULL),
88       old_data_space_(NULL),
89       code_space_(NULL),
90       map_space_(NULL),
91       cell_space_(NULL),
92       property_cell_space_(NULL),
93       lo_space_(NULL),
94       gc_state_(NOT_IN_GC),
95       gc_post_processing_depth_(0),
96       allocations_count_(0),
97       raw_allocations_hash_(0),
98       dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
99       ms_count_(0),
100       gc_count_(0),
101       remembered_unmapped_pages_index_(0),
102       unflattened_strings_length_(0),
103 #ifdef DEBUG
104       allocation_timeout_(0),
105 #endif  // DEBUG
106       old_generation_allocation_limit_(initial_old_generation_size_),
107       idle_old_generation_allocation_limit_(
108           kMinimumOldGenerationAllocationLimit),
109       old_gen_exhausted_(false),
110       inline_allocation_disabled_(false),
111       store_buffer_rebuilder_(store_buffer()),
112       hidden_string_(NULL),
113       gc_safe_size_of_old_object_(NULL),
114       total_regexp_code_generated_(0),
115       tracer_(this),
116       high_survival_rate_period_length_(0),
117       promoted_objects_size_(0),
118       promotion_ratio_(0),
119       semi_space_copied_object_size_(0),
120       previous_semi_space_copied_object_size_(0),
121       semi_space_copied_rate_(0),
122       nodes_died_in_new_space_(0),
123       nodes_copied_in_new_space_(0),
124       nodes_promoted_(0),
125       maximum_size_scavenges_(0),
126       max_gc_pause_(0.0),
127       total_gc_time_ms_(0.0),
128       max_alive_after_gc_(0),
129       min_in_mutator_(kMaxInt),
130       marking_time_(0.0),
131       sweeping_time_(0.0),
132       last_idle_notification_time_(0.0),
133       mark_compact_collector_(this),
134       store_buffer_(this),
135       marking_(this),
136       incremental_marking_(this),
137       gc_count_at_last_idle_gc_(0),
138       full_codegen_bytes_generated_(0),
139       crankshaft_codegen_bytes_generated_(0),
140       gcs_since_last_deopt_(0),
141       allocation_sites_scratchpad_length_(0),
142       promotion_queue_(this),
143       configured_(false),
144       external_string_table_(this),
145       chunks_queued_for_free_(NULL),
146       gc_callbacks_depth_(0),
147       deserialization_complete_(false) {
148 // Allow build-time customization of the max semispace size. Building
149 // V8 with snapshots and a non-default max semispace size is much
150 // easier if you can define it as part of the build environment.
151 #if defined(V8_MAX_SEMISPACE_SIZE)
152   max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
153 #endif
154
155   // Ensure old_generation_size_ is a multiple of kPageSize.
156   DCHECK(MB >= Page::kPageSize);
157
158   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
159   set_native_contexts_list(NULL);
160   set_array_buffers_list(Smi::FromInt(0));
161   set_allocation_sites_list(Smi::FromInt(0));
162   set_encountered_weak_collections(Smi::FromInt(0));
163   set_encountered_weak_cells(Smi::FromInt(0));
164   // Put a dummy entry in the remembered pages so we can find the list in
165   // the minidump even if there are no real unmapped pages.
166   RememberUnmappedPage(NULL, false);
167
168   ClearObjectStats(true);
169 }
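// Illustrative sketch (not part of the original source): the defaults in the
// initializer list above scale with the pointer width via the
// "N * (kPointerSize / 4) * MB" pattern, e.g. the 8 MB max semispace size of
// a 32-bit build becomes 16 MB on a 64-bit build. The helper below is
// hypothetical and only restates that arithmetic.
static inline intptr_t SketchPointerScaledDefault(intptr_t base_mb,
                                                  int pointer_size_in_bytes) {
  // base_mb megabytes on a 32-bit build, doubled on a 64-bit build.
  return base_mb * (pointer_size_in_bytes / 4) * MB;
}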
170
171
172 intptr_t Heap::Capacity() {
173   if (!HasBeenSetUp()) return 0;
174
175   return new_space_.Capacity() + old_pointer_space_->Capacity() +
176          old_data_space_->Capacity() + code_space_->Capacity() +
177          map_space_->Capacity() + cell_space_->Capacity() +
178          property_cell_space_->Capacity();
179 }
180
181
182 intptr_t Heap::CommittedMemory() {
183   if (!HasBeenSetUp()) return 0;
184
185   return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
186          old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
187          map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
188          property_cell_space_->CommittedMemory() + lo_space_->Size();
189 }
190
191
192 size_t Heap::CommittedPhysicalMemory() {
193   if (!HasBeenSetUp()) return 0;
194
195   return new_space_.CommittedPhysicalMemory() +
196          old_pointer_space_->CommittedPhysicalMemory() +
197          old_data_space_->CommittedPhysicalMemory() +
198          code_space_->CommittedPhysicalMemory() +
199          map_space_->CommittedPhysicalMemory() +
200          cell_space_->CommittedPhysicalMemory() +
201          property_cell_space_->CommittedPhysicalMemory() +
202          lo_space_->CommittedPhysicalMemory();
203 }
204
205
206 intptr_t Heap::CommittedMemoryExecutable() {
207   if (!HasBeenSetUp()) return 0;
208
209   return isolate()->memory_allocator()->SizeExecutable();
210 }
211
212
213 void Heap::UpdateMaximumCommitted() {
214   if (!HasBeenSetUp()) return;
215
216   intptr_t current_committed_memory = CommittedMemory();
217   if (current_committed_memory > maximum_committed_) {
218     maximum_committed_ = current_committed_memory;
219   }
220 }
221
222
223 intptr_t Heap::Available() {
224   if (!HasBeenSetUp()) return 0;
225
226   return new_space_.Available() + old_pointer_space_->Available() +
227          old_data_space_->Available() + code_space_->Available() +
228          map_space_->Available() + cell_space_->Available() +
229          property_cell_space_->Available();
230 }
231
232
233 bool Heap::HasBeenSetUp() {
234   return old_pointer_space_ != NULL && old_data_space_ != NULL &&
235          code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
236          property_cell_space_ != NULL && lo_space_ != NULL;
237 }
238
239
240 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
241   if (IntrusiveMarking::IsMarked(object)) {
242     return IntrusiveMarking::SizeOfMarkedObject(object);
243   }
244   return object->SizeFromMap(object->map());
245 }
246
247
248 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
249                                               const char** reason) {
250   // Is global GC requested?
251   if (space != NEW_SPACE) {
252     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
253     *reason = "GC in old space requested";
254     return MARK_COMPACTOR;
255   }
256
257   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
258     *reason = "GC in old space forced by flags";
259     return MARK_COMPACTOR;
260   }
261
262   // Is enough data promoted to justify a global GC?
263   if (OldGenerationAllocationLimitReached()) {
264     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
265     *reason = "promotion limit reached";
266     return MARK_COMPACTOR;
267   }
268
269   // Have allocation in OLD and LO failed?
270   if (old_gen_exhausted_) {
271     isolate_->counters()
272         ->gc_compactor_caused_by_oldspace_exhaustion()
273         ->Increment();
274     *reason = "old generations exhausted";
275     return MARK_COMPACTOR;
276   }
277
278   // Is there enough space left in OLD to guarantee that a scavenge can
279   // succeed?
280   //
281   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
282   // for object promotion. It counts only the bytes that the memory
283   // allocator has not yet allocated from the OS and assigned to any space,
284   // and does not count available bytes already in the old space or code
285   // space.  Undercounting is safe---we may get an unrequested full GC when
286   // a scavenge would have succeeded.
287   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
288     isolate_->counters()
289         ->gc_compactor_caused_by_oldspace_exhaustion()
290         ->Increment();
291     *reason = "scavenge might not succeed";
292     return MARK_COMPACTOR;
293   }
294
295   // Default
296   *reason = NULL;
297   return SCAVENGER;
298 }
299
300
301 // TODO(1238405): Combine the infrastructure for --heap-stats and
302 // --log-gc to avoid the complicated preprocessor and flag testing.
303 void Heap::ReportStatisticsBeforeGC() {
304 // Heap::ReportHeapStatistics will also log NewSpace statistics when
305 // --log-gc is set.  The following logic is used to avoid
306 // double logging.
307 #ifdef DEBUG
308   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
309   if (FLAG_heap_stats) {
310     ReportHeapStatistics("Before GC");
311   } else if (FLAG_log_gc) {
312     new_space_.ReportStatistics();
313   }
314   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
315 #else
316   if (FLAG_log_gc) {
317     new_space_.CollectStatistics();
318     new_space_.ReportStatistics();
319     new_space_.ClearHistograms();
320   }
321 #endif  // DEBUG
322 }
323
324
325 void Heap::PrintShortHeapStatistics() {
326   if (!FLAG_trace_gc_verbose) return;
327   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
328            "d KB"
329            ", available: %6" V8_PTR_PREFIX "d KB\n",
330            isolate_->memory_allocator()->Size() / KB,
331            isolate_->memory_allocator()->Available() / KB);
332   PrintPID("New space,          used: %6" V8_PTR_PREFIX
333            "d KB"
334            ", available: %6" V8_PTR_PREFIX
335            "d KB"
336            ", committed: %6" V8_PTR_PREFIX "d KB\n",
337            new_space_.Size() / KB, new_space_.Available() / KB,
338            new_space_.CommittedMemory() / KB);
339   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
340            "d KB"
341            ", available: %6" V8_PTR_PREFIX
342            "d KB"
343            ", committed: %6" V8_PTR_PREFIX "d KB\n",
344            old_pointer_space_->SizeOfObjects() / KB,
345            old_pointer_space_->Available() / KB,
346            old_pointer_space_->CommittedMemory() / KB);
347   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
348            "d KB"
349            ", available: %6" V8_PTR_PREFIX
350            "d KB"
351            ", committed: %6" V8_PTR_PREFIX "d KB\n",
352            old_data_space_->SizeOfObjects() / KB,
353            old_data_space_->Available() / KB,
354            old_data_space_->CommittedMemory() / KB);
355   PrintPID("Code space,         used: %6" V8_PTR_PREFIX
356            "d KB"
357            ", available: %6" V8_PTR_PREFIX
358            "d KB"
359            ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
361            code_space_->CommittedMemory() / KB);
362   PrintPID("Map space,          used: %6" V8_PTR_PREFIX
363            "d KB"
364            ", available: %6" V8_PTR_PREFIX
365            "d KB"
366            ", committed: %6" V8_PTR_PREFIX "d KB\n",
367            map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
368            map_space_->CommittedMemory() / KB);
369   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
370            "d KB"
371            ", available: %6" V8_PTR_PREFIX
372            "d KB"
373            ", committed: %6" V8_PTR_PREFIX "d KB\n",
374            cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
375            cell_space_->CommittedMemory() / KB);
376   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
377            "d KB"
378            ", available: %6" V8_PTR_PREFIX
379            "d KB"
380            ", committed: %6" V8_PTR_PREFIX "d KB\n",
381            property_cell_space_->SizeOfObjects() / KB,
382            property_cell_space_->Available() / KB,
383            property_cell_space_->CommittedMemory() / KB);
384   PrintPID("Large object space, used: %6" V8_PTR_PREFIX
385            "d KB"
386            ", available: %6" V8_PTR_PREFIX
387            "d KB"
388            ", committed: %6" V8_PTR_PREFIX "d KB\n",
389            lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
390            lo_space_->CommittedMemory() / KB);
391   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
392            "d KB"
393            ", available: %6" V8_PTR_PREFIX
394            "d KB"
395            ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            this->SizeOfObjects() / KB, this->Available() / KB,
397            this->CommittedMemory() / KB);
398   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
399            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
400   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
401 }
402
403
404 // TODO(1238405): Combine the infrastructure for --heap-stats and
405 // --log-gc to avoid the complicated preprocessor and flag testing.
406 void Heap::ReportStatisticsAfterGC() {
407 // As with the reporting before the GC, we use some complicated logic to ensure
408 // that NewSpace statistics are logged exactly once when --log-gc is turned on.
409 #if defined(DEBUG)
410   if (FLAG_heap_stats) {
411     new_space_.CollectStatistics();
412     ReportHeapStatistics("After GC");
413   } else if (FLAG_log_gc) {
414     new_space_.ReportStatistics();
415   }
416 #else
417   if (FLAG_log_gc) new_space_.ReportStatistics();
418 #endif  // DEBUG
419 }
420
421
422 void Heap::GarbageCollectionPrologue() {
423   {
424     AllowHeapAllocation for_the_first_part_of_prologue;
425     ClearJSFunctionResultCaches();
426     gc_count_++;
427     unflattened_strings_length_ = 0;
428
429     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
430       mark_compact_collector()->EnableCodeFlushing(true);
431     }
432
433 #ifdef VERIFY_HEAP
434     if (FLAG_verify_heap) {
435       Verify();
436     }
437 #endif
438   }
439
440   // Reset GC statistics.
441   promoted_objects_size_ = 0;
442   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
443   semi_space_copied_object_size_ = 0;
444   nodes_died_in_new_space_ = 0;
445   nodes_copied_in_new_space_ = 0;
446   nodes_promoted_ = 0;
447
448   UpdateMaximumCommitted();
449
450 #ifdef DEBUG
451   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
452
453   if (FLAG_gc_verbose) Print();
454
455   ReportStatisticsBeforeGC();
456 #endif  // DEBUG
457
458   store_buffer()->GCPrologue();
459
460   if (isolate()->concurrent_osr_enabled()) {
461     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
462   }
463
464   if (new_space_.IsAtMaximumCapacity()) {
465     maximum_size_scavenges_++;
466   } else {
467     maximum_size_scavenges_ = 0;
468   }
469   CheckNewSpaceExpansionCriteria();
470 }
471
472
473 intptr_t Heap::SizeOfObjects() {
474   intptr_t total = 0;
475   AllSpaces spaces(this);
476   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
477     total += space->SizeOfObjects();
478   }
479   return total;
480 }
481
482
483 void Heap::ClearAllICsByKind(Code::Kind kind) {
484   HeapObjectIterator it(code_space());
485
486   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
487     Code* code = Code::cast(object);
488     Code::Kind current_kind = code->kind();
489     if (current_kind == Code::FUNCTION ||
490         current_kind == Code::OPTIMIZED_FUNCTION) {
491       code->ClearInlineCaches(kind);
492     }
493   }
494 }
495
496
497 void Heap::RepairFreeListsAfterDeserialization() {
498   PagedSpaces spaces(this);
499   for (PagedSpace* space = spaces.next(); space != NULL;
500        space = spaces.next()) {
501     space->RepairFreeListsAfterDeserialization();
502   }
503 }
504
505
506 void Heap::ProcessPretenuringFeedback() {
507   if (FLAG_allocation_site_pretenuring) {
508     int tenure_decisions = 0;
509     int dont_tenure_decisions = 0;
510     int allocation_mementos_found = 0;
511     int allocation_sites = 0;
512     int active_allocation_sites = 0;
513
514     // If the scratchpad overflowed, we have to iterate over the allocation
515     // sites list.
516     // TODO(hpayer): We iterate over the whole list of allocation sites when
517     // we have grown to the maximum semi-space size, to deopt maybe-tenured
518     // allocation sites. We could hold the maybe-tenured allocation sites
519     // in a separate data structure if this is a performance problem.
520     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
521     bool use_scratchpad =
522         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
523         !deopt_maybe_tenured;
524
525     int i = 0;
526     Object* list_element = allocation_sites_list();
527     bool trigger_deoptimization = false;
528     bool maximum_size_scavenge = MaximumSizeScavenge();
529     while (use_scratchpad ? i < allocation_sites_scratchpad_length_
530                           : list_element->IsAllocationSite()) {
531       AllocationSite* site =
532           use_scratchpad
533               ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
534               : AllocationSite::cast(list_element);
535       allocation_mementos_found += site->memento_found_count();
536       if (site->memento_found_count() > 0) {
537         active_allocation_sites++;
538         if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
539           trigger_deoptimization = true;
540         }
541         if (site->GetPretenureMode() == TENURED) {
542           tenure_decisions++;
543         } else {
544           dont_tenure_decisions++;
545         }
546         allocation_sites++;
547       }
548
549       if (deopt_maybe_tenured && site->IsMaybeTenure()) {
550         site->set_deopt_dependent_code(true);
551         trigger_deoptimization = true;
552       }
553
554       if (use_scratchpad) {
555         i++;
556       } else {
557         list_element = site->weak_next();
558       }
559     }
560
561     if (trigger_deoptimization) {
562       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
563     }
564
565     FlushAllocationSitesScratchpad();
566
567     if (FLAG_trace_pretenuring_statistics &&
568         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
569          dont_tenure_decisions > 0)) {
570       PrintF(
571           "GC: (mode, #visited allocation sites, #active allocation sites, "
572           "#mementos, #tenure decisions, #donttenure decisions) "
573           "(%s, %d, %d, %d, %d, %d)\n",
574           use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
575           active_allocation_sites, allocation_mementos_found, tenure_decisions,
576           dont_tenure_decisions);
577     }
578   }
579 }
580
581
582 void Heap::DeoptMarkedAllocationSites() {
583   // TODO(hpayer): If iterating over the allocation sites list becomes a
584   // performance issue, use a cache heap data structure instead (similar to the
585   // allocation sites scratchpad).
586   Object* list_element = allocation_sites_list();
587   while (list_element->IsAllocationSite()) {
588     AllocationSite* site = AllocationSite::cast(list_element);
589     if (site->deopt_dependent_code()) {
590       site->dependent_code()->MarkCodeForDeoptimization(
591           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
592       site->set_deopt_dependent_code(false);
593     }
594     list_element = site->weak_next();
595   }
596   Deoptimizer::DeoptimizeMarkedCode(isolate_);
597 }
598
599
600 void Heap::GarbageCollectionEpilogue() {
601   store_buffer()->GCEpilogue();
602
603   // In release mode, we only zap the from space under heap verification.
604   if (Heap::ShouldZapGarbage()) {
605     ZapFromSpace();
606   }
607
608   // Process pretenuring feedback and update allocation sites.
609   ProcessPretenuringFeedback();
610
611 #ifdef VERIFY_HEAP
612   if (FLAG_verify_heap) {
613     Verify();
614   }
615 #endif
616
617   AllowHeapAllocation for_the_rest_of_the_epilogue;
618
619 #ifdef DEBUG
620   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
621   if (FLAG_print_handles) PrintHandles();
622   if (FLAG_gc_verbose) Print();
623   if (FLAG_code_stats) ReportCodeStatistics("After GC");
624 #endif
625   if (FLAG_deopt_every_n_garbage_collections > 0) {
626     // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
627     // the topmost optimized frame can be deoptimized safely, because it
628     // might not have a lazy bailout point right after its current PC.
629     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
630       Deoptimizer::DeoptimizeAll(isolate());
631       gcs_since_last_deopt_ = 0;
632     }
633   }
634
635   UpdateMaximumCommitted();
636
637   isolate_->counters()->alive_after_last_gc()->Set(
638       static_cast<int>(SizeOfObjects()));
639
640   isolate_->counters()->string_table_capacity()->Set(
641       string_table()->Capacity());
642   isolate_->counters()->number_of_symbols()->Set(
643       string_table()->NumberOfElements());
644
645   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
646     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
647         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
648                          (crankshaft_codegen_bytes_generated_ +
649                           full_codegen_bytes_generated_)));
650   }
651
652   if (CommittedMemory() > 0) {
653     isolate_->counters()->external_fragmentation_total()->AddSample(
654         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
655
656     isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
657         (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
658     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
659         static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
660                          CommittedMemory()));
661     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
662         static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
663                          CommittedMemory()));
664     isolate_->counters()->heap_fraction_code_space()->AddSample(
665         static_cast<int>((code_space()->CommittedMemory() * 100.0) /
666                          CommittedMemory()));
667     isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
668         (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
669     isolate_->counters()->heap_fraction_cell_space()->AddSample(
670         static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
671                          CommittedMemory()));
672     isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
673         static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
674                          CommittedMemory()));
675     isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
676         (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
677
678     isolate_->counters()->heap_sample_total_committed()->AddSample(
679         static_cast<int>(CommittedMemory() / KB));
680     isolate_->counters()->heap_sample_total_used()->AddSample(
681         static_cast<int>(SizeOfObjects() / KB));
682     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
683         static_cast<int>(map_space()->CommittedMemory() / KB));
684     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
685         static_cast<int>(cell_space()->CommittedMemory() / KB));
686     isolate_->counters()
687         ->heap_sample_property_cell_space_committed()
688         ->AddSample(
689             static_cast<int>(property_cell_space()->CommittedMemory() / KB));
690     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
691         static_cast<int>(code_space()->CommittedMemory() / KB));
692
693     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
694         static_cast<int>(MaximumCommittedMemory() / KB));
695   }
696
697 #define UPDATE_COUNTERS_FOR_SPACE(space)                \
698   isolate_->counters()->space##_bytes_available()->Set( \
699       static_cast<int>(space()->Available()));          \
700   isolate_->counters()->space##_bytes_committed()->Set( \
701       static_cast<int>(space()->CommittedMemory()));    \
702   isolate_->counters()->space##_bytes_used()->Set(      \
703       static_cast<int>(space()->SizeOfObjects()));
704 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
705   if (space()->CommittedMemory() > 0) {                                \
706     isolate_->counters()->external_fragmentation_##space()->AddSample( \
707         static_cast<int>(100 -                                         \
708                          (space()->SizeOfObjects() * 100.0) /          \
709                              space()->CommittedMemory()));             \
710   }
711 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
712   UPDATE_COUNTERS_FOR_SPACE(space)                         \
713   UPDATE_FRAGMENTATION_FOR_SPACE(space)
714
715   UPDATE_COUNTERS_FOR_SPACE(new_space)
716   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
717   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
718   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
719   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
720   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
721   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
722   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
723 #undef UPDATE_COUNTERS_FOR_SPACE
724 #undef UPDATE_FRAGMENTATION_FOR_SPACE
725 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
726
727 #ifdef DEBUG
728   ReportStatisticsAfterGC();
729 #endif  // DEBUG
730
731   // Remember the last top pointer so that we can later find out
732   // whether we allocated in new space since the last GC.
733   new_space_top_after_last_gc_ = new_space()->top();
734 }
735
736
737 void Heap::HandleGCRequest() {
738   if (incremental_marking()->request_type() ==
739       IncrementalMarking::COMPLETE_MARKING) {
740     CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
741     return;
742   }
743   DCHECK(FLAG_overapproximate_weak_closure);
744   DCHECK(!incremental_marking()->weak_closure_was_overapproximated());
745   OverApproximateWeakClosure("GC interrupt");
746 }
747
748
749 void Heap::OverApproximateWeakClosure(const char* gc_reason) {
750   if (FLAG_trace_incremental_marking) {
751     PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
752            gc_reason);
753   }
754   {
755     GCCallbacksScope scope(this);
756     if (scope.CheckReenter()) {
757       AllowHeapAllocation allow_allocation;
758       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
759       VMState<EXTERNAL> state(isolate_);
760       HandleScope handle_scope(isolate_);
761       CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
762     }
763   }
764   mark_compact_collector()->OverApproximateWeakClosure();
765   incremental_marking()->set_should_hurry(false);
766   incremental_marking()->set_weak_closure_was_overapproximated(true);
767   {
768     GCCallbacksScope scope(this);
769     if (scope.CheckReenter()) {
770       AllowHeapAllocation allow_allocation;
771       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
772       VMState<EXTERNAL> state(isolate_);
773       HandleScope handle_scope(isolate_);
774       CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
775     }
776   }
777 }
778
779
780 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
781                              const v8::GCCallbackFlags gc_callback_flags) {
782   // Since we are ignoring the return value, the exact choice of space does
783   // not matter, so long as we do not specify NEW_SPACE, which would not
784   // cause a full GC.
785   mark_compact_collector_.SetFlags(flags);
786   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
787   mark_compact_collector_.SetFlags(kNoGCFlags);
788 }
789
790
791 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
792   // Since we are ignoring the return value, the exact choice of space does
793   // not matter, so long as we do not specify NEW_SPACE, which would not
794   // cause a full GC.
795   // Major GC would invoke weak handle callbacks on weakly reachable
796   // handles, but won't collect weakly reachable objects until next
797   // major GC.  Therefore if we collect aggressively and weak handle callback
798   // has been invoked, we rerun major GC to release objects which become
799   // garbage.
800   // Note: as weak callbacks can execute arbitrary code, we cannot
801   // hope that eventually there will be no weak callbacks invocations.
802   // Therefore stop recollecting after several attempts.
803   if (isolate()->concurrent_recompilation_enabled()) {
804     // The optimizing compiler may be unnecessarily holding on to memory.
805     DisallowHeapAllocation no_recursive_gc;
806     isolate()->optimizing_compiler_thread()->Flush();
807   }
808   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
809                                      kReduceMemoryFootprintMask);
810   isolate_->compilation_cache()->Clear();
811   const int kMaxNumberOfAttempts = 7;
812   const int kMinNumberOfAttempts = 2;
813   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
814     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
815         attempt + 1 >= kMinNumberOfAttempts) {
816       break;
817     }
818   }
819   mark_compact_collector()->SetFlags(kNoGCFlags);
820   new_space_.Shrink();
821   UncommitFromSpace();
822 }
823
824
825 void Heap::EnsureFillerObjectAtTop() {
826   // There may be an allocation memento behind every object in new space.
827   // If we evacuate a new space that is not full, or if we are on the last
828   // page of the new space, then there may be uninitialized memory behind
829   // the top pointer of the new space page. We store a filler object there to
830   // identify the unused space.
831   Address from_top = new_space_.top();
832   Address from_limit = new_space_.limit();
833   if (from_top < from_limit) {
834     int remaining_in_page = static_cast<int>(from_limit - from_top);
835     CreateFillerObjectAt(from_top, remaining_in_page);
836   }
837 }
838
839
840 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
841                           const char* collector_reason,
842                           const v8::GCCallbackFlags gc_callback_flags) {
843   // The VM is in the GC state until exiting this function.
844   VMState<GC> state(isolate_);
845
846 #ifdef DEBUG
847   // Reset the allocation timeout to the GC interval, but make sure to
848   // allow at least a few allocations after a collection. The reason
849   // for this is that we have a lot of allocation sequences and we
850   // assume that a garbage collection will allow the subsequent
851   // allocation attempts to go through.
852   allocation_timeout_ = Max(6, FLAG_gc_interval);
853 #endif
854
855   EnsureFillerObjectAtTop();
856
857   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
858     if (FLAG_trace_incremental_marking) {
859       PrintF("[IncrementalMarking] Scavenge during marking.\n");
860     }
861   }
862
863   if (collector == MARK_COMPACTOR &&
864       !mark_compact_collector()->abort_incremental_marking() &&
865       !incremental_marking()->IsStopped() &&
866       !incremental_marking()->should_hurry() &&
867       FLAG_incremental_marking_steps) {
868     // Make progress in incremental marking.
869     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
870     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
871                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
872     if (!incremental_marking()->IsComplete() &&
873         !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
874       if (FLAG_trace_incremental_marking) {
875         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
876       }
877       collector = SCAVENGER;
878       collector_reason = "incremental marking delaying mark-sweep";
879     }
880   }
881
882   bool next_gc_likely_to_collect_more = false;
883
884   {
885     tracer()->Start(collector, gc_reason, collector_reason);
886     DCHECK(AllowHeapAllocation::IsAllowed());
887     DisallowHeapAllocation no_allocation_during_gc;
888     GarbageCollectionPrologue();
889
890     {
891       HistogramTimerScope histogram_timer_scope(
892           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
893                                    : isolate_->counters()->gc_compactor());
894       next_gc_likely_to_collect_more =
895           PerformGarbageCollection(collector, gc_callback_flags);
896     }
897
898     GarbageCollectionEpilogue();
899     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
900       isolate()->CheckDetachedContextsAfterGC();
901     }
902     tracer()->Stop(collector);
903   }
904
905   // Start incremental marking for the next cycle. The heap snapshot
906   // generator needs incremental marking to stay off after it aborted.
907   if (!mark_compact_collector()->abort_incremental_marking() &&
908       WorthActivatingIncrementalMarking()) {
909     incremental_marking()->Start();
910   }
911
912   return next_gc_likely_to_collect_more;
913 }
914
915
916 int Heap::NotifyContextDisposed(bool dependant_context) {
917   if (!dependant_context) {
918     tracer()->ResetSurvivalEvents();
919     old_generation_size_configured_ = false;
920   }
921   if (isolate()->concurrent_recompilation_enabled()) {
922     // Flush the queued recompilation tasks.
923     isolate()->optimizing_compiler_thread()->Flush();
924   }
925   AgeInlineCaches();
926   tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
927   return ++contexts_disposed_;
928 }
929
930
931 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
932                         int len) {
933   if (len == 0) return;
934
935   DCHECK(array->map() != fixed_cow_array_map());
936   Object** dst_objects = array->data_start() + dst_index;
937   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
938   if (!InNewSpace(array)) {
939     for (int i = 0; i < len; i++) {
940       // TODO(hpayer): check store buffer for entries
941       if (InNewSpace(dst_objects[i])) {
942         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
943       }
944     }
945   }
946   incremental_marking()->RecordWrites(array);
947 }
948
949
950 #ifdef VERIFY_HEAP
951 // Helper class for verifying the string table.
952 class StringTableVerifier : public ObjectVisitor {
953  public:
954   void VisitPointers(Object** start, Object** end) {
955     // Visit all HeapObject pointers in [start, end).
956     for (Object** p = start; p < end; p++) {
957       if ((*p)->IsHeapObject()) {
958         // Check that the string is actually internalized.
959         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
960               (*p)->IsInternalizedString());
961       }
962     }
963   }
964 };
965
966
967 static void VerifyStringTable(Heap* heap) {
968   StringTableVerifier verifier;
969   heap->string_table()->IterateElements(&verifier);
970 }
971 #endif  // VERIFY_HEAP
972
973
974 bool Heap::ReserveSpace(Reservation* reservations) {
975   bool gc_performed = true;
976   int counter = 0;
977   static const int kThreshold = 20;
978   while (gc_performed && counter++ < kThreshold) {
979     gc_performed = false;
980     for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
981       Reservation* reservation = &reservations[space];
982       DCHECK_LE(1, reservation->length());
983       if (reservation->at(0).size == 0) continue;
984       bool perform_gc = false;
985       if (space == LO_SPACE) {
986         DCHECK_EQ(1, reservation->length());
987         perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
988       } else {
989         for (auto& chunk : *reservation) {
990           AllocationResult allocation;
991           int size = chunk.size;
992           DCHECK_LE(size, MemoryAllocator::PageAreaSize(
993                               static_cast<AllocationSpace>(space)));
994           if (space == NEW_SPACE) {
995             allocation = new_space()->AllocateRaw(size);
996           } else {
997             allocation = paged_space(space)->AllocateRaw(size);
998           }
999           HeapObject* free_space;
1000           if (allocation.To(&free_space)) {
1001             // Mark with a free list node, in case we have a GC before
1002             // deserializing.
1003             Address free_space_address = free_space->address();
1004             CreateFillerObjectAt(free_space_address, size);
1005             DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
1006             chunk.start = free_space_address;
1007             chunk.end = free_space_address + size;
1008           } else {
1009             perform_gc = true;
1010             break;
1011           }
1012         }
1013       }
1014       if (perform_gc) {
1015         if (space == NEW_SPACE) {
1016           CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1017         } else {
1018           if (counter > 1) {
1019             CollectAllGarbage(
1020                 kReduceMemoryFootprintMask,
1021                 "failed to reserve space in paged or large "
1022                 "object space, trying to reduce memory footprint");
1023           } else {
1024             CollectAllGarbage(
1025                 kAbortIncrementalMarkingMask,
1026                 "failed to reserve space in paged or large object space");
1027           }
1028         }
1029         gc_performed = true;
1030         break;  // Abort for-loop over spaces and retry.
1031       }
1032     }
1033   }
1034
1035   return !gc_performed;
1036 }
1037
1038
1039 void Heap::EnsureFromSpaceIsCommitted() {
1040   if (new_space_.CommitFromSpaceIfNeeded()) return;
1041
1042   // Committing memory to from space failed.
1043   // Memory is exhausted and we will die.
1044   V8::FatalProcessOutOfMemory("Committing semi space failed.");
1045 }
1046
1047
1048 void Heap::ClearJSFunctionResultCaches() {
1049   if (isolate_->bootstrapper()->IsActive()) return;
1050
1051   Object* context = native_contexts_list();
1052   while (!context->IsUndefined()) {
1053     // Get the caches for this context. GC can happen when the context
1054     // is not fully initialized, so the caches can be undefined.
1055     Object* caches_or_undefined =
1056         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
1057     if (!caches_or_undefined->IsUndefined()) {
1058       FixedArray* caches = FixedArray::cast(caches_or_undefined);
1059       // Clear the caches:
1060       int length = caches->length();
1061       for (int i = 0; i < length; i++) {
1062         JSFunctionResultCache::cast(caches->get(i))->Clear();
1063       }
1064     }
1065     // Get the next context:
1066     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1067   }
1068 }
1069
1070
1071 void Heap::ClearNormalizedMapCaches() {
1072   if (isolate_->bootstrapper()->IsActive() &&
1073       !incremental_marking()->IsMarking()) {
1074     return;
1075   }
1076
1077   Object* context = native_contexts_list();
1078   while (!context->IsUndefined()) {
1079     // GC can happen when the context is not fully initialized,
1080     // so the cache can be undefined.
1081     Object* cache =
1082         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1083     if (!cache->IsUndefined()) {
1084       NormalizedMapCache::cast(cache)->Clear();
1085     }
1086     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1087   }
1088 }
1089
1090
1091 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1092   if (start_new_space_size == 0) return;
1093
1094   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1095                       static_cast<double>(start_new_space_size) * 100);
1096
1097   if (previous_semi_space_copied_object_size_ > 0) {
1098     promotion_rate_ =
1099         (static_cast<double>(promoted_objects_size_) /
1100          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1101   } else {
1102     promotion_rate_ = 0;
1103   }
1104
1105   semi_space_copied_rate_ =
1106       (static_cast<double>(semi_space_copied_object_size_) /
1107        static_cast<double>(start_new_space_size) * 100);
1108
1109   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1110   tracer()->AddSurvivalRatio(survival_rate);
1111   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1112     high_survival_rate_period_length_++;
1113   } else {
1114     high_survival_rate_period_length_ = 0;
1115   }
1116 }
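// Illustrative sketch (not part of the original source): the survival rate
// aggregated above is the promoted plus the in-place-copied bytes as a
// percentage of the new space size at the start of the GC, e.g. 2 MB promoted
// and 1 MB copied out of an 8 MB new space gives 37.5%. The helper below is
// hypothetical.
static inline double SketchSurvivalRatePercent(double promoted_bytes,
                                               double copied_bytes,
                                               double start_new_space_bytes) {
  // Equals promotion_ratio_ + semi_space_copied_rate_ as computed above.
  return (promoted_bytes + copied_bytes) / start_new_space_bytes * 100;
}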
1117
1118 bool Heap::PerformGarbageCollection(
1119     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1120   int freed_global_handles = 0;
1121
1122   if (collector != SCAVENGER) {
1123     PROFILE(isolate_, CodeMovingGCEvent());
1124   }
1125
1126 #ifdef VERIFY_HEAP
1127   if (FLAG_verify_heap) {
1128     VerifyStringTable(this);
1129   }
1130 #endif
1131
1132   GCType gc_type =
1133       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1134
1135   {
1136     GCCallbacksScope scope(this);
1137     if (scope.CheckReenter()) {
1138       AllowHeapAllocation allow_allocation;
1139       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1140       VMState<EXTERNAL> state(isolate_);
1141       HandleScope handle_scope(isolate_);
1142       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1143     }
1144   }
1145
1146   EnsureFromSpaceIsCommitted();
1147
1148   int start_new_space_size = Heap::new_space()->SizeAsInt();
1149
1150   if (IsHighSurvivalRate()) {
1151     // We speed up the incremental marker if it is running so that it
1152     // does not fall behind the rate of promotion, which would cause a
1153     // constantly growing old space.
1154     incremental_marking()->NotifyOfHighPromotionRate();
1155   }
1156
1157   if (collector == MARK_COMPACTOR) {
1158     // Perform mark-sweep with optional compaction.
1159     MarkCompact();
1160     sweep_generation_++;
1161     // Temporarily set the limit for the case when PostGarbageCollectionProcessing
1162     // allocates and triggers GC. The real limit is set after
1163     // PostGarbageCollectionProcessing.
1164     SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1165     old_gen_exhausted_ = false;
1166     old_generation_size_configured_ = true;
1167   } else {
1168     Scavenge();
1169   }
1170
1171   UpdateSurvivalStatistics(start_new_space_size);
1172   ConfigureInitialOldGenerationSize();
1173
1174   isolate_->counters()->objs_since_last_young()->Set(0);
1175
1176   // Callbacks that fire after this point might trigger nested GCs and
1177   // restart incremental marking, so the assertion can't be moved down.
1178   DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1179
1180   gc_post_processing_depth_++;
1181   {
1182     AllowHeapAllocation allow_allocation;
1183     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1184     freed_global_handles =
1185         isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1186   }
1187   gc_post_processing_depth_--;
1188
1189   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1190
1191   // Update relocatables.
1192   Relocatable::PostGarbageCollectionProcessing(isolate_);
1193
1194   if (collector == MARK_COMPACTOR) {
1195     // Register the amount of external allocated memory.
1196     amount_of_external_allocated_memory_at_last_global_gc_ =
1197         amount_of_external_allocated_memory_;
1198     SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1199                                     freed_global_handles);
1200     // We finished a marking cycle. We can uncommit the marking deque until
1201     // we start marking again.
1202     mark_compact_collector_.UncommitMarkingDeque();
1203   }
1204
1205   {
1206     GCCallbacksScope scope(this);
1207     if (scope.CheckReenter()) {
1208       AllowHeapAllocation allow_allocation;
1209       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1210       VMState<EXTERNAL> state(isolate_);
1211       HandleScope handle_scope(isolate_);
1212       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1213     }
1214   }
1215
1216 #ifdef VERIFY_HEAP
1217   if (FLAG_verify_heap) {
1218     VerifyStringTable(this);
1219   }
1220 #endif
1221
1222   return freed_global_handles > 0;
1223 }
1224
1225
1226 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1227   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1228     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1229       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1230         v8::GCPrologueCallback callback =
1231             reinterpret_cast<v8::GCPrologueCallback>(
1232                 gc_prologue_callbacks_[i].callback);
1233         callback(gc_type, flags);
1234       } else {
1235         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1236         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1237       }
1238     }
1239   }
1240 }
1241
1242
1243 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1244                                    GCCallbackFlags gc_callback_flags) {
1245   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1246     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1247       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1248         v8::GCPrologueCallback callback =
1249             reinterpret_cast<v8::GCPrologueCallback>(
1250                 gc_epilogue_callbacks_[i].callback);
1251         callback(gc_type, gc_callback_flags);
1252       } else {
1253         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1254         gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1255       }
1256     }
1257   }
1258 }
1259
1260
1261 void Heap::MarkCompact() {
1262   gc_state_ = MARK_COMPACT;
1263   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1264
1265   uint64_t size_of_objects_before_gc = SizeOfObjects();
1266
1267   mark_compact_collector_.Prepare();
1268
1269   ms_count_++;
1270
1271   MarkCompactPrologue();
1272
1273   mark_compact_collector_.CollectGarbage();
1274
1275   LOG(isolate_, ResourceEvent("markcompact", "end"));
1276
1277   MarkCompactEpilogue();
1278
1279   if (FLAG_allocation_site_pretenuring) {
1280     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1281   }
1282 }
1283
1284
1285 void Heap::MarkCompactEpilogue() {
1286   gc_state_ = NOT_IN_GC;
1287
1288   isolate_->counters()->objs_since_last_full()->Set(0);
1289
1290   incremental_marking()->Epilogue();
1291 }
1292
1293
1294 void Heap::MarkCompactPrologue() {
1295   // At any old GC clear the keyed lookup cache to enable collection of unused
1296   // maps.
1297   isolate_->keyed_lookup_cache()->Clear();
1298   isolate_->context_slot_cache()->Clear();
1299   isolate_->descriptor_lookup_cache()->Clear();
1300   RegExpResultsCache::Clear(string_split_cache());
1301   RegExpResultsCache::Clear(regexp_multiple_cache());
1302
1303   isolate_->compilation_cache()->MarkCompactPrologue();
1304
1305   CompletelyClearInstanceofCache();
1306
1307   FlushNumberStringCache();
1308   if (FLAG_cleanup_code_caches_at_gc) {
1309     polymorphic_code_cache()->set_cache(undefined_value());
1310   }
1311
1312   ClearNormalizedMapCaches();
1313 }
1314
1315
1316 // Helper class for copying HeapObjects
1317 class ScavengeVisitor : public ObjectVisitor {
1318  public:
1319   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1320
1321   void VisitPointer(Object** p) { ScavengePointer(p); }
1322
1323   void VisitPointers(Object** start, Object** end) {
1324     // Copy all HeapObject pointers in [start, end)
1325     for (Object** p = start; p < end; p++) ScavengePointer(p);
1326   }
1327
1328  private:
1329   void ScavengePointer(Object** p) {
1330     Object* object = *p;
1331     if (!heap_->InNewSpace(object)) return;
1332     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1333                          reinterpret_cast<HeapObject*>(object));
1334   }
1335
1336   Heap* heap_;
1337 };
1338
1339
1340 #ifdef VERIFY_HEAP
1341 // Visitor class to verify pointers in code or data space do not point into
1342 // new space.
1343 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1344  public:
1345   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1346   void VisitPointers(Object** start, Object** end) {
1347     for (Object** current = start; current < end; current++) {
1348       if ((*current)->IsHeapObject()) {
1349         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1350       }
1351     }
1352   }
1353
1354  private:
1355   Heap* heap_;
1356 };
1357
1358
1359 static void VerifyNonPointerSpacePointers(Heap* heap) {
1360   // Verify that there are no pointers to new space in spaces where we
1361   // do not expect them.
1362   VerifyNonPointerSpacePointersVisitor v(heap);
1363   HeapObjectIterator code_it(heap->code_space());
1364   for (HeapObject* object = code_it.Next(); object != NULL;
1365        object = code_it.Next())
1366     object->Iterate(&v);
1367
1368   HeapObjectIterator data_it(heap->old_data_space());
1369   for (HeapObject* object = data_it.Next(); object != NULL;
1370        object = data_it.Next())
1371     object->Iterate(&v);
1372 }
1373 #endif  // VERIFY_HEAP
1374
1375
1376 void Heap::CheckNewSpaceExpansionCriteria() {
1377   if (FLAG_experimental_new_space_growth_heuristic) {
1378     if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1379         survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
1380       // Grow the size of new space if there is room to grow, and more than 10%
1381       // have survived the last scavenge.
1382       new_space_.Grow();
1383       survived_since_last_expansion_ = 0;
1384     }
1385   } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1386              survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1387     // Grow the size of new space if there is room to grow, and enough data
1388     // has survived scavenge since the last expansion.
1389     new_space_.Grow();
1390     survived_since_last_expansion_ = 0;
1391   }
1392 }
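// Illustrative sketch (not part of the original source): the experimental
// growth criterion above, restated over plain byte counts. The helper below
// is hypothetical.
static inline bool SketchShouldGrowNewSpace(intptr_t survived_last_scavenge,
                                            intptr_t total_capacity,
                                            intptr_t maximum_capacity) {
  // Grow while there is still room to grow and at least 10% of the current
  // capacity survived the last scavenge.
  return total_capacity < maximum_capacity &&
         survived_last_scavenge * 100 / total_capacity >= 10;
}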
1393
1394
1395 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1396   return heap->InNewSpace(*p) &&
1397          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1398 }
1399
1400
1401 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1402                                        StoreBufferEvent event) {
1403   heap->store_buffer_rebuilder_.Callback(page, event);
1404 }
1405
1406
1407 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1408   if (event == kStoreBufferStartScanningPagesEvent) {
1409     start_of_current_page_ = NULL;
1410     current_page_ = NULL;
1411   } else if (event == kStoreBufferScanningPageEvent) {
1412     if (current_page_ != NULL) {
1413       // If this page already overflowed the store buffer during this iteration.
1414       if (current_page_->scan_on_scavenge()) {
1415         // Then we should wipe out the entries that have been added for it.
1416         store_buffer_->SetTop(start_of_current_page_);
1417       } else if (store_buffer_->Top() - start_of_current_page_ >=
1418                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1419         // Did we find too many pointers in the previous page?  The heuristic is
1420         // that no page can take more than 1/5 of the remaining slots in the store
1421         // buffer.
1422         current_page_->set_scan_on_scavenge(true);
1423         store_buffer_->SetTop(start_of_current_page_);
1424       } else {
1425         // In this case the page we scanned took a reasonable number of slots in
1426         // the store buffer.  It has now been rehabilitated and is no longer
1427         // marked scan_on_scavenge.
1428         DCHECK(!current_page_->scan_on_scavenge());
1429       }
1430     }
1431     start_of_current_page_ = store_buffer_->Top();
1432     current_page_ = page;
1433   } else if (event == kStoreBufferFullEvent) {
1434     // The current page overflowed the store buffer again.  Wipe out its entries
1435     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1436     // several times while scanning.
1437     if (current_page_ == NULL) {
1438       // Store Buffer overflowed while scanning promoted objects.  These are not
1439       // in any particular page, though they are likely to be clustered by the
1440       // allocation routines.
1441       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1442     } else {
1443       // Store Buffer overflowed while scanning a particular old space page for
1444       // pointers to new space.
1445       DCHECK(current_page_ == page);
1446       DCHECK(page != NULL);
1447       current_page_->set_scan_on_scavenge(true);
1448       DCHECK(start_of_current_page_ != store_buffer_->Top());
1449       store_buffer_->SetTop(start_of_current_page_);
1450     }
1451   } else {
1452     UNREACHABLE();
1453   }
1454 }
1455
1456
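// The promotion queue occupies the unused end of to-space: front_ and rear_
// start at ToSpaceEnd() and the queue grows downwards in two-word
// (size, object) entries, with limit_ marking the start of the last to-space
// page.  On a promotion conflict (new-space allocation running into the
// queue) the queue head is moved onto the emergency stack; see
// RelocateQueueHead below.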
1457 void PromotionQueue::Initialize() {
  // The last to-space page may be used for the promotion queue. On promotion
  // conflict, we use the emergency stack.
1460   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1461          0);
1462   front_ = rear_ =
1463       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1464   limit_ = reinterpret_cast<intptr_t*>(
1465       Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
1466   emergency_stack_ = NULL;
1467 }
1468
1469
1470 void PromotionQueue::RelocateQueueHead() {
1471   DCHECK(emergency_stack_ == NULL);
1472
1473   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1474   intptr_t* head_start = rear_;
1475   intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1476
1477   int entries_count =
1478       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1479
1480   emergency_stack_ = new List<Entry>(2 * entries_count);
1481
1482   while (head_start != head_end) {
1483     int size = static_cast<int>(*(head_start++));
1484     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1485     // New space allocation in SemiSpaceCopyObject marked the region
1486     // overlapping with promotion queue as uninitialized.
1487     MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
1488     MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
1489     emergency_stack_->Add(Entry(obj, size));
1490   }
1491   rear_ = head_end;
1492 }
1493
1494
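// Weak-list retainer used right after a scavenge: an object that is still in
// from space is retained only if it has been copied (i.e. its map word holds
// a forwarding address); otherwise it is dropped from the list.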
1495 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1496  public:
1497   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1498
1499   virtual Object* RetainAs(Object* object) {
1500     if (!heap_->InFromSpace(object)) {
1501       return object;
1502     }
1503
1504     MapWord map_word = HeapObject::cast(object)->map_word();
1505     if (map_word.IsForwardingAddress()) {
1506       return map_word.ToForwardingAddress();
1507     }
1508     return NULL;
1509   }
1510
1511  private:
1512   Heap* heap_;
1513 };
1514
1515
1516 void Heap::Scavenge() {
1517   RelocationLock relocation_lock(this);
1518
1519 #ifdef VERIFY_HEAP
1520   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1521 #endif
1522
1523   gc_state_ = SCAVENGE;
1524
1525   // Implements Cheney's copying algorithm
1526   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1527
1528   // Clear descriptor cache.
1529   isolate_->descriptor_lookup_cache()->Clear();
1530
1531   // Used for updating survived_since_last_expansion_ at function end.
1532   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1533
1534   SelectScavengingVisitorsTable();
1535
1536   incremental_marking()->PrepareForScavenge();
1537
1538   // Flip the semispaces.  After flipping, to space is empty, from space has
1539   // live objects.
1540   new_space_.Flip();
1541   new_space_.ResetAllocationInfo();
1542
1543   // We need to sweep newly copied objects which can be either in the
1544   // to space or promoted to the old generation.  For to-space
1545   // objects, we treat the bottom of the to space as a queue.  Newly
1546   // copied and unswept objects lie between a 'front' mark and the
1547   // allocation pointer.
1548   //
1549   // Promoted objects can go into various old-generation spaces, and
1550   // can be allocated internally in the spaces (from the free list).
1551   // We treat the top of the to space as a queue of addresses of
1552   // promoted objects.  The addresses of newly promoted and unswept
1553   // objects lie between a 'front' mark and a 'rear' mark that is
1554   // updated as a side effect of promoting an object.
1555   //
1556   // There is guaranteed to be enough room at the top of the to space
1557   // for the addresses of promoted objects: every object promoted
1558   // frees up its size in bytes from the top of the new space, and
1559   // objects are at least one pointer in size.
1560   Address new_space_front = new_space_.ToSpaceStart();
1561   promotion_queue_.Initialize();
1562
1563 #ifdef DEBUG
1564   store_buffer()->Clean();
1565 #endif
1566
1567   ScavengeVisitor scavenge_visitor(this);
1568   // Copy roots.
1569   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1570
1571   // Copy objects reachable from the old generation.
1572   {
1573     StoreBufferRebuildScope scope(this, store_buffer(),
1574                                   &ScavengeStoreBufferCallback);
1575     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1576   }
1577
1578   // Copy objects reachable from simple cells by scavenging cell values
1579   // directly.
1580   HeapObjectIterator cell_iterator(cell_space_);
1581   for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1582        heap_object = cell_iterator.Next()) {
1583     if (heap_object->IsCell()) {
1584       Cell* cell = Cell::cast(heap_object);
1585       Address value_address = cell->ValueAddress();
1586       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1587     }
1588   }
1589
1590   // Copy objects reachable from global property cells by scavenging global
1591   // property cell values directly.
1592   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1593   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1594        heap_object != NULL;
1595        heap_object = js_global_property_cell_iterator.Next()) {
1596     if (heap_object->IsPropertyCell()) {
1597       PropertyCell* cell = PropertyCell::cast(heap_object);
1598       Address value_address = cell->ValueAddress();
1599       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1600       Address type_address = cell->TypeAddress();
1601       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1602     }
1603   }
1604
1605   // Copy objects reachable from the encountered weak collections list.
1606   scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1607   // Copy objects reachable from the encountered weak cells.
1608   scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1609
1610   // Copy objects reachable from the code flushing candidates list.
1611   MarkCompactCollector* collector = mark_compact_collector();
1612   if (collector->is_code_flushing_enabled()) {
1613     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1614   }
1615
1616   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1617
1618   while (isolate()->global_handles()->IterateObjectGroups(
1619       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1620     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1621   }
1622   isolate()->global_handles()->RemoveObjectGroups();
1623   isolate()->global_handles()->RemoveImplicitRefGroups();
1624
1625   isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1626       &IsUnscavengedHeapObject);
1627
1628   isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
1629       &scavenge_visitor);
1630   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1631
1632   UpdateNewSpaceReferencesInExternalStringTable(
1633       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1634
1635   promotion_queue_.Destroy();
1636
1637   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1638
1639   ScavengeWeakObjectRetainer weak_object_retainer(this);
1640   ProcessYoungWeakReferences(&weak_object_retainer);
1641
  // Collects callback info for handles referenced by the young generation that
  // are pending (about to be collected) and either phantom or internal-fields.
1644   // Releases the global handles.  See also PostGarbageCollectionProcessing.
1645   isolate()->global_handles()->CollectYoungPhantomCallbackData();
1646
1647   DCHECK(new_space_front == new_space_.top());
1648
1649   // Set age mark.
1650   new_space_.set_age_mark(new_space_.top());
1651
1652   new_space_.LowerInlineAllocationLimit(
1653       new_space_.inline_allocation_limit_step());
1654
1655   // Update how much has survived scavenge.
1656   IncrementYoungSurvivorsCounter(static_cast<int>(
1657       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1658
1659   LOG(isolate_, ResourceEvent("scavenge", "end"));
1660
1661   gc_state_ = NOT_IN_GC;
1662
1663   gc_idle_time_handler_.NotifyScavenge();
1664 }
1665
1666
1667 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1668                                                                 Object** p) {
1669   MapWord first_word = HeapObject::cast(*p)->map_word();
1670
1671   if (!first_word.IsForwardingAddress()) {
1672     // Unreachable external string can be finalized.
1673     heap->FinalizeExternalString(String::cast(*p));
1674     return NULL;
1675   }
1676
1677   // String is still reachable.
1678   return String::cast(first_word.ToForwardingAddress());
1679 }
1680
1681
1682 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1683     ExternalStringTableUpdaterCallback updater_func) {
1684 #ifdef VERIFY_HEAP
1685   if (FLAG_verify_heap) {
1686     external_string_table_.Verify();
1687   }
1688 #endif
1689
1690   if (external_string_table_.new_space_strings_.is_empty()) return;
1691
1692   Object** start = &external_string_table_.new_space_strings_[0];
1693   Object** end = start + external_string_table_.new_space_strings_.length();
1694   Object** last = start;
1695
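  // |last| is the write cursor: entries for strings that stay in new space are
  // compacted towards the front of the list, promoted strings are moved to the
  // old-space string list, and finalized strings are skipped.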
1696   for (Object** p = start; p < end; ++p) {
1697     DCHECK(InFromSpace(*p));
1698     String* target = updater_func(this, p);
1699
1700     if (target == NULL) continue;
1701
1702     DCHECK(target->IsExternalString());
1703
1704     if (InNewSpace(target)) {
1705       // String is still in new space.  Update the table entry.
1706       *last = target;
1707       ++last;
1708     } else {
1709       // String got promoted.  Move it to the old string list.
1710       external_string_table_.AddOldString(target);
1711     }
1712   }
1713
1714   DCHECK(last <= end);
1715   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1716 }
1717
1718
1719 void Heap::UpdateReferencesInExternalStringTable(
1720     ExternalStringTableUpdaterCallback updater_func) {
1721   // Update old space string references.
1722   if (external_string_table_.old_space_strings_.length() > 0) {
1723     Object** start = &external_string_table_.old_space_strings_[0];
1724     Object** end = start + external_string_table_.old_space_strings_.length();
1725     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1726   }
1727
1728   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1729 }
1730
1731
1732 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1733   ProcessArrayBuffers(retainer);
1734   ProcessNativeContexts(retainer);
1735   ProcessAllocationSites(retainer);
1736 }
1737
1738
1739 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1740   ProcessArrayBuffers(retainer);
1741   ProcessNativeContexts(retainer);
1742 }
1743
1744
1745 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1746   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1747   // Update the head of the list of contexts.
1748   set_native_contexts_list(head);
1749 }
1750
1751
1752 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1753   Object* array_buffer_obj =
1754       VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1755   set_array_buffers_list(array_buffer_obj);
1756 }
1757
1758
1759 void Heap::TearDownArrayBuffers() {
1760   Object* undefined = undefined_value();
1761   for (Object* o = array_buffers_list(); o != undefined;) {
1762     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1763     Runtime::FreeArrayBuffer(isolate(), buffer);
1764     o = buffer->weak_next();
1765   }
1766   set_array_buffers_list(undefined);
1767 }
1768
1769
1770 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1771   Object* allocation_site_obj =
1772       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1773   set_allocation_sites_list(allocation_site_obj);
1774 }
1775
1776
1777 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1778   DisallowHeapAllocation no_allocation_scope;
1779   Object* cur = allocation_sites_list();
1780   bool marked = false;
1781   while (cur->IsAllocationSite()) {
1782     AllocationSite* casted = AllocationSite::cast(cur);
1783     if (casted->GetPretenureMode() == flag) {
1784       casted->ResetPretenureDecision();
1785       casted->set_deopt_dependent_code(true);
1786       marked = true;
1787     }
1788     cur = casted->weak_next();
1789   }
1790   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1791 }
1792
1793
1794 void Heap::EvaluateOldSpaceLocalPretenuring(
1795     uint64_t size_of_objects_before_gc) {
1796   uint64_t size_of_objects_after_gc = SizeOfObjects();
1797   double old_generation_survival_rate =
1798       (static_cast<double>(size_of_objects_after_gc) * 100) /
1799       static_cast<double>(size_of_objects_before_gc);
1800
1801   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
    // Too many objects died in the old generation; pretenuring of the wrong
    // allocation sites may be the cause.  We have to deopt all dependent code
    // registered in the allocation sites in order to re-evaluate our
    // pretenuring decisions.
1806     ResetAllAllocationSitesDependentCode(TENURED);
1807     if (FLAG_trace_pretenuring) {
1808       PrintF(
1809           "Deopt all allocation sites dependent code due to low survival "
1810           "rate in the old generation %f\n",
1811           old_generation_survival_rate);
1812     }
1813   }
1814 }
1815
1816
1817 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1818   DisallowHeapAllocation no_allocation;
1819   // All external strings are listed in the external string table.
1820
1821   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1822    public:
1823     explicit ExternalStringTableVisitorAdapter(
1824         v8::ExternalResourceVisitor* visitor)
1825         : visitor_(visitor) {}
1826     virtual void VisitPointers(Object** start, Object** end) {
1827       for (Object** p = start; p < end; p++) {
1828         DCHECK((*p)->IsExternalString());
1829         visitor_->VisitExternalString(
1830             Utils::ToLocal(Handle<String>(String::cast(*p))));
1831       }
1832     }
1833
1834    private:
1835     v8::ExternalResourceVisitor* visitor_;
1836   } external_string_table_visitor(visitor);
1837
1838   external_string_table_.Iterate(&external_string_table_visitor);
1839 }
1840
1841
1842 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1843  public:
1844   static inline void VisitPointer(Heap* heap, Object** p) {
1845     Object* object = *p;
1846     if (!heap->InNewSpace(object)) return;
1847     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1848                          reinterpret_cast<HeapObject*>(object));
1849   }
1850 };
1851
1852
1853 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1854                          Address new_space_front) {
1855   do {
1856     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1857     // The addresses new_space_front and new_space_.top() define a
1858     // queue of unprocessed copied objects.  Process them until the
1859     // queue is empty.
1860     while (new_space_front != new_space_.top()) {
1861       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1862         HeapObject* object = HeapObject::FromAddress(new_space_front);
1863         new_space_front +=
1864             NewSpaceScavenger::IterateBody(object->map(), object);
1865       } else {
1866         new_space_front =
1867             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1868       }
1869     }
1870
1871     // Promote and process all the to-be-promoted objects.
1872     {
1873       StoreBufferRebuildScope scope(this, store_buffer(),
1874                                     &ScavengeStoreBufferCallback);
1875       while (!promotion_queue()->is_empty()) {
1876         HeapObject* target;
1877         int size;
1878         promotion_queue()->remove(&target, &size);
1879
        // The promoted object might already have been partially visited
        // during old space pointer iteration.  Thus we search specifically
        // for pointers into the from semispace instead of looking for
        // pointers to new space.
1884         DCHECK(!target->IsMap());
1885         Address obj_address = target->address();
1886 #if V8_DOUBLE_FIELDS_UNBOXING
1887         LayoutDescriptorHelper helper(target->map());
1888         bool has_only_tagged_fields = helper.all_fields_tagged();
1889
1890         if (!has_only_tagged_fields) {
1891           for (int offset = 0; offset < size;) {
1892             int end_of_region_offset;
1893             if (helper.IsTagged(offset, size, &end_of_region_offset)) {
1894               IterateAndMarkPointersToFromSpace(
1895                   obj_address + offset, obj_address + end_of_region_offset,
1896                   &ScavengeObject);
1897             }
1898             offset = end_of_region_offset;
1899           }
1900         } else {
1901 #endif
1902           IterateAndMarkPointersToFromSpace(obj_address, obj_address + size,
1903                                             &ScavengeObject);
1904 #if V8_DOUBLE_FIELDS_UNBOXING
1905         }
1906 #endif
1907       }
1908     }
1909
1910     // Take another spin if there are now unswept objects in new space
1911     // (there are currently no more unswept promoted objects).
1912   } while (new_space_front != new_space_.top());
1913
1914   return new_space_front;
1915 }
1916
1917
1918 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1919               0);  // NOLINT
1920 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1921               0);  // NOLINT
1922 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1923                kDoubleAlignmentMask) == 0);  // NOLINT
1924
1925
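// Callers requesting double alignment over-allocate by one pointer (see
// SemiSpaceCopyObject and PromoteObject below); depending on where the raw
// allocation landed, the extra word becomes a one-word filler either before
// or after the object so that the returned object address is double aligned.
// E.g. on a 32-bit build (kPointerSize == 4, double alignment == 8 bytes) an
// allocation starting at an address ending in ...4 gets the filler first and
// the object then starts at ...8.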
1926 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1927                                               int size));
1928
1929 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1930                                        int size) {
1931   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1932     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1933     return HeapObject::FromAddress(object->address() + kPointerSize);
1934   } else {
1935     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1936                                kPointerSize);
1937     return object;
1938   }
1939 }
1940
1941
1942 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
1943   return EnsureDoubleAligned(this, object, size);
1944 }
1945
1946
1947 enum LoggingAndProfiling {
1948   LOGGING_AND_PROFILING_ENABLED,
1949   LOGGING_AND_PROFILING_DISABLED
1950 };
1951
1952
1953 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1954
1955
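// Dispatch table of scavenging visitors, specialized on whether incremental
// marking bits have to be transferred to the copied objects (TRANSFER_MARKS
// vs. IGNORE_MARKS) and on whether object moves have to be logged for
// profiling.  Heap::SelectScavengingVisitorsTable() below picks the matching
// specialization at the start of every scavenge.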
1956 template <MarksHandling marks_handling,
1957           LoggingAndProfiling logging_and_profiling_mode>
1958 class ScavengingVisitor : public StaticVisitorBase {
1959  public:
1960   static void Initialize() {
1961     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1962     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1963     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1964     table_.Register(kVisitByteArray, &EvacuateByteArray);
1965     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1966     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1967     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1968     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1969
1970     table_.Register(
1971         kVisitNativeContext,
1972         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1973             Context::kSize>);
1974
1975     table_.Register(
1976         kVisitConsString,
1977         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1978             ConsString::kSize>);
1979
1980     table_.Register(
1981         kVisitSlicedString,
1982         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1983             SlicedString::kSize>);
1984
1985     table_.Register(
1986         kVisitSymbol,
1987         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1988             Symbol::kSize>);
1989
1990     table_.Register(
1991         kVisitSharedFunctionInfo,
1992         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1993             SharedFunctionInfo::kSize>);
1994
1995     table_.Register(kVisitJSWeakCollection,
1996                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1997
1998     table_.Register(kVisitJSArrayBuffer,
1999                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2000
2001     table_.Register(kVisitJSTypedArray,
2002                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2003
2004     table_.Register(kVisitJSDataView,
2005                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2006
2007     table_.Register(kVisitJSRegExp,
2008                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2009
2010     if (marks_handling == IGNORE_MARKS) {
2011       table_.Register(
2012           kVisitJSFunction,
2013           &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2014               JSFunction::kSize>);
2015     } else {
2016       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2017     }
2018
2019     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2020                                    kVisitDataObject, kVisitDataObjectGeneric>();
2021
2022     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2023                                    kVisitJSObject, kVisitJSObjectGeneric>();
2024
2025     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2026                                    kVisitStruct, kVisitStructGeneric>();
2027   }
2028
2029   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2030     return &table_;
2031   }
2032
2033  private:
2034   enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2035
2036   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2037     bool should_record = false;
2038 #ifdef DEBUG
2039     should_record = FLAG_heap_stats;
2040 #endif
2041     should_record = should_record || FLAG_log_gc;
2042     if (should_record) {
2043       if (heap->new_space()->Contains(obj)) {
2044         heap->new_space()->RecordAllocation(obj);
2045       } else {
2046         heap->new_space()->RecordPromotion(obj);
2047       }
2048     }
2049   }
2050
  // Helper function used by SemiSpaceCopyObject and PromoteObject to copy a
  // source object to an already allocated target object and to update the
  // forwarding pointer in the source object.
2054   INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
2055                                    HeapObject* target, int size)) {
2056     // If we migrate into to-space, then the to-space top pointer should be
2057     // right after the target object. Incorporate double alignment
2058     // over-allocation.
2059     DCHECK(!heap->InToSpace(target) ||
2060            target->address() + size == heap->new_space()->top() ||
2061            target->address() + size + kPointerSize == heap->new_space()->top());
2062
2063     // Make sure that we do not overwrite the promotion queue which is at
2064     // the end of to-space.
2065     DCHECK(!heap->InToSpace(target) ||
2066            heap->promotion_queue()->IsBelowPromotionQueue(
2067                heap->new_space()->top()));
2068
2069     // Copy the content of source to target.
2070     heap->CopyBlock(target->address(), source->address(), size);
2071
2072     // Set the forwarding address.
2073     source->set_map_word(MapWord::FromForwardingAddress(target));
2074
2075     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2076       // Update NewSpace stats if necessary.
2077       RecordCopiedObject(heap, target);
2078       heap->OnMoveEvent(target, source, size);
2079     }
2080
2081     if (marks_handling == TRANSFER_MARKS) {
2082       if (Marking::TransferColor(source, target)) {
2083         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2084       }
2085     }
2086   }
2087
2088   template <int alignment>
2089   static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
2090                                          HeapObject* object, int object_size) {
2091     Heap* heap = map->GetHeap();
2092
2093     int allocation_size = object_size;
2094     if (alignment != kObjectAlignment) {
2095       DCHECK(alignment == kDoubleAlignment);
2096       allocation_size += kPointerSize;
2097     }
2098
2099     DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
2100     AllocationResult allocation =
2101         heap->new_space()->AllocateRaw(allocation_size);
2102
2103     HeapObject* target = NULL;  // Initialization to please compiler.
2104     if (allocation.To(&target)) {
2105       // Order is important here: Set the promotion limit before storing a
2106       // filler for double alignment or migrating the object. Otherwise we
2107       // may end up overwriting promotion queue entries when we migrate the
2108       // object.
2109       heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2110
2111       if (alignment != kObjectAlignment) {
2112         target = EnsureDoubleAligned(heap, target, allocation_size);
2113       }
2114
2115       // Order is important: slot might be inside of the target if target
2116       // was allocated over a dead object and slot comes from the store
2117       // buffer.
2118       *slot = target;
2119       MigrateObject(heap, object, target, object_size);
2120
2121       heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2122       return true;
2123     }
2124     return false;
2125   }
2126
2127
2128   template <ObjectContents object_contents, int alignment>
2129   static inline bool PromoteObject(Map* map, HeapObject** slot,
2130                                    HeapObject* object, int object_size) {
2131     Heap* heap = map->GetHeap();
2132
2133     int allocation_size = object_size;
2134     if (alignment != kObjectAlignment) {
2135       DCHECK(alignment == kDoubleAlignment);
2136       allocation_size += kPointerSize;
2137     }
2138
2139     AllocationResult allocation;
2140     if (object_contents == DATA_OBJECT) {
2141       DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2142       allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2143     } else {
2144       DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2145       allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2146     }
2147
2148     HeapObject* target = NULL;  // Initialization to please compiler.
2149     if (allocation.To(&target)) {
2150       if (alignment != kObjectAlignment) {
2151         target = EnsureDoubleAligned(heap, target, allocation_size);
2152       }
2153
2154       // Order is important: slot might be inside of the target if target
2155       // was allocated over a dead object and slot comes from the store
2156       // buffer.
2157
2158       // Unfortunately, the allocation can also write over the slot if the slot
2159       // was in free space and the allocation wrote free list data (such as the
2160       // free list map or entry size) over the slot.  We guard against this by
2161       // checking that the slot still points to the object being moved.  This
2162       // should be sufficient because neither the free list map nor the free
2163       // list entry size should look like a new space pointer (the former is an
2164       // old space pointer, the latter is word-aligned).
2165       if (*slot == object) {
2166         *slot = target;
2167       }
2168       MigrateObject(heap, object, target, object_size);
2169
2170       if (object_contents == POINTER_OBJECT) {
2171         if (map->instance_type() == JS_FUNCTION_TYPE) {
2172           heap->promotion_queue()->insert(target,
2173                                           JSFunction::kNonWeakFieldsEndOffset);
2174         } else {
2175           heap->promotion_queue()->insert(target, object_size);
2176         }
2177       }
2178       heap->IncrementPromotedObjectsSize(object_size);
2179       return true;
2180     }
2181     return false;
2182   }
2183
2184
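  // Dispatches between the two routines above: objects that should not yet be
  // promoted are first copied within new space, with promotion as the
  // fallback; objects that should be promoted go to old space first, falling
  // back to a semispace copy if old-space allocation fails.  Only if all
  // attempts fail do we reach UNREACHABLE().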
2185   template <ObjectContents object_contents, int alignment>
2186   static inline void EvacuateObject(Map* map, HeapObject** slot,
2187                                     HeapObject* object, int object_size) {
2188     SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2189     SLOW_DCHECK(object->Size() == object_size);
2190     Heap* heap = map->GetHeap();
2191
2192     if (!heap->ShouldBePromoted(object->address(), object_size)) {
2193       // A semi-space copy may fail due to fragmentation. In that case, we
2194       // try to promote the object.
2195       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2196         return;
2197       }
2198     }
2199
2200     if (PromoteObject<object_contents, alignment>(map, slot, object,
2201                                                   object_size)) {
2202       return;
2203     }
2204
    // If promotion failed, we try to copy the object to the other semi-space.
2206     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2207
2208     UNREACHABLE();
2209   }
2210
2211
2212   static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2213                                         HeapObject* object) {
2214     ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2215         JSFunction::kSize>(map, slot, object);
2216
2217     MapWord map_word = object->map_word();
2218     DCHECK(map_word.IsForwardingAddress());
2219     HeapObject* target = map_word.ToForwardingAddress();
2220
2221     MarkBit mark_bit = Marking::MarkBitFrom(target);
2222     if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by the marker.
      // We should explicitly record the code entry slot for compaction because
      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
      // miss it, as it is not HeapObject-tagged.
2227       Address code_entry_slot =
2228           target->address() + JSFunction::kCodeEntryOffset;
2229       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2230       map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2231           code_entry_slot, code);
2232     }
2233   }
2234
2235
2236   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2237                                         HeapObject* object) {
2238     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2239     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2240                                                      object_size);
2241   }
2242
2243
2244   static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2245                                               HeapObject* object) {
2246     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2247     int object_size = FixedDoubleArray::SizeFor(length);
2248     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2249                                                   object_size);
2250   }
2251
2252
2253   static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2254                                              HeapObject* object) {
2255     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2256     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2257                                                   object_size);
2258   }
2259
2260
2261   static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2262                                                HeapObject* object) {
2263     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2264     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2265                                                   object_size);
2266   }
2267
2268
2269   static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2270                                        HeapObject* object) {
2271     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2272     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2273                                                   object_size);
2274   }
2275
2276
2277   static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2278                                               HeapObject* object) {
2279     int object_size = SeqOneByteString::cast(object)
2280                           ->SeqOneByteStringSize(map->instance_type());
2281     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2282                                                   object_size);
2283   }
2284
2285
2286   static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2287                                               HeapObject* object) {
2288     int object_size = SeqTwoByteString::cast(object)
2289                           ->SeqTwoByteStringSize(map->instance_type());
2290     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2291                                                   object_size);
2292   }
2293
2294
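  // A cons string whose second part is the empty string can be "short cut":
  // instead of copying the cons wrapper we evacuate its first part and leave
  // a forwarding address pointing to it.  This is only done when marks are
  // ignored; otherwise the cons string is evacuated like an ordinary pointer
  // object.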
2295   static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2296                                                HeapObject* object) {
2297     DCHECK(IsShortcutCandidate(map->instance_type()));
2298
2299     Heap* heap = map->GetHeap();
2300
2301     if (marks_handling == IGNORE_MARKS &&
2302         ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2303       HeapObject* first =
2304           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2305
2306       *slot = first;
2307
2308       if (!heap->InNewSpace(first)) {
2309         object->set_map_word(MapWord::FromForwardingAddress(first));
2310         return;
2311       }
2312
2313       MapWord first_word = first->map_word();
2314       if (first_word.IsForwardingAddress()) {
2315         HeapObject* target = first_word.ToForwardingAddress();
2316
2317         *slot = target;
2318         object->set_map_word(MapWord::FromForwardingAddress(target));
2319         return;
2320       }
2321
2322       heap->DoScavengeObject(first->map(), slot, first);
2323       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2324       return;
2325     }
2326
2327     int object_size = ConsString::kSize;
2328     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2329                                                      object_size);
2330   }
2331
2332   template <ObjectContents object_contents>
2333   class ObjectEvacuationStrategy {
2334    public:
2335     template <int object_size>
2336     static inline void VisitSpecialized(Map* map, HeapObject** slot,
2337                                         HeapObject* object) {
2338       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2339                                                         object_size);
2340     }
2341
2342     static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2343       int object_size = map->instance_size();
2344       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2345                                                         object_size);
2346     }
2347   };
2348
2349   static VisitorDispatchTable<ScavengingCallback> table_;
2350 };
2351
2352
2353 template <MarksHandling marks_handling,
2354           LoggingAndProfiling logging_and_profiling_mode>
2355 VisitorDispatchTable<ScavengingCallback>
2356     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2357
2358
2359 static void InitializeScavengingVisitorsTables() {
2360   ScavengingVisitor<TRANSFER_MARKS,
2361                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2362   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2363   ScavengingVisitor<TRANSFER_MARKS,
2364                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2365   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2366 }
2367
2368
2369 void Heap::SelectScavengingVisitorsTable() {
2370   bool logging_and_profiling =
2371       FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2372       isolate()->cpu_profiler()->is_profiling() ||
2373       (isolate()->heap_profiler() != NULL &&
2374        isolate()->heap_profiler()->is_tracking_object_moves());
2375
2376   if (!incremental_marking()->IsMarking()) {
2377     if (!logging_and_profiling) {
2378       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2379           IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2380     } else {
2381       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2382           IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2383     }
2384   } else {
2385     if (!logging_and_profiling) {
2386       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2387           TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2388     } else {
2389       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2390           TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2391     }
2392
2393     if (incremental_marking()->IsCompacting()) {
      // When compacting, forbid short-circuiting of cons strings.
      // The scavenging code relies on the fact that a new space object
      // can't be evacuated into an evacuation candidate, but
      // short-circuiting violates this assumption.
2398       scavenging_visitors_table_.Register(
2399           StaticVisitorBase::kVisitShortcutCandidate,
2400           scavenging_visitors_table_.GetVisitorById(
2401               StaticVisitorBase::kVisitConsString));
2402     }
2403   }
2404 }
2405
2406
2407 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2408   SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2409   MapWord first_word = object->map_word();
2410   SLOW_DCHECK(!first_word.IsForwardingAddress());
2411   Map* map = first_word.ToMap();
2412   map->GetHeap()->DoScavengeObject(map, p, object);
2413 }
2414
2415
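// Illustration: while the old generation size has not been configured yet, an
// average survival ratio of, say, 50% halves the current allocation limit on
// each call here, clamped from below by kMinimumOldGenerationAllocationLimit.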
2416 void Heap::ConfigureInitialOldGenerationSize() {
2417   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2418     old_generation_allocation_limit_ =
2419         Max(kMinimumOldGenerationAllocationLimit,
2420             static_cast<intptr_t>(
2421                 static_cast<double>(old_generation_allocation_limit_) *
2422                 (tracer()->AverageSurvivalRatio() / 100)));
2423   }
2424 }
2425
2426
2427 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2428                                           int instance_size) {
2429   Object* result;
2430   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2431   if (!allocation.To(&result)) return allocation;
2432
2433   // Map::cast cannot be used due to uninitialized map field.
2434   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2435   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2436   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  // Initialize the map to contain only tagged fields.
2438   reinterpret_cast<Map*>(result)->set_visitor_id(
2439       StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2440   if (FLAG_unbox_double_fields) {
2441     reinterpret_cast<Map*>(result)
2442         ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2443   }
2444   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2445   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2446   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2447   reinterpret_cast<Map*>(result)->set_bit_field(0);
2448   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2449   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2450                    Map::OwnsDescriptors::encode(true) |
2451                    Map::Counter::encode(Map::kRetainingCounterStart);
2452   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2453   return result;
2454 }
2455
2456
2457 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2458                                    int instance_size,
2459                                    ElementsKind elements_kind) {
2460   HeapObject* result;
2461   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2462   if (!allocation.To(&result)) return allocation;
2463
2464   result->set_map_no_write_barrier(meta_map());
2465   Map* map = Map::cast(result);
2466   map->set_instance_type(instance_type);
2467   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2468   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2469   map->set_instance_size(instance_size);
2470   map->set_inobject_properties(0);
2471   map->set_pre_allocated_property_fields(0);
2472   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2473   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2474                           SKIP_WRITE_BARRIER);
2475   map->init_back_pointer(undefined_value());
2476   map->set_unused_property_fields(0);
2477   map->set_instance_descriptors(empty_descriptor_array());
2478   if (FLAG_unbox_double_fields) {
2479     map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2480   }
2481   // Must be called only after |instance_type|, |instance_size| and
2482   // |layout_descriptor| are set.
2483   map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
2484   map->set_bit_field(0);
2485   map->set_bit_field2(1 << Map::kIsExtensible);
2486   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2487                    Map::OwnsDescriptors::encode(true) |
2488                    Map::Counter::encode(Map::kRetainingCounterStart);
2489   map->set_bit_field3(bit_field3);
2490   map->set_elements_kind(elements_kind);
2491
2492   return map;
2493 }
2494
2495
2496 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2497                                             AllocationSpace space) {
2498   HeapObject* obj;
2499   {
2500     AllocationResult allocation = AllocateRaw(size, space, space);
2501     if (!allocation.To(&obj)) return allocation;
2502   }
2503 #ifdef DEBUG
2504   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2505   DCHECK(chunk->owner()->identity() == space);
2506 #endif
2507   CreateFillerObjectAt(obj->address(), size);
2508   return obj;
2509 }
2510
2511
2512 const Heap::StringTypeTable Heap::string_type_table[] = {
2513 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2514   { type, size, k##camel_name##MapRootIndex }             \
2515   ,
2516     STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2517 #undef STRING_TYPE_ELEMENT
2518 };
2519
2520
2521 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2522 #define CONSTANT_STRING_ELEMENT(name, contents) \
2523   { contents, k##name##RootIndex }              \
2524   ,
2525     INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2526 #undef CONSTANT_STRING_ELEMENT
2527 };
2528
2529
2530 const Heap::StructTable Heap::struct_table[] = {
2531 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
2532   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2533   ,
2534     STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2535 #undef STRUCT_TABLE_ELEMENT
2536 };
2537
2538
2539 bool Heap::CreateInitialMaps() {
2540   HeapObject* obj;
2541   {
2542     AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2543     if (!allocation.To(&obj)) return false;
2544   }
2545   // Map::cast cannot be used due to uninitialized map field.
2546   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2547   set_meta_map(new_meta_map);
2548   new_meta_map->set_map(new_meta_map);
2549
2550   {  // Partial map allocation
2551 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
2552   {                                                                          \
2553     Map* map;                                                                \
2554     if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2555     set_##field_name##_map(map);                                             \
2556   }
2557
2558     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2559     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2560     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2561     ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2562                          constant_pool_array);
2563
2564 #undef ALLOCATE_PARTIAL_MAP
2565   }
2566
2567   // Allocate the empty array.
2568   {
2569     AllocationResult allocation = AllocateEmptyFixedArray();
2570     if (!allocation.To(&obj)) return false;
2571   }
2572   set_empty_fixed_array(FixedArray::cast(obj));
2573
2574   {
2575     AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2576     if (!allocation.To(&obj)) return false;
2577   }
2578   set_null_value(Oddball::cast(obj));
2579   Oddball::cast(obj)->set_kind(Oddball::kNull);
2580
2581   {
2582     AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2583     if (!allocation.To(&obj)) return false;
2584   }
2585   set_undefined_value(Oddball::cast(obj));
2586   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2587   DCHECK(!InNewSpace(undefined_value()));
2588
2589   // Set preliminary exception sentinel value before actually initializing it.
2590   set_exception(null_value());
2591
2592   // Allocate the empty descriptor array.
2593   {
2594     AllocationResult allocation = AllocateEmptyFixedArray();
2595     if (!allocation.To(&obj)) return false;
2596   }
2597   set_empty_descriptor_array(DescriptorArray::cast(obj));
2598
2599   // Allocate the constant pool array.
2600   {
2601     AllocationResult allocation = AllocateEmptyConstantPoolArray();
2602     if (!allocation.To(&obj)) return false;
2603   }
2604   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2605
2606   // Fix the instance_descriptors for the existing maps.
2607   meta_map()->set_code_cache(empty_fixed_array());
2608   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2609   meta_map()->init_back_pointer(undefined_value());
2610   meta_map()->set_instance_descriptors(empty_descriptor_array());
2611   if (FLAG_unbox_double_fields) {
2612     meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2613   }
2614
2615   fixed_array_map()->set_code_cache(empty_fixed_array());
2616   fixed_array_map()->set_dependent_code(
2617       DependentCode::cast(empty_fixed_array()));
2618   fixed_array_map()->init_back_pointer(undefined_value());
2619   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2620   if (FLAG_unbox_double_fields) {
2621     fixed_array_map()->set_layout_descriptor(
2622         LayoutDescriptor::FastPointerLayout());
2623   }
2624
2625   undefined_map()->set_code_cache(empty_fixed_array());
2626   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2627   undefined_map()->init_back_pointer(undefined_value());
2628   undefined_map()->set_instance_descriptors(empty_descriptor_array());
2629   if (FLAG_unbox_double_fields) {
2630     undefined_map()->set_layout_descriptor(
2631         LayoutDescriptor::FastPointerLayout());
2632   }
2633
2634   null_map()->set_code_cache(empty_fixed_array());
2635   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2636   null_map()->init_back_pointer(undefined_value());
2637   null_map()->set_instance_descriptors(empty_descriptor_array());
2638   if (FLAG_unbox_double_fields) {
2639     null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2640   }
2641
2642   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2643   constant_pool_array_map()->set_dependent_code(
2644       DependentCode::cast(empty_fixed_array()));
2645   constant_pool_array_map()->init_back_pointer(undefined_value());
2646   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2647   if (FLAG_unbox_double_fields) {
2648     constant_pool_array_map()->set_layout_descriptor(
2649         LayoutDescriptor::FastPointerLayout());
2650   }
2651
2652   // Fix prototype object for existing maps.
2653   meta_map()->set_prototype(null_value());
2654   meta_map()->set_constructor(null_value());
2655
2656   fixed_array_map()->set_prototype(null_value());
2657   fixed_array_map()->set_constructor(null_value());
2658
2659   undefined_map()->set_prototype(null_value());
2660   undefined_map()->set_constructor(null_value());
2661
2662   null_map()->set_prototype(null_value());
2663   null_map()->set_constructor(null_value());
2664
2665   constant_pool_array_map()->set_prototype(null_value());
2666   constant_pool_array_map()->set_constructor(null_value());
2667
2668   {  // Map allocation
2669 #define ALLOCATE_MAP(instance_type, size, field_name)               \
2670   {                                                                 \
2671     Map* map;                                                       \
2672     if (!AllocateMap((instance_type), size).To(&map)) return false; \
2673     set_##field_name##_map(map);                                    \
2674   }
2675
2676 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2677   ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2678
2679     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2680     DCHECK(fixed_array_map() != fixed_cow_array_map());
2681
2682     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2683     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2684     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2685                  mutable_heap_number)
2686     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2687     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2688
2689     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2690     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2691     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2692     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2693     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2694     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2695     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2696
2697     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2698       const StringTypeTable& entry = string_type_table[i];
2699       {
2700         AllocationResult allocation = AllocateMap(entry.type, entry.size);
2701         if (!allocation.To(&obj)) return false;
2702       }
2703       // Mark cons string maps as unstable, because their objects can change
2704       // maps during GC.
2705       Map* map = Map::cast(obj);
2706       if (StringShape(entry.type).IsCons()) map->mark_unstable();
2707       roots_[entry.index] = map;
2708     }
2709
2710     {  // Create a separate external one byte string map for native sources.
2711       AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
2712                                                 ExternalOneByteString::kSize);
2713       if (!allocation.To(&obj)) return false;
2714       set_native_source_string_map(Map::cast(obj));
2715     }
2716
2717     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2718     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2719     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2720
2721 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
2722   ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2723                external_##type##_array)
2724
2725     TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2726 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2727
2728 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2729   ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2730
2731     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2732 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2733
2734     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2735
2736     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2737
2738     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2739     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2740     ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2741     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2742     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2743
2744
2745     for (unsigned i = 0; i < arraysize(struct_table); i++) {
2746       const StructTable& entry = struct_table[i];
2747       Map* map;
2748       if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2749       roots_[entry.index] = map;
2750     }
2751
2752     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2753     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2754
2755     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2756     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2757     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2758     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2759     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2760     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2761     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2762
2763     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2764     native_context_map()->set_dictionary_map(true);
2765     native_context_map()->set_visitor_id(
2766         StaticVisitorBase::kVisitNativeContext);
2767
2768     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2769                  shared_function_info)
2770
2771     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2772     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2773     external_map()->set_is_extensible(false);
2774 #undef ALLOCATE_VARSIZE_MAP
2775 #undef ALLOCATE_MAP
2776   }
2777
2778   {  // Empty arrays
2779     {
2780       ByteArray* byte_array;
2781       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2782       set_empty_byte_array(byte_array);
2783     }
2784
2785 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
2786   {                                                                   \
2787     ExternalArray* obj;                                               \
2788     if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2789       return false;                                                   \
2790     set_empty_external_##type##_array(obj);                           \
2791   }
2792
2793     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2794 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2795
2796 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2797   {                                                                     \
2798     FixedTypedArrayBase* obj;                                           \
2799     if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2800       return false;                                                     \
2801     set_empty_fixed_##type##_array(obj);                                \
2802   }
2803
2804     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2805 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2806   }
2807   DCHECK(!InNewSpace(empty_fixed_array()));
2808   return true;
2809 }
2810
2811
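     // Allocates a HeapNumber holding |value|. The target space is chosen via
     // SelectSpace() based on |pretenure|, and the map is the mutable or the
     // immutable heap number map depending on |mode|.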
2812 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2813                                           PretenureFlag pretenure) {
2814   // Statically ensure that it is safe to allocate heap numbers in paged
2815   // spaces.
2816   int size = HeapNumber::kSize;
2817   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2818
2819   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2820
2821   HeapObject* result;
2822   {
2823     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2824     if (!allocation.To(&result)) return allocation;
2825   }
2826
2827   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2828   HeapObject::cast(result)->set_map_no_write_barrier(map);
2829   HeapNumber::cast(result)->set_value(value);
2830   return result;
2831 }
2832
2833
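     // Allocates a Cell in CELL_SPACE and stores |value| in it.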
2834 AllocationResult Heap::AllocateCell(Object* value) {
2835   int size = Cell::kSize;
2836   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2837
2838   HeapObject* result;
2839   {
2840     AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2841     if (!allocation.To(&result)) return allocation;
2842   }
2843   result->set_map_no_write_barrier(cell_map());
2844   Cell::cast(result)->set_value(value);
2845   return result;
2846 }
2847
2848
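     // Allocates a PropertyCell in PROPERTY_CELL_SPACE, initialized with an empty
     // dependent-code list, the hole as its value and HeapType::None() as its type.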
2849 AllocationResult Heap::AllocatePropertyCell() {
2850   int size = PropertyCell::kSize;
2851   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2852
2853   HeapObject* result;
2854   AllocationResult allocation =
2855       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2856   if (!allocation.To(&result)) return allocation;
2857
2858   result->set_map_no_write_barrier(global_property_cell_map());
2859   PropertyCell* cell = PropertyCell::cast(result);
2860   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2861                            SKIP_WRITE_BARRIER);
2862   cell->set_value(the_hole_value());
2863   cell->set_type(HeapType::None());
2864   return result;
2865 }
2866
2867
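     // Allocates a WeakCell in old pointer space that initially holds |value| and
     // has undefined as its next link.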
2868 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2869   int size = WeakCell::kSize;
2870   STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
2871   HeapObject* result;
2872   {
2873     AllocationResult allocation =
2874         AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
2875     if (!allocation.To(&result)) return allocation;
2876   }
2877   result->set_map_no_write_barrier(weak_cell_map());
2878   WeakCell::cast(result)->initialize(value);
2879   WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
2880   return result;
2881 }
2882
2883
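     // Creates the heap objects used by the API: the neander map (a plain
     // JS_OBJECT_TYPE map for internal objects) and the initial message listeners
     // list, backed by a two-element FixedArray.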
2884 void Heap::CreateApiObjects() {
2885   HandleScope scope(isolate());
2886   Factory* factory = isolate()->factory();
2887   Handle<Map> new_neander_map =
2888       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2889
2890   // Don't use Smi-only elements optimizations for objects with the neander
2891   // map. There are too many cases where element values are set directly with a
2892   // bottleneck to trap the Smi-only -> fast elements transition, and there
2893   // appears to be no benefit in optimizing this case.
2894   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2895   set_neander_map(*new_neander_map);
2896
2897   Handle<JSObject> listeners = factory->NewNeanderObject();
2898   Handle<FixedArray> elements = factory->NewFixedArray(2);
2899   elements->set(0, Smi::FromInt(0));
2900   listeners->set_elements(*elements);
2901   set_message_listeners(*listeners);
2902 }
2903
2904
2905 void Heap::CreateJSEntryStub() {
2906   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2907   set_js_entry_code(*stub.GetCode());
2908 }
2909
2910
2911 void Heap::CreateJSConstructEntryStub() {
2912   JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2913   set_js_construct_entry_code(*stub.GetCode());
2914 }
2915
2916
2917 void Heap::CreateFixedStubs() {
2918   // Here we create roots for fixed stubs. They are needed at GC
2919   // for cooking and uncooking (check out frames.cc).
2920   // This eliminates the need for doing a dictionary lookup in the
2921   // stub cache for these stubs.
2922   HandleScope scope(isolate());
2923
2924   // Create stubs that should be there, so we don't unexpectedly have to
2925   // create them if we need them during the creation of another stub.
2926   // Stub creation mixes raw pointers and handles in an unsafe manner so
2927   // we cannot create stubs while we are creating stubs.
2928   CodeStub::GenerateStubsAheadOfTime(isolate());
2929
2930   // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2931   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2932   // is created.
2933
2934   // gcc-4.4 has problems generating correct code for the following snippet:
2935   // {  JSEntryStub stub;
2936   //    js_entry_code_ = *stub.GetCode();
2937   // }
2938   // {  JSConstructEntryStub stub;
2939   //    js_construct_entry_code_ = *stub.GetCode();
2940   // }
2941   // To work around the problem, make separate functions without inlining.
2942   Heap::CreateJSEntryStub();
2943   Heap::CreateJSConstructEntryStub();
2944 }
2945
2946
2947 void Heap::CreateInitialObjects() {
2948   HandleScope scope(isolate());
2949   Factory* factory = isolate()->factory();
2950
2951   // The -0 value must be set before NewNumber works.
2952   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2953   DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2954
2955   set_nan_value(*factory->NewHeapNumber(
2956       std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2957   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2958
2959   // The hole has not been created yet, but we want to put something
2960   // predictable in the gaps in the string table, so let's make that Smi zero.
2961   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2962
2963   // Allocate initial string table.
2964   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2965
2966   // Finish initializing oddballs after creating the string table.
2967   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2968                       factory->nan_value(), Oddball::kUndefined);
2969
2970   // Initialize the null_value.
2971   Oddball::Initialize(isolate(), factory->null_value(), "null",
2972                       handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2973
2974   set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2975                                       handle(Smi::FromInt(1), isolate()),
2976                                       Oddball::kTrue));
2977
2978   set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2979                                        handle(Smi::FromInt(0), isolate()),
2980                                        Oddball::kFalse));
2981
2982   set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2983                                           handle(Smi::FromInt(-1), isolate()),
2984                                           Oddball::kTheHole));
2985
2986   set_uninitialized_value(*factory->NewOddball(
2987       factory->uninitialized_map(), "uninitialized",
2988       handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2989
2990   set_arguments_marker(*factory->NewOddball(
2991       factory->arguments_marker_map(), "arguments_marker",
2992       handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2993
2994   set_no_interceptor_result_sentinel(*factory->NewOddball(
2995       factory->no_interceptor_result_sentinel_map(),
2996       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2997       Oddball::kOther));
2998
2999   set_termination_exception(*factory->NewOddball(
3000       factory->termination_exception_map(), "termination_exception",
3001       handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
3002
3003   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
3004                                      handle(Smi::FromInt(-5), isolate()),
3005                                      Oddball::kException));
3006
3007   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
3008     Handle<String> str =
3009         factory->InternalizeUtf8String(constant_string_table[i].contents);
3010     roots_[constant_string_table[i].index] = *str;
3011   }
3012
3013   // Allocate the hidden string which is used to identify the hidden properties
3014   // in JSObjects. The hash code has a special value so that it will not match
3015   // the empty string when searching for the property. It cannot be part of the
3016   // loop above because it needs to be allocated manually with the special
3017   // hash code in place. The hash code for the hidden_string is zero to ensure
3018   // that it will always be at the first entry in property descriptors.
3019   hidden_string_ = *factory->NewOneByteInternalizedString(
3020       OneByteVector("", 0), String::kEmptyStringHash);
3021
3022   // Create the code_stubs dictionary. The initial size is set to avoid
3023   // expanding the dictionary during bootstrapping.
3024   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
3025
3026   // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
3027   // is set to avoid expanding the dictionary during bootstrapping.
3028   set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
3029
3030   set_polymorphic_code_cache(PolymorphicCodeCache::cast(
3031       *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
3032
3033   set_instanceof_cache_function(Smi::FromInt(0));
3034   set_instanceof_cache_map(Smi::FromInt(0));
3035   set_instanceof_cache_answer(Smi::FromInt(0));
3036
3037   {
3038     HandleScope scope(isolate());
3039 #define SYMBOL_INIT(name)                               \
3040   Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
3041   roots_[k##name##RootIndex] = *name;
3042     PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
3043 #undef SYMBOL_INIT
3044   }
3045
3046   {
3047     HandleScope scope(isolate());
3048 #define SYMBOL_INIT(name, varname, description)                             \
3049   Handle<Symbol> name = factory->NewSymbol();                               \
3050   Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
3051   name->set_name(*name##d);                                                 \
3052   roots_[k##name##RootIndex] = *name;
3053     PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
3054 #undef SYMBOL_INIT
3055   }
3056
3057   CreateFixedStubs();
3058
3059   // Allocate the dictionary of intrinsic function names.
3060   Handle<NameDictionary> intrinsic_names =
3061       NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
3062   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
3063   set_intrinsic_function_names(*intrinsic_names);
3064
3065   set_number_string_cache(
3066       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
3067
3068   // Allocate cache for single character one byte strings.
3069   set_single_character_string_cache(
3070       *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
3071
3072   // Allocate cache for string split and regexp-multiple.
3073   set_string_split_cache(*factory->NewFixedArray(
3074       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3075   set_regexp_multiple_cache(*factory->NewFixedArray(
3076       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3077
3078   // Allocate cache for external strings pointing to native source code.
3079   set_natives_source_cache(
3080       *factory->NewFixedArray(Natives::GetBuiltinsCount()));
3081
3082   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
3083
3084   // The symbol registry is initialized lazily.
3085   set_symbol_registry(Smi::FromInt(0));
3086
3087   // Allocate object to hold object observation state.
3088   set_observation_state(*factory->NewJSObjectFromMap(
3089       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
3090
3091   // Microtask queue uses the empty fixed array as a sentinel for "empty".
3092   // Number of queued microtasks stored in Isolate::pending_microtask_count().
3093   set_microtask_queue(empty_fixed_array());
3094
3095   if (FLAG_vector_ics) {
3096     FeedbackVectorSpec spec(0, 1);
3097     spec.SetKind(0, Code::KEYED_LOAD_IC);
3098     Handle<TypeFeedbackVector> dummy_vector =
3099         factory->NewTypeFeedbackVector(spec);
3100     dummy_vector->Set(FeedbackVectorICSlot(0),
3101                       *TypeFeedbackVector::MegamorphicSentinel(isolate()),
3102                       SKIP_WRITE_BARRIER);
3103     set_keyed_load_dummy_vector(*dummy_vector);
3104   } else {
3105     set_keyed_load_dummy_vector(empty_fixed_array());
3106   }
3107
3108   set_detached_contexts(empty_fixed_array());
3109
3110   set_weak_object_to_code_table(
3111       *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
3112                           TENURED));
3113
3114   Handle<SeededNumberDictionary> slow_element_dictionary =
3115       SeededNumberDictionary::New(isolate(), 0, TENURED);
3116   slow_element_dictionary->set_requires_slow_elements();
3117   set_empty_slow_element_dictionary(*slow_element_dictionary);
3118
3119   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
3120
3121   // Handling of script id generation is in Factory::NewScript.
3122   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
3123
3124   set_allocation_sites_scratchpad(
3125       *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
3126   InitializeAllocationSitesScratchpad();
3127
3128   // Initialize keyed lookup cache.
3129   isolate_->keyed_lookup_cache()->Clear();
3130
3131   // Initialize context slot cache.
3132   isolate_->context_slot_cache()->Clear();
3133
3134   // Initialize descriptor cache.
3135   isolate_->descriptor_lookup_cache()->Clear();
3136
3137   // Initialize compilation cache.
3138   isolate_->compilation_cache()->Clear();
3139 }
3140
3141
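     // Returns true for the small set of roots that may legitimately be written
     // after the heap has been initialized (caches, stack limits, deopt PC offsets,
     // the string table, etc.); all other roots are expected to stay unchanged.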
3142 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3143   RootListIndex writable_roots[] = {
3144       kStoreBufferTopRootIndex,
3145       kStackLimitRootIndex,
3146       kNumberStringCacheRootIndex,
3147       kInstanceofCacheFunctionRootIndex,
3148       kInstanceofCacheMapRootIndex,
3149       kInstanceofCacheAnswerRootIndex,
3150       kCodeStubsRootIndex,
3151       kNonMonomorphicCacheRootIndex,
3152       kPolymorphicCodeCacheRootIndex,
3153       kLastScriptIdRootIndex,
3154       kEmptyScriptRootIndex,
3155       kRealStackLimitRootIndex,
3156       kArgumentsAdaptorDeoptPCOffsetRootIndex,
3157       kConstructStubDeoptPCOffsetRootIndex,
3158       kGetterStubDeoptPCOffsetRootIndex,
3159       kSetterStubDeoptPCOffsetRootIndex,
3160       kStringTableRootIndex,
3161   };
3162
3163   for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
3164     if (root_index == writable_roots[i]) return true;
3165   }
3166   return false;
3167 }
3168
3169
3170 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3171   return !RootCanBeWrittenAfterInitialization(root_index) &&
3172          !InNewSpace(roots_array_start()[root_index]);
3173 }
3174
3175
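     // Looks up a cached result for (key_string, key_pattern). The cache is a flat
     // FixedArray probed at two positions per key: the primary entry derived from
     // the string hash and the immediately following entry. Returns Smi zero on a
     // miss.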
3176 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
3177                                    Object* key_pattern, ResultsCacheType type) {
3178   FixedArray* cache;
3179   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3180   if (type == STRING_SPLIT_SUBSTRINGS) {
3181     DCHECK(key_pattern->IsString());
3182     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3183     cache = heap->string_split_cache();
3184   } else {
3185     DCHECK(type == REGEXP_MULTIPLE_INDICES);
3186     DCHECK(key_pattern->IsFixedArray());
3187     cache = heap->regexp_multiple_cache();
3188   }
3189
3190   uint32_t hash = key_string->Hash();
3191   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3192                     ~(kArrayEntriesPerCacheEntry - 1));
3193   if (cache->get(index + kStringOffset) == key_string &&
3194       cache->get(index + kPatternOffset) == key_pattern) {
3195     return cache->get(index + kArrayOffset);
3196   }
3197   index =
3198       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3199   if (cache->get(index + kStringOffset) == key_string &&
3200       cache->get(index + kPatternOffset) == key_pattern) {
3201     return cache->get(index + kArrayOffset);
3202   }
3203   return Smi::FromInt(0);
3204 }
3205
3206
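     // Inserts (key_string, key_pattern) -> value_array into the results cache,
     // using the same two-entry probing scheme as Lookup(). If both entries are
     // occupied, the secondary entry is cleared and the primary one is overwritten.
     // Short string-split results are internalized, and the backing store is
     // converted to a copy-on-write array.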
3207 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3208                                Handle<Object> key_pattern,
3209                                Handle<FixedArray> value_array,
3210                                ResultsCacheType type) {
3211   Factory* factory = isolate->factory();
3212   Handle<FixedArray> cache;
3213   if (!key_string->IsInternalizedString()) return;
3214   if (type == STRING_SPLIT_SUBSTRINGS) {
3215     DCHECK(key_pattern->IsString());
3216     if (!key_pattern->IsInternalizedString()) return;
3217     cache = factory->string_split_cache();
3218   } else {
3219     DCHECK(type == REGEXP_MULTIPLE_INDICES);
3220     DCHECK(key_pattern->IsFixedArray());
3221     cache = factory->regexp_multiple_cache();
3222   }
3223
3224   uint32_t hash = key_string->Hash();
3225   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3226                     ~(kArrayEntriesPerCacheEntry - 1));
3227   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3228     cache->set(index + kStringOffset, *key_string);
3229     cache->set(index + kPatternOffset, *key_pattern);
3230     cache->set(index + kArrayOffset, *value_array);
3231   } else {
3232     uint32_t index2 =
3233         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3234     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3235       cache->set(index2 + kStringOffset, *key_string);
3236       cache->set(index2 + kPatternOffset, *key_pattern);
3237       cache->set(index2 + kArrayOffset, *value_array);
3238     } else {
3239       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3240       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3241       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3242       cache->set(index + kStringOffset, *key_string);
3243       cache->set(index + kPatternOffset, *key_pattern);
3244       cache->set(index + kArrayOffset, *value_array);
3245     }
3246   }
3247   // If the array is a reasonably short list of substrings, convert it into a
3248   // list of internalized strings.
3249   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3250     for (int i = 0; i < value_array->length(); i++) {
3251       Handle<String> str(String::cast(value_array->get(i)), isolate);
3252       Handle<String> internalized_str = factory->InternalizeString(str);
3253       value_array->set(i, *internalized_str);
3254     }
3255   }
3256   // Convert backing store to a copy-on-write array.
3257   value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3258 }
3259
3260
3261 void RegExpResultsCache::Clear(FixedArray* cache) {
3262   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3263     cache->set(i, Smi::FromInt(0));
3264   }
3265 }
3266
3267
3268 int Heap::FullSizeNumberStringCacheLength() {
3269   // Compute the size of the number string cache based on the max new-space size.
3270   // The number string cache has a minimum size of twice the initial cache size
3271   // to ensure that it is bigger after being made 'full size'.
3272   int number_string_cache_size = max_semi_space_size_ / 512;
3273   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3274                                  Min(0x4000, number_string_cache_size));
3275   // There is a string and a number per entry so the length is twice the number
3276   // of entries.
3277   return number_string_cache_size * 2;
3278 }
3279
3280
3281 void Heap::FlushNumberStringCache() {
3282   // Flush the number to string cache.
3283   int len = number_string_cache()->length();
3284   for (int i = 0; i < len; i++) {
3285     number_string_cache()->set_undefined(i);
3286   }
3287 }
3288
3289
3290 void Heap::FlushAllocationSitesScratchpad() {
3291   for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3292     allocation_sites_scratchpad()->set_undefined(i);
3293   }
3294   allocation_sites_scratchpad_length_ = 0;
3295 }
3296
3297
3298 void Heap::InitializeAllocationSitesScratchpad() {
3299   DCHECK(allocation_sites_scratchpad()->length() ==
3300          kAllocationSiteScratchpadSize);
3301   for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3302     allocation_sites_scratchpad()->set_undefined(i);
3303   }
3304 }
3305
3306
3307 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3308                                          ScratchpadSlotMode mode) {
3309   if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3310     // We cannot use the normal write-barrier because slots need to be
3311     // recorded with non-incremental marking as well. We have to explicitly
3312     // record the slot to take evacuation candidates into account.
3313     allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3314                                        site, SKIP_WRITE_BARRIER);
3315     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3316         allocation_sites_scratchpad_length_);
3317
3318     if (mode == RECORD_SCRATCHPAD_SLOT) {
3319       // We need to allow slots buffer overflow here since the evacuation
3320       // candidates are not part of the global list of old space pages and
3321       // releasing an evacuation candidate due to a slots buffer overflow
3322       // results in lost pages.
3323       mark_compact_collector()->RecordSlot(slot, slot, *slot,
3324                                            SlotsBuffer::IGNORE_OVERFLOW);
3325     }
3326     allocation_sites_scratchpad_length_++;
3327   }
3328 }
3329
3330
3331 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3332   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3333 }
3334
3335
3336 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3337     ExternalArrayType array_type) {
3338   switch (array_type) {
3339 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3340   case kExternal##Type##Array:                                  \
3341     return kExternal##Type##ArrayMapRootIndex;
3342
3343     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3344 #undef ARRAY_TYPE_TO_ROOT_INDEX
3345
3346     default:
3347       UNREACHABLE();
3348       return kUndefinedValueRootIndex;
3349   }
3350 }
3351
3352
3353 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3354   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3355 }
3356
3357
3358 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3359     ExternalArrayType array_type) {
3360   switch (array_type) {
3361 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3362   case kExternal##Type##Array:                                  \
3363     return kFixed##Type##ArrayMapRootIndex;
3364
3365     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3366 #undef ARRAY_TYPE_TO_ROOT_INDEX
3367
3368     default:
3369       UNREACHABLE();
3370       return kUndefinedValueRootIndex;
3371   }
3372 }
3373
3374
3375 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3376     ElementsKind elementsKind) {
3377   switch (elementsKind) {
3378 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3379   case EXTERNAL_##TYPE##_ELEMENTS:                                \
3380     return kEmptyExternal##Type##ArrayRootIndex;
3381
3382     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3383 #undef ELEMENT_KIND_TO_ROOT_INDEX
3384
3385     default:
3386       UNREACHABLE();
3387       return kUndefinedValueRootIndex;
3388   }
3389 }
3390
3391
3392 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3393     ElementsKind elementsKind) {
3394   switch (elementsKind) {
3395 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3396   case TYPE##_ELEMENTS:                                           \
3397     return kEmptyFixed##Type##ArrayRootIndex;
3398
3399     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3400 #undef ELEMENT_KIND_TO_ROOT_INDEX
3401     default:
3402       UNREACHABLE();
3403       return kUndefinedValueRootIndex;
3404   }
3405 }
3406
3407
3408 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3409   return ExternalArray::cast(
3410       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3411 }
3412
3413
3414 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3415   return FixedTypedArrayBase::cast(
3416       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3417 }
3418
3419
3420 AllocationResult Heap::AllocateForeign(Address address,
3421                                        PretenureFlag pretenure) {
3422   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3423   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3424   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3425   Foreign* result;
3426   AllocationResult allocation = Allocate(foreign_map(), space);
3427   if (!allocation.To(&result)) return allocation;
3428   result->set_foreign_address(address);
3429   return result;
3430 }
3431
3432
3433 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3434   if (length < 0 || length > ByteArray::kMaxLength) {
3435     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3436   }
3437   int size = ByteArray::SizeFor(length);
3438   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3439   HeapObject* result;
3440   {
3441     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3442     if (!allocation.To(&result)) return allocation;
3443   }
3444
3445   result->set_map_no_write_barrier(byte_array_map());
3446   ByteArray::cast(result)->set_length(length);
3447   return result;
3448 }
3449
3450
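     // Writes a filler object of |size| bytes at |addr| so the heap stays iterable:
     // a one- or two-pointer filler for the two small sizes, and a FreeSpace object
     // carrying its size for anything larger.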
3451 void Heap::CreateFillerObjectAt(Address addr, int size) {
3452   if (size == 0) return;
3453   HeapObject* filler = HeapObject::FromAddress(addr);
3454   // At this point we may be deserializing the heap from a snapshot, and
3455   // the maps may not have been created yet, in which case they are NULL.
3456   if (size == kPointerSize) {
3457     filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
3458     DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
3459   } else if (size == 2 * kPointerSize) {
3460     filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
3461     DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
3462   } else {
3463     filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
3464     DCHECK(filler->map() == NULL || filler->map() == free_space_map());
3465     FreeSpace::cast(filler)->nobarrier_set_size(size);
3466   }
3467 }
3468
3469
3470 bool Heap::CanMoveObjectStart(HeapObject* object) {
3471   Address address = object->address();
3472   bool is_in_old_pointer_space = InOldPointerSpace(address);
3473   bool is_in_old_data_space = InOldDataSpace(address);
3474
3475   if (lo_space()->Contains(object)) return false;
3476
3477   Page* page = Page::FromAddress(address);
3478   // We can move the object start if any of the following holds:
3479   // (1) the object is not in old pointer or old data space,
3480   // (2) the page of the object was already swept,
3481   // (3) the page was already concurrently swept. This case is an optimization
3482   // for concurrent sweeping. The WasSwept predicate for concurrently swept
3483   // pages is set after sweeping all pages.
3484   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3485          page->WasSwept() || page->SweepingCompleted();
3486 }
3487
3488
3489 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3490   if (incremental_marking()->IsMarking() &&
3491       Marking::IsBlack(Marking::MarkBitFrom(address))) {
3492     if (mode == FROM_GC) {
3493       MemoryChunk::IncrementLiveBytesFromGC(address, by);
3494     } else {
3495       MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3496     }
3497   }
3498 }
3499
3500
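     // Removes |elements_to_trim| elements from the start of |object| in place by
     // moving the array header forward and covering the freed prefix with a filler.
     // Returns the new FixedArrayBase located at the old address plus the trimmed
     // bytes.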
3501 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3502                                          int elements_to_trim) {
3503   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3504   const int bytes_to_trim = elements_to_trim * element_size;
3505   Map* map = object->map();
3506
3507   // For now this trick is only applied to objects in new and paged space.
3508   // In large object space the object's start must coincide with the chunk
3509   // start, and thus the trick is just not applicable.
3510   DCHECK(!lo_space()->Contains(object));
3511   DCHECK(object->map() != fixed_cow_array_map());
3512
3513   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3514   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3515   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3516
3517   const int len = object->length();
3518   DCHECK(elements_to_trim <= len);
3519
3520   // Calculate location of new array start.
3521   Address new_start = object->address() + bytes_to_trim;
3522
3523   // Technically in new space this write might be omitted (except for
3524   // debug mode, which iterates through the heap), but to be on the safe
3525   // side we still do it.
3526   CreateFillerObjectAt(object->address(), bytes_to_trim);
3527
3528   // Initialize header of the trimmed array. Since left trimming is only
3529   // performed on pages which are not concurrently swept, creating a filler
3530   // object does not require synchronization.
3531   DCHECK(CanMoveObjectStart(object));
3532   Object** former_start = HeapObject::RawField(object, 0);
3533   int new_start_index = elements_to_trim * (element_size / kPointerSize);
3534   former_start[new_start_index] = map;
3535   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3536   FixedArrayBase* new_object =
3537       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3538
3539   // Maintain consistency of live bytes during incremental marking
3540   marking()->TransferMark(object->address(), new_start);
3541   AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3542
3543   // Notify the heap profiler of the change in object layout.
3544   OnMoveEvent(new_object, object, new_object->Size());
3545   return new_object;
3546 }
3547
3548
3549 // Force instantiation of templatized method.
3550 template
3551 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3552 template
3553 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3554
3555
3556 template<Heap::InvocationMode mode>
3557 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3558   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3559   const int bytes_to_trim = elements_to_trim * element_size;
3560
3561   // For now this trick is only applied to objects in new and paged space.
3562   DCHECK(object->map() != fixed_cow_array_map());
3563
3564   const int len = object->length();
3565   DCHECK(elements_to_trim < len);
3566
3567   // Calculate location of new array end.
3568   Address new_end = object->address() + object->Size() - bytes_to_trim;
3569
3570   // Technically in new space this write might be omitted (except for
3571   // debug mode, which iterates through the heap), but to be on the safe
3572   // side we still do it.
3573   // We do not create a filler for objects in large object space.
3574   // TODO(hpayer): We should shrink the large object page if the size
3575   // of the object changed significantly.
3576   if (!lo_space()->Contains(object)) {
3577     CreateFillerObjectAt(new_end, bytes_to_trim);
3578   }
3579
3580   // Initialize header of the trimmed array. We are storing the new length
3581   // using release store after creating a filler for the left-over space to
3582   // avoid races with the sweeper thread.
3583   object->synchronized_set_length(len - elements_to_trim);
3584
3585   // Maintain consistency of live bytes during incremental marking
3586   AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3587
3588   // Notify the heap profiler of the change in object layout. Even if the array
3589   // is not moved during GC, its size still has to be adjusted.
3590   HeapProfiler* profiler = isolate()->heap_profiler();
3591   if (profiler->is_tracking_allocations()) {
3592     profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3593   }
3594 }
3595
3596
3597 AllocationResult Heap::AllocateExternalArray(int length,
3598                                              ExternalArrayType array_type,
3599                                              void* external_pointer,
3600                                              PretenureFlag pretenure) {
3601   int size = ExternalArray::kAlignedSize;
3602   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3603   HeapObject* result;
3604   {
3605     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3606     if (!allocation.To(&result)) return allocation;
3607   }
3608
3609   result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3610   ExternalArray::cast(result)->set_length(length);
3611   ExternalArray::cast(result)->set_external_pointer(external_pointer);
3612   return result;
3613 }
3614
3615 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3616                                ElementsKind* element_kind) {
3617   switch (array_type) {
3618 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3619   case kExternal##Type##Array:                          \
3620     *element_size = size;                               \
3621     *element_kind = TYPE##_ELEMENTS;                    \
3622     return;
3623
3624     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3625 #undef TYPED_ARRAY_CASE
3626
3627     default:
3628       *element_size = 0;               // Bogus
3629       *element_kind = UINT8_ELEMENTS;  // Bogus
3630       UNREACHABLE();
3631   }
3632 }
3633
3634
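     // Allocates a FixedTypedArray with zero-initialized storage. On 32-bit hosts
     // an extra pointer worth of space is reserved for Float64 arrays, which
     // EnsureDoubleAligned() then aligns to a double boundary.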
3635 AllocationResult Heap::AllocateFixedTypedArray(int length,
3636                                                ExternalArrayType array_type,
3637                                                PretenureFlag pretenure) {
3638   int element_size;
3639   ElementsKind elements_kind;
3640   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3641   int size = OBJECT_POINTER_ALIGN(length * element_size +
3642                                   FixedTypedArrayBase::kDataOffset);
3643 #ifndef V8_HOST_ARCH_64_BIT
3644   if (array_type == kExternalFloat64Array) {
3645     size += kPointerSize;
3646   }
3647 #endif
3648   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3649
3650   HeapObject* object;
3651   AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3652   if (!allocation.To(&object)) return allocation;
3653
3654   if (array_type == kExternalFloat64Array) {
3655     object = EnsureDoubleAligned(this, object, size);
3656   }
3657
3658   object->set_map(MapForFixedTypedArray(array_type));
3659   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3660   elements->set_length(length);
3661   memset(elements->DataPtr(), 0, elements->DataSize());
3662   return elements;
3663 }
3664
3665
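     // Allocates a Code object of |object_size| bytes in code space. If the object
     // must be immovable and the first attempt did not land on the first page of
     // code space (or in large object space), the allocation is redone in large
     // object space, where the object will not be moved.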
3666 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3667   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3668   AllocationResult allocation =
3669       AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3670
3671   HeapObject* result;
3672   if (!allocation.To(&result)) return allocation;
3673
3674   if (immovable) {
3675     Address address = result->address();
3676     // Code objects which should stay at a fixed address are allocated either
3677     // in the first page of code space (objects on the first page of each space
3678     // are never moved) or in large object space.
3679     if (!code_space_->FirstPage()->Contains(address) &&
3680         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3681       // Discard the first code allocation, which was on a page where it could
3682       // be moved.
3683       CreateFillerObjectAt(result->address(), object_size);
3684       allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3685       if (!allocation.To(&result)) return allocation;
3686       OnAllocationEvent(result, object_size);
3687     }
3688   }
3689
3690   result->set_map_no_write_barrier(code_map());
3691   Code* code = Code::cast(result);
3692   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3693          isolate_->code_range()->contains(code->address()));
3694   code->set_gc_metadata(Smi::FromInt(0));
3695   code->set_ic_age(global_ic_age_);
3696   return code;
3697 }
3698
3699
3700 AllocationResult Heap::CopyCode(Code* code) {
3701   AllocationResult allocation;
3702   HeapObject* new_constant_pool;
3703   if (FLAG_enable_ool_constant_pool &&
3704       code->constant_pool() != empty_constant_pool_array()) {
3705     // Copy the constant pool, since edits to the copied code may modify
3706     // the constant pool.
3707     allocation = CopyConstantPoolArray(code->constant_pool());
3708     if (!allocation.To(&new_constant_pool)) return allocation;
3709   } else {
3710     new_constant_pool = empty_constant_pool_array();
3711   }
3712
3713   HeapObject* result;
3714   // Allocate an object the same size as the code object.
3715   int obj_size = code->Size();
3716   allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3717   if (!allocation.To(&result)) return allocation;
3718
3719   // Copy code object.
3720   Address old_addr = code->address();
3721   Address new_addr = result->address();
3722   CopyBlock(new_addr, old_addr, obj_size);
3723   Code* new_code = Code::cast(result);
3724
3725   // Update the constant pool.
3726   new_code->set_constant_pool(new_constant_pool);
3727
3728   // Relocate the copy.
3729   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3730          isolate_->code_range()->contains(code->address()));
3731   new_code->Relocate(new_addr - old_addr);
3732   return new_code;
3733 }
3734
3735
3736 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3737   // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3738   // do not risk leaving an uninitialized Code object (and breaking the heap).
3739   ByteArray* reloc_info_array;
3740   {
3741     AllocationResult allocation =
3742         AllocateByteArray(reloc_info.length(), TENURED);
3743     if (!allocation.To(&reloc_info_array)) return allocation;
3744   }
3745   HeapObject* new_constant_pool;
3746   if (FLAG_enable_ool_constant_pool &&
3747       code->constant_pool() != empty_constant_pool_array()) {
3748     // Copy the constant pool, since edits to the copied code may modify
3749     // the constant pool.
3750     AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3751     if (!allocation.To(&new_constant_pool)) return allocation;
3752   } else {
3753     new_constant_pool = empty_constant_pool_array();
3754   }
3755
3756   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3757
3758   int new_obj_size = Code::SizeFor(new_body_size);
3759
3760   Address old_addr = code->address();
3761
3762   size_t relocation_offset =
3763       static_cast<size_t>(code->instruction_end() - old_addr);
3764
3765   HeapObject* result;
3766   AllocationResult allocation =
3767       AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3768   if (!allocation.To(&result)) return allocation;
3769
3770   // Copy code object.
3771   Address new_addr = result->address();
3772
3773   // Copy header and instructions.
3774   CopyBytes(new_addr, old_addr, relocation_offset);
3775
3776   Code* new_code = Code::cast(result);
3777   new_code->set_relocation_info(reloc_info_array);
3778
3779   // Update constant pool.
3780   new_code->set_constant_pool(new_constant_pool);
3781
3782   // Copy patched rinfo.
3783   CopyBytes(new_code->relocation_start(), reloc_info.start(),
3784             static_cast<size_t>(reloc_info.length()));
3785
3786   // Relocate the copy.
3787   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3788          isolate_->code_range()->contains(code->address()));
3789   new_code->Relocate(new_addr - old_addr);
3790
3791 #ifdef VERIFY_HEAP
3792   if (FLAG_verify_heap) code->ObjectVerify();
3793 #endif
3794   return new_code;
3795 }
3796
3797
3798 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3799                                        AllocationSite* allocation_site) {
3800   memento->set_map_no_write_barrier(allocation_memento_map());
3801   DCHECK(allocation_site->map() == allocation_site_map());
3802   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3803   if (FLAG_allocation_site_pretenuring) {
3804     allocation_site->IncrementMementoCreateCount();
3805   }
3806 }
3807
3808
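     // Generic allocation of a heap object described by |map|. If |allocation_site|
     // is given, room for an AllocationMemento is reserved directly behind the
     // object and the memento is initialized to point at the site.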
3809 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3810                                 AllocationSite* allocation_site) {
3811   DCHECK(gc_state_ == NOT_IN_GC);
3812   DCHECK(map->instance_type() != MAP_TYPE);
3813   // If allocation failures are disallowed, we may allocate in a different
3814   // space when new space is full and the object is not a large object.
3815   AllocationSpace retry_space =
3816       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3817   int size = map->instance_size();
3818   if (allocation_site != NULL) {
3819     size += AllocationMemento::kSize;
3820   }
3821   HeapObject* result;
3822   AllocationResult allocation = AllocateRaw(size, space, retry_space);
3823   if (!allocation.To(&result)) return allocation;
3824   // No need for write barrier since object is white and map is in old space.
3825   result->set_map_no_write_barrier(map);
3826   if (allocation_site != NULL) {
3827     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3828         reinterpret_cast<Address>(result) + map->instance_size());
3829     InitializeAllocationMemento(alloc_memento, allocation_site);
3830   }
3831   return result;
3832 }
3833
3834
3835 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3836                                      Map* map) {
3837   obj->set_properties(properties);
3838   obj->initialize_elements();
3839   // TODO(1240798): Initialize the object's body using valid initial values
3840   // according to the object's initial map.  For example, if the map's
3841   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3842   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3843   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
3844   // verification code has to cope with (temporarily) invalid objects.  See,
3845   // for example, JSArray::JSArrayVerify.
3846   Object* filler;
3847   // We cannot always fill with one_pointer_filler_map because objects
3848   // created from API functions expect their internal fields to be initialized
3849   // with undefined_value.
3850   // Pre-allocated fields need to be initialized with undefined_value as well
3851   // so that object accesses before the constructor completes (e.g. in the
3852   // debugger) will not cause a crash.
3853   if (map->constructor()->IsJSFunction() &&
3854       JSFunction::cast(map->constructor())
3855           ->IsInobjectSlackTrackingInProgress()) {
3856     // We might want to shrink the object later.
3857     DCHECK(obj->GetInternalFieldCount() == 0);
3858     filler = Heap::one_pointer_filler_map();
3859   } else {
3860     filler = Heap::undefined_value();
3861   }
3862   obj->InitializeBody(map, Heap::undefined_value(), filler);
3863 }
3864
3865
3866 AllocationResult Heap::AllocateJSObjectFromMap(
3867     Map* map, PretenureFlag pretenure, bool allocate_properties,
3868     AllocationSite* allocation_site) {
3869   // JSFunctions should be allocated using AllocateFunction to be
3870   // properly initialized.
3871   DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3872
3873   // Both types of global objects should be allocated using
3874   // AllocateGlobalObject to be properly initialized.
3875   DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3876   DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3877
3878   // Allocate the backing storage for the properties.
3879   FixedArray* properties;
3880   if (allocate_properties) {
3881     int prop_size = map->InitialPropertiesLength();
3882     DCHECK(prop_size >= 0);
3883     {
3884       AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3885       if (!allocation.To(&properties)) return allocation;
3886     }
3887   } else {
3888     properties = empty_fixed_array();
3889   }
3890
3891   // Allocate the JSObject.
3892   int size = map->instance_size();
3893   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3894   JSObject* js_obj;
3895   AllocationResult allocation = Allocate(map, space, allocation_site);
3896   if (!allocation.To(&js_obj)) return allocation;
3897
3898   // Initialize the JSObject.
3899   InitializeJSObjectFromMap(js_obj, properties, map);
3900   DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3901          js_obj->HasFixedTypedArrayElements());
3902   return js_obj;
3903 }
3904
3905
3906 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3907                                         PretenureFlag pretenure,
3908                                         AllocationSite* allocation_site) {
3909   DCHECK(constructor->has_initial_map());
3910
3911   // Allocate the object based on the constructor's initial map.
3912   AllocationResult allocation = AllocateJSObjectFromMap(
3913       constructor->initial_map(), pretenure, true, allocation_site);
3914 #ifdef DEBUG
3915   // Make sure result is NOT a global object if valid.
3916   HeapObject* obj;
3917   DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3918 #endif
3919   return allocation;
3920 }
3921
3922
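     // Clones |source|, which must be a plain JSObject or a JSArray. The fast path
     // allocates the clone in new space and copies it without write barriers; under
     // always_allocate() the clone may end up in old space, in which case the tagged
     // fields are registered via RecordWrites(). Elements and properties backing
     // stores are copied as well (copy-on-write element arrays are shared).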
3923 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3924   // Make the clone.
3925   Map* map = source->map();
3926
3927   // We can only clone normal objects or arrays. Copying anything else
3928   // will break invariants.
3929   CHECK(map->instance_type() == JS_OBJECT_TYPE ||
3930         map->instance_type() == JS_ARRAY_TYPE);
3931
3932   int object_size = map->instance_size();
3933   HeapObject* clone;
3934
3935   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3936
3937   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3938
3939   // If we're forced to always allocate, we use the general allocation
3940   // functions which may leave us with an object in old space.
3941   if (always_allocate()) {
3942     {
3943       AllocationResult allocation =
3944           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3945       if (!allocation.To(&clone)) return allocation;
3946     }
3947     Address clone_address = clone->address();
3948     CopyBlock(clone_address, source->address(), object_size);
3949
3950     // Update write barrier for all tagged fields that lie beyond the header.
3951     const int start_offset = JSObject::kHeaderSize;
3952     const int end_offset = object_size;
3953
3954 #if V8_DOUBLE_FIELDS_UNBOXING
3955     LayoutDescriptorHelper helper(map);
3956     bool has_only_tagged_fields = helper.all_fields_tagged();
3957
3958     if (!has_only_tagged_fields) {
3959       for (int offset = start_offset; offset < end_offset;) {
3960         int end_of_region_offset;
3961         if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
3962           RecordWrites(clone_address, offset,
3963                        (end_of_region_offset - offset) / kPointerSize);
3964         }
3965         offset = end_of_region_offset;
3966       }
3967     } else {
3968 #endif
3969       // Object has only tagged fields.
3970       RecordWrites(clone_address, start_offset,
3971                    (end_offset - start_offset) / kPointerSize);
3972 #if V8_DOUBLE_FIELDS_UNBOXING
3973     }
3974 #endif
3975
3976   } else {
3977     wb_mode = SKIP_WRITE_BARRIER;
3978
3979     {
3980       int adjusted_object_size =
3981           site != NULL ? object_size + AllocationMemento::kSize : object_size;
3982       AllocationResult allocation =
3983           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3984       if (!allocation.To(&clone)) return allocation;
3985     }
3986     SLOW_DCHECK(InNewSpace(clone));
3987     // Since we know the clone is allocated in new space, we can copy
3988     // the contents without worrying about updating the write barrier.
3989     CopyBlock(clone->address(), source->address(), object_size);
3990
3991     if (site != NULL) {
3992       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3993           reinterpret_cast<Address>(clone) + object_size);
3994       InitializeAllocationMemento(alloc_memento, site);
3995     }
3996   }
3997
3998   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3999               source->GetElementsKind());
4000   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4001   FixedArray* properties = FixedArray::cast(source->properties());
4002   // Update elements if necessary.
4003   if (elements->length() > 0) {
4004     FixedArrayBase* elem;
4005     {
4006       AllocationResult allocation;
4007       if (elements->map() == fixed_cow_array_map()) {
4008         allocation = FixedArray::cast(elements);
4009       } else if (source->HasFastDoubleElements()) {
4010         allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4011       } else {
4012         allocation = CopyFixedArray(FixedArray::cast(elements));
4013       }
4014       if (!allocation.To(&elem)) return allocation;
4015     }
4016     JSObject::cast(clone)->set_elements(elem, wb_mode);
4017   }
4018   // Update properties if necessary.
4019   if (properties->length() > 0) {
4020     FixedArray* prop;
4021     {
4022       AllocationResult allocation = CopyFixedArray(properties);
4023       if (!allocation.To(&prop)) return allocation;
4024     }
4025     JSObject::cast(clone)->set_properties(prop, wb_mode);
4026   }
4027   // Return the new clone.
4028   return clone;
4029 }
4030
4031
4032 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
4033                                     int len) {
4034   // Only works for one byte strings.
4035   DCHECK(vector.length() == len);
4036   MemCopy(chars, vector.start(), len);
4037 }
4038
4039 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
4040                                     int len) {
4041   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4042   size_t stream_length = vector.length();
4043   while (stream_length != 0) {
4044     size_t consumed = 0;
4045     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4046     DCHECK(c != unibrow::Utf8::kBadChar);
4047     DCHECK(consumed <= stream_length);
4048     stream_length -= consumed;
4049     stream += consumed;
4050     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4051       len -= 2;
4052       if (len < 0) break;
4053       *chars++ = unibrow::Utf16::LeadSurrogate(c);
4054       *chars++ = unibrow::Utf16::TrailSurrogate(c);
4055     } else {
4056       len -= 1;
4057       if (len < 0) break;
4058       *chars++ = c;
4059     }
4060   }
4061   DCHECK(stream_length == 0);
4062   DCHECK(len == 0);
4063 }
4064
4065
4066 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4067   DCHECK(s->length() == len);
4068   String::WriteToFlat(s, chars, 0, len);
4069 }
4070
4071
4072 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4073   DCHECK(s->length() == len);
4074   String::WriteToFlat(s, chars, 0, len);
4075 }
4076
4077
4078 template <bool is_one_byte, typename T>
4079 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
4080                                                       uint32_t hash_field) {
4081   DCHECK(chars >= 0);
4082   // Compute map and object size.
4083   int size;
4084   Map* map;
4085
4086   DCHECK_LE(0, chars);
4087   DCHECK_GE(String::kMaxLength, chars);
4088   if (is_one_byte) {
4089     map = one_byte_internalized_string_map();
4090     size = SeqOneByteString::SizeFor(chars);
4091   } else {
4092     map = internalized_string_map();
4093     size = SeqTwoByteString::SizeFor(chars);
4094   }
4095   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4096
4097   // Allocate string.
4098   HeapObject* result;
4099   {
4100     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4101     if (!allocation.To(&result)) return allocation;
4102   }
4103
4104   result->set_map_no_write_barrier(map);
4105   // Set length and hash fields of the allocated string.
4106   String* answer = String::cast(result);
4107   answer->set_length(chars);
4108   answer->set_hash_field(hash_field);
4109
4110   DCHECK_EQ(size, answer->Size());
4111
4112   if (is_one_byte) {
4113     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
4114   } else {
4115     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
4116   }
4117   return answer;
4118 }
4119
4120
4121 // Need explicit instantiations.
4122 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
4123                                                                      int,
4124                                                                      uint32_t);
4125 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
4126                                                                       int,
4127                                                                       uint32_t);
4128 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
4129     Vector<const char>, int, uint32_t);
4130
4131
4132 AllocationResult Heap::AllocateRawOneByteString(int length,
4133                                                 PretenureFlag pretenure) {
4134   DCHECK_LE(0, length);
4135   DCHECK_GE(String::kMaxLength, length);
4136   int size = SeqOneByteString::SizeFor(length);
4137   DCHECK(size <= SeqOneByteString::kMaxSize);
4138   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4139
4140   HeapObject* result;
4141   {
4142     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4143     if (!allocation.To(&result)) return allocation;
4144   }
4145
4146   // Partially initialize the object.
4147   result->set_map_no_write_barrier(one_byte_string_map());
4148   String::cast(result)->set_length(length);
4149   String::cast(result)->set_hash_field(String::kEmptyHashField);
4150   DCHECK_EQ(size, HeapObject::cast(result)->Size());
4151
4152   return result;
4153 }
4154
4155
4156 AllocationResult Heap::AllocateRawTwoByteString(int length,
4157                                                 PretenureFlag pretenure) {
4158   DCHECK_LE(0, length);
4159   DCHECK_GE(String::kMaxLength, length);
4160   int size = SeqTwoByteString::SizeFor(length);
4161   DCHECK(size <= SeqTwoByteString::kMaxSize);
4162   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4163
4164   HeapObject* result;
4165   {
4166     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4167     if (!allocation.To(&result)) return allocation;
4168   }
4169
4170   // Partially initialize the object.
4171   result->set_map_no_write_barrier(string_map());
4172   String::cast(result)->set_length(length);
4173   String::cast(result)->set_hash_field(String::kEmptyHashField);
4174   DCHECK_EQ(size, HeapObject::cast(result)->Size());
4175   return result;
4176 }
4177
4178
4179 AllocationResult Heap::AllocateEmptyFixedArray() {
4180   int size = FixedArray::SizeFor(0);
4181   HeapObject* result;
4182   {
4183     AllocationResult allocation =
4184         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4185     if (!allocation.To(&result)) return allocation;
4186   }
4187   // Initialize the object.
4188   result->set_map_no_write_barrier(fixed_array_map());
4189   FixedArray::cast(result)->set_length(0);
4190   return result;
4191 }
4192
4193
4194 AllocationResult Heap::AllocateEmptyExternalArray(
4195     ExternalArrayType array_type) {
4196   return AllocateExternalArray(0, array_type, NULL, TENURED);
4197 }
4198
4199
4200 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
4201   if (!InNewSpace(src)) {
4202     return src;
4203   }
4204
4205   int len = src->length();
4206   HeapObject* obj;
4207   {
4208     AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
4209     if (!allocation.To(&obj)) return allocation;
4210   }
4211   obj->set_map_no_write_barrier(fixed_array_map());
4212   FixedArray* result = FixedArray::cast(obj);
4213   result->set_length(len);
4214
4215   // Copy the content
4216   DisallowHeapAllocation no_gc;
4217   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4218   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4219
4220   // TODO(mvstanton): The map is set twice because set() is protected against
4221   // being called on a COW FixedArray. Issue v8:3221 tracks this; once it is
4222   // resolved we might be able to remove this whole method.
4223   HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4224   return result;
4225 }
4226
4227
4228 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4229     ExternalArrayType array_type) {
4230   return AllocateFixedTypedArray(0, array_type, TENURED);
4231 }
4232
4233
4234 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4235   int len = src->length();
4236   HeapObject* obj;
4237   {
4238     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4239     if (!allocation.To(&obj)) return allocation;
4240   }
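  // A new space copy needs no write barriers, so the payload can be copied
  // wholesale; otherwise the elements are copied one by one with the write
  // barrier mode that the destination requires.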
4241   if (InNewSpace(obj)) {
4242     obj->set_map_no_write_barrier(map);
4243     CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4244               FixedArray::SizeFor(len) - kPointerSize);
4245     return obj;
4246   }
4247   obj->set_map_no_write_barrier(map);
4248   FixedArray* result = FixedArray::cast(obj);
4249   result->set_length(len);
4250
4251   // Copy the content
4252   DisallowHeapAllocation no_gc;
4253   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4254   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4255   return result;
4256 }
4257
4258
4259 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4260                                                    Map* map) {
4261   int len = src->length();
4262   HeapObject* obj;
4263   {
4264     AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4265     if (!allocation.To(&obj)) return allocation;
4266   }
4267   obj->set_map_no_write_barrier(map);
4268   CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4269             src->address() + FixedDoubleArray::kLengthOffset,
4270             FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4271   return obj;
4272 }
4273
4274
4275 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4276                                                     Map* map) {
4277   HeapObject* obj;
4278   if (src->is_extended_layout()) {
4279     ConstantPoolArray::NumberOfEntries small(src,
4280                                              ConstantPoolArray::SMALL_SECTION);
4281     ConstantPoolArray::NumberOfEntries extended(
4282         src, ConstantPoolArray::EXTENDED_SECTION);
4283     AllocationResult allocation =
4284         AllocateExtendedConstantPoolArray(small, extended);
4285     if (!allocation.To(&obj)) return allocation;
4286   } else {
4287     ConstantPoolArray::NumberOfEntries small(src,
4288                                              ConstantPoolArray::SMALL_SECTION);
4289     AllocationResult allocation = AllocateConstantPoolArray(small);
4290     if (!allocation.To(&obj)) return allocation;
4291   }
4292   obj->set_map_no_write_barrier(map);
4293   CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4294             src->address() + ConstantPoolArray::kFirstEntryOffset,
4295             src->size() - ConstantPoolArray::kFirstEntryOffset);
4296   return obj;
4297 }
4298
4299
4300 AllocationResult Heap::AllocateRawFixedArray(int length,
4301                                              PretenureFlag pretenure) {
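  // An out-of-range length is a bug in the caller rather than an ordinary
  // allocation failure, so it aborts the process instead of returning a
  // retryable AllocationResult.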
4302   if (length < 0 || length > FixedArray::kMaxLength) {
4303     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4304   }
4305   int size = FixedArray::SizeFor(length);
4306   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4307
4308   return AllocateRaw(size, space, OLD_POINTER_SPACE);
4309 }
4310
4311
4312 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4313                                                     PretenureFlag pretenure,
4314                                                     Object* filler) {
4315   DCHECK(length >= 0);
4316   DCHECK(empty_fixed_array()->IsFixedArray());
4317   if (length == 0) return empty_fixed_array();
4318
4319   DCHECK(!InNewSpace(filler));
4320   HeapObject* result;
4321   {
4322     AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4323     if (!allocation.To(&result)) return allocation;
4324   }
4325
4326   result->set_map_no_write_barrier(fixed_array_map());
4327   FixedArray* array = FixedArray::cast(result);
4328   array->set_length(length);
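  // Fill the array with the filler value. No write barrier is needed here
  // because the filler is guaranteed not to live in new space (see the
  // DCHECK above).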
4329   MemsetPointer(array->data_start(), filler, length);
4330   return array;
4331 }
4332
4333
4334 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4335   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4336 }
4337
4338
4339 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4340   if (length == 0) return empty_fixed_array();
4341
4342   HeapObject* obj;
4343   {
4344     AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4345     if (!allocation.To(&obj)) return allocation;
4346   }
4347
4348   obj->set_map_no_write_barrier(fixed_array_map());
4349   FixedArray::cast(obj)->set_length(length);
4350   return obj;
4351 }
4352
4353
4354 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4355     int length, PretenureFlag pretenure) {
4356   if (length == 0) return empty_fixed_array();
4357
4358   HeapObject* elements;
4359   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4360   if (!allocation.To(&elements)) return allocation;
4361
4362   elements->set_map_no_write_barrier(fixed_double_array_map());
4363   FixedDoubleArray::cast(elements)->set_length(length);
4364   return elements;
4365 }
4366
4367
4368 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4369                                                    PretenureFlag pretenure) {
4370   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4371     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4372   }
4373   int size = FixedDoubleArray::SizeFor(length);
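  // On 32-bit hosts, reserve one extra pointer-sized word so that
  // EnsureDoubleAligned below can shift the object onto an 8-byte boundary.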
4374 #ifndef V8_HOST_ARCH_64_BIT
4375   size += kPointerSize;
4376 #endif
4377   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4378
4379   HeapObject* object;
4380   {
4381     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4382     if (!allocation.To(&object)) return allocation;
4383   }
4384
4385   return EnsureDoubleAligned(this, object, size);
4386 }
4387
4388
4389 AllocationResult Heap::AllocateConstantPoolArray(
4390     const ConstantPoolArray::NumberOfEntries& small) {
4391   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4392   int size = ConstantPoolArray::SizeFor(small);
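  // Constant pools hold 64-bit and double entries, so apply the same
  // double-alignment padding on 32-bit hosts as for FixedDoubleArrays.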
4393 #ifndef V8_HOST_ARCH_64_BIT
4394   size += kPointerSize;
4395 #endif
4396   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4397
4398   HeapObject* object;
4399   {
4400     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4401     if (!allocation.To(&object)) return allocation;
4402   }
4403   object = EnsureDoubleAligned(this, object, size);
4404   object->set_map_no_write_barrier(constant_pool_array_map());
4405
4406   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4407   constant_pool->Init(small);
4408   constant_pool->ClearPtrEntries(isolate());
4409   return constant_pool;
4410 }
4411
4412
4413 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4414     const ConstantPoolArray::NumberOfEntries& small,
4415     const ConstantPoolArray::NumberOfEntries& extended) {
4416   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4417   CHECK(extended.are_in_range(0, kMaxInt));
4418   int size = ConstantPoolArray::SizeForExtended(small, extended);
4419 #ifndef V8_HOST_ARCH_64_BIT
4420   size += kPointerSize;
4421 #endif
4422   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4423
4424   HeapObject* object;
4425   {
4426     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4427     if (!allocation.To(&object)) return allocation;
4428   }
4429   object = EnsureDoubleAligned(this, object, size);
4430   object->set_map_no_write_barrier(constant_pool_array_map());
4431
4432   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4433   constant_pool->InitExtended(small, extended);
4434   constant_pool->ClearPtrEntries(isolate());
4435   return constant_pool;
4436 }
4437
4438
4439 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4440   ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4441   int size = ConstantPoolArray::SizeFor(small);
4442   HeapObject* result;
4443   {
4444     AllocationResult allocation =
4445         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4446     if (!allocation.To(&result)) return allocation;
4447   }
4448   result->set_map_no_write_barrier(constant_pool_array_map());
4449   ConstantPoolArray::cast(result)->Init(small);
4450   return result;
4451 }
4452
4453
4454 AllocationResult Heap::AllocateSymbol() {
4455   // Statically ensure that it is safe to allocate symbols in paged spaces.
4456   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4457
4458   HeapObject* result;
4459   AllocationResult allocation =
4460       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4461   if (!allocation.To(&result)) return allocation;
4462
4463   result->set_map_no_write_barrier(symbol_map());
4464
4465   // Generate a random hash value.
4466   int hash;
4467   int attempts = 0;
4468   do {
4469     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4470     attempts++;
4471   } while (hash == 0 && attempts < 30);
4472   if (hash == 0) hash = 1;  // never return 0
4473
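  // Store the hash shifted into position; the kIsNotArrayIndexMask flag marks
  // the field as a computed hash that is not an array index.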
4474   Symbol::cast(result)
4475       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4476   Symbol::cast(result)->set_name(undefined_value());
4477   Symbol::cast(result)->set_flags(Smi::FromInt(0));
4478
4479   DCHECK(!Symbol::cast(result)->is_private());
4480   return result;
4481 }
4482
4483
4484 AllocationResult Heap::AllocateStruct(InstanceType type) {
4485   Map* map;
4486   switch (type) {
4487 #define MAKE_CASE(NAME, Name, name) \
4488   case NAME##_TYPE:                 \
4489     map = name##_map();             \
4490     break;
4491     STRUCT_LIST(MAKE_CASE)
4492 #undef MAKE_CASE
4493     default:
4494       UNREACHABLE();
4495       return exception();
4496   }
4497   int size = map->instance_size();
4498   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4499   Struct* result;
4500   {
4501     AllocationResult allocation = Allocate(map, space);
4502     if (!allocation.To(&result)) return allocation;
4503   }
4504   result->InitializeBody(size);
4505   return result;
4506 }
4507
4508
4509 bool Heap::IsHeapIterable() {
4510   // TODO(hpayer): This function is not correct. Allocation folding in old
4511   // space breaks the iterability.
4512   return new_space_top_after_last_gc_ == new_space()->top();
4513 }
4514
4515
4516 void Heap::MakeHeapIterable() {
4517   DCHECK(AllowHeapAllocation::IsAllowed());
4518   if (!IsHeapIterable()) {
4519     CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4520   }
4521   if (mark_compact_collector()->sweeping_in_progress()) {
4522     mark_compact_collector()->EnsureSweepingCompleted();
4523   }
4524   DCHECK(IsHeapIterable());
4525 }
4526
4527
4528 void Heap::IdleMarkCompact(const char* message) {
4529   bool uncommit = false;
4530   if (gc_count_at_last_idle_gc_ == gc_count_) {
4531     // No GC since the last full GC, the mutator is probably not active.
4532     isolate_->compilation_cache()->Clear();
4533     uncommit = true;
4534   }
4535   CollectAllGarbage(kReduceMemoryFootprintMask, message);
4536   gc_idle_time_handler_.NotifyIdleMarkCompact();
4537   gc_count_at_last_idle_gc_ = gc_count_;
4538   if (uncommit) {
4539     new_space_.Shrink();
4540     UncommitFromSpace();
4541   }
4542 }
4543
4544
4545 bool Heap::TryFinalizeIdleIncrementalMarking(
4546     double idle_time_in_ms, size_t size_of_objects,
4547     size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
4548   if (incremental_marking()->IsComplete() ||
4549       (mark_compact_collector_.marking_deque()->IsEmpty() &&
4550        gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
4551            static_cast<size_t>(idle_time_in_ms), size_of_objects,
4552            final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4553     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4554     return true;
4555   }
4556   return false;
4557 }
4558
4559
4560 bool Heap::WorthActivatingIncrementalMarking() {
4561   return incremental_marking()->IsStopped() &&
4562          incremental_marking()->ShouldActivate();
4563 }
4564
4565
4566 static double MonotonicallyIncreasingTimeInMs() {
4567   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4568          static_cast<double>(base::Time::kMillisecondsPerSecond);
4569 }
4570
4571
4572 bool Heap::IdleNotification(int idle_time_in_ms) {
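  // Convert the relative idle time into an absolute deadline in seconds and
  // delegate to the deadline-based overload below.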
4573   return IdleNotification(
4574       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4575       (static_cast<double>(idle_time_in_ms) /
4576        static_cast<double>(base::Time::kMillisecondsPerSecond)));
4577 }
4578
4579
4580 bool Heap::IdleNotification(double deadline_in_seconds) {
4581   CHECK(HasBeenSetUp());  // http://crbug.com/425035
4582   double deadline_in_ms =
4583       deadline_in_seconds *
4584       static_cast<double>(base::Time::kMillisecondsPerSecond);
4585   HistogramTimerScope idle_notification_scope(
4586       isolate_->counters()->gc_idle_notification());
4587   double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4588
4589   GCIdleTimeHandler::HeapState heap_state;
4590   heap_state.contexts_disposed = contexts_disposed_;
4591   heap_state.contexts_disposal_rate =
4592       tracer()->ContextDisposalRateInMilliseconds();
4593   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4594   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4595   // TODO(ulan): Start incremental marking only for large heaps.
4596   intptr_t limit = old_generation_allocation_limit_;
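  // For sufficiently long idle periods use the lower idle allocation limit,
  // which makes it more likely that incremental marking is started while the
  // embedder is idle.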
4597   if (static_cast<size_t>(idle_time_in_ms) >
4598       GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
4599     limit = idle_old_generation_allocation_limit_;
4600   }
4601
4602   heap_state.can_start_incremental_marking =
4603       incremental_marking()->WorthActivating() &&
4604       NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
4605   heap_state.sweeping_in_progress =
4606       mark_compact_collector()->sweeping_in_progress();
4607   heap_state.mark_compact_speed_in_bytes_per_ms =
4608       static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4609   heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4610       tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4611   heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
4612       static_cast<size_t>(
4613           tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
4614   heap_state.scavenge_speed_in_bytes_per_ms =
4615       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4616   heap_state.used_new_space_size = new_space_.Size();
4617   heap_state.new_space_capacity = new_space_.Capacity();
4618   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4619       static_cast<size_t>(
4620           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4621
4622   GCIdleTimeAction action =
4623       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4624   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4625       static_cast<int>(idle_time_in_ms));
4626
4627   bool result = false;
4628   switch (action.type) {
4629     case DONE:
4630       result = true;
4631       break;
4632     case DO_INCREMENTAL_MARKING: {
4633       if (incremental_marking()->IsStopped()) {
4634         incremental_marking()->Start();
4635       }
4636       double remaining_idle_time_in_ms = 0.0;
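      // Keep stepping the incremental marker while at least two average step
      // durations remain before the deadline and there is marking work left.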
4637       do {
4638         incremental_marking()->Step(
4639             action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4640             IncrementalMarking::FORCE_MARKING,
4641             IncrementalMarking::DO_NOT_FORCE_COMPLETION);
4642         remaining_idle_time_in_ms =
4643             deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4644       } while (remaining_idle_time_in_ms >=
4645                    2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
4646                !incremental_marking()->IsComplete() &&
4647                !mark_compact_collector_.marking_deque()->IsEmpty());
4648       if (remaining_idle_time_in_ms > 0.0) {
4649         action.additional_work = TryFinalizeIdleIncrementalMarking(
4650             remaining_idle_time_in_ms, heap_state.size_of_objects,
4651             heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
4652       }
4653       break;
4654     }
4655     case DO_FULL_GC: {
4656       if (contexts_disposed_) {
4657         HistogramTimerScope scope(isolate_->counters()->gc_context());
4658         CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
4659         gc_idle_time_handler_.NotifyIdleMarkCompact();
4660         gc_count_at_last_idle_gc_ = gc_count_;
4661       } else {
4662         IdleMarkCompact("idle notification: finalize idle round");
4663       }
4664       break;
4665     }
4666     case DO_SCAVENGE:
4667       CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4668       break;
4669     case DO_FINALIZE_SWEEPING:
4670       mark_compact_collector()->EnsureSweepingCompleted();
4671       break;
4672     case DO_NOTHING:
4673       break;
4674   }
4675
4676   double current_time = MonotonicallyIncreasingTimeInMs();
4677   last_idle_notification_time_ = current_time;
4678   double deadline_difference = deadline_in_ms - current_time;
4679
4680   if (deadline_difference >= 0) {
4681     if (action.type != DONE && action.type != DO_NOTHING) {
4682       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4683           static_cast<int>(deadline_difference));
4684     }
4685   } else {
4686     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4687         static_cast<int>(-deadline_difference));
4688   }
4689
4690   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4691       FLAG_trace_idle_notification_verbose) {
4692     PrintF(
4693         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4694         "ms, deadline usage %.2f ms [",
4695         idle_time_in_ms, idle_time_in_ms - deadline_difference,
4696         deadline_difference);
4697     action.Print();
4698     PrintF("]");
4699     if (FLAG_trace_idle_notification_verbose) {
4700       PrintF("[");
4701       heap_state.Print();
4702       PrintF("]");
4703     }
4704     PrintF("\n");
4705   }
4706
4707   contexts_disposed_ = 0;
4708   return result;
4709 }
4710
4711
4712 bool Heap::RecentIdleNotificationHappened() {
4713   return (last_idle_notification_time_ +
4714           GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
4715          MonotonicallyIncreasingTimeInMs();
4716 }
4717
4718
4719 #ifdef DEBUG
4720
4721 void Heap::Print() {
4722   if (!HasBeenSetUp()) return;
4723   isolate()->PrintStack(stdout);
4724   AllSpaces spaces(this);
4725   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4726     space->Print();
4727   }
4728 }
4729
4730
4731 void Heap::ReportCodeStatistics(const char* title) {
4732   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4733   PagedSpace::ResetCodeStatistics(isolate());
4734   // We do not look for code in new space, map space, or old space.  If code
4735   // somehow ends up in those spaces, we would miss it here.
4736   code_space_->CollectCodeStatistics();
4737   lo_space_->CollectCodeStatistics();
4738   PagedSpace::ReportCodeStatistics(isolate());
4739 }
4740
4741
4742 // This function expects that NewSpace's allocated objects histogram is
4743 // populated (via a call to CollectStatistics or else as a side effect of a
4744 // just-completed scavenge collection).
4745 void Heap::ReportHeapStatistics(const char* title) {
4746   USE(title);
4747   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4748          gc_count_);
4749   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4750          old_generation_allocation_limit_);
4751
4752   PrintF("\n");
4753   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4754   isolate_->global_handles()->PrintStats();
4755   PrintF("\n");
4756
4757   PrintF("Heap statistics : ");
4758   isolate_->memory_allocator()->ReportStatistics();
4759   PrintF("To space : ");
4760   new_space_.ReportStatistics();
4761   PrintF("Old pointer space : ");
4762   old_pointer_space_->ReportStatistics();
4763   PrintF("Old data space : ");
4764   old_data_space_->ReportStatistics();
4765   PrintF("Code space : ");
4766   code_space_->ReportStatistics();
4767   PrintF("Map space : ");
4768   map_space_->ReportStatistics();
4769   PrintF("Cell space : ");
4770   cell_space_->ReportStatistics();
4771   PrintF("PropertyCell space : ");
4772   property_cell_space_->ReportStatistics();
4773   PrintF("Large object space : ");
4774   lo_space_->ReportStatistics();
4775   PrintF(">>>>>> ========================================= >>>>>>\n");
4776 }
4777
4778 #endif  // DEBUG
4779
4780 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4781
4782
4783 bool Heap::Contains(Address addr) {
4784   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4785   return HasBeenSetUp() &&
4786          (new_space_.ToSpaceContains(addr) ||
4787           old_pointer_space_->Contains(addr) ||
4788           old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4789           map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4790           property_cell_space_->Contains(addr) ||
4791           lo_space_->SlowContains(addr));
4792 }
4793
4794
4795 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4796   return InSpace(value->address(), space);
4797 }
4798
4799
4800 bool Heap::InSpace(Address addr, AllocationSpace space) {
4801   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4802   if (!HasBeenSetUp()) return false;
4803
4804   switch (space) {
4805     case NEW_SPACE:
4806       return new_space_.ToSpaceContains(addr);
4807     case OLD_POINTER_SPACE:
4808       return old_pointer_space_->Contains(addr);
4809     case OLD_DATA_SPACE:
4810       return old_data_space_->Contains(addr);
4811     case CODE_SPACE:
4812       return code_space_->Contains(addr);
4813     case MAP_SPACE:
4814       return map_space_->Contains(addr);
4815     case CELL_SPACE:
4816       return cell_space_->Contains(addr);
4817     case PROPERTY_CELL_SPACE:
4818       return property_cell_space_->Contains(addr);
4819     case LO_SPACE:
4820       return lo_space_->SlowContains(addr);
4821   }
4822   UNREACHABLE();
4823   return false;
4824 }
4825
4826
4827 bool Heap::RootIsImmortalImmovable(int root_index) {
4828   switch (root_index) {
4829 #define CASE(name)               \
4830   case Heap::k##name##RootIndex: \
4831     return true;
4832     IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
4833 #undef CASE
4834     default:
4835       return false;
4836   }
4837 }
4838
4839
4840 #ifdef VERIFY_HEAP
4841 void Heap::Verify() {
4842   CHECK(HasBeenSetUp());
4843   HandleScope scope(isolate());
4844
4845   store_buffer()->Verify();
4846
4847   if (mark_compact_collector()->sweeping_in_progress()) {
4848     // We have to wait here for the sweeper threads to have an iterable heap.
4849     mark_compact_collector()->EnsureSweepingCompleted();
4850   }
4851
4852   VerifyPointersVisitor visitor;
4853   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4854
4855   VerifySmisVisitor smis_visitor;
4856   IterateSmiRoots(&smis_visitor);
4857
4858   new_space_.Verify();
4859
4860   old_pointer_space_->Verify(&visitor);
4861   map_space_->Verify(&visitor);
4862
4863   VerifyPointersVisitor no_dirty_regions_visitor;
4864   old_data_space_->Verify(&no_dirty_regions_visitor);
4865   code_space_->Verify(&no_dirty_regions_visitor);
4866   cell_space_->Verify(&no_dirty_regions_visitor);
4867   property_cell_space_->Verify(&no_dirty_regions_visitor);
4868
4869   lo_space_->Verify();
4870 }
4871 #endif
4872
4873
4874 void Heap::ZapFromSpace() {
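  // Overwrite every word of the from-space pages with the zap value so that
  // stale pointers into from space are easy to recognize.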
4875   NewSpacePageIterator it(new_space_.FromSpaceStart(),
4876                           new_space_.FromSpaceEnd());
4877   while (it.has_next()) {
4878     NewSpacePage* page = it.next();
4879     for (Address cursor = page->area_start(), limit = page->area_end();
4880          cursor < limit; cursor += kPointerSize) {
4881       Memory::Address_at(cursor) = kFromSpaceZapValue;
4882     }
4883   }
4884 }
4885
4886
4887 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4888                                              ObjectSlotCallback callback) {
4889   Address slot_address = start;
4890
4891   // We are not collecting slots on new space objects during mutation, so we
4892   // have to scan for pointers to evacuation candidates when we promote
4893   // objects. However, we should only record slots in black objects: a grey
4894   // object's slots will be rescanned anyway, and a white object might not
4895   // survive until the end of the collection, so recording its slots would
4896   // violate the invariant.
4897   bool record_slots = false;
4898   if (incremental_marking()->IsCompacting()) {
4899     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4900     record_slots = Marking::IsBlack(mark_bit);
4901   }
4902
4903   while (slot_address < end) {
4904     Object** slot = reinterpret_cast<Object**>(slot_address);
4905     Object* object = *slot;
4906     // If the store buffer becomes overfull we mark pages as being exempt from
4907     // the store buffer.  These pages are scanned to find pointers that point
4908     // to the new space.  In that case we may hit newly promoted objects and
4909     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
4910     if (object->IsHeapObject()) {
4911       if (Heap::InFromSpace(object)) {
4912         callback(reinterpret_cast<HeapObject**>(slot),
4913                  HeapObject::cast(object));
4914         Object* new_object = *slot;
4915         if (InNewSpace(new_object)) {
4916           SLOW_DCHECK(Heap::InToSpace(new_object));
4917           SLOW_DCHECK(new_object->IsHeapObject());
4918           store_buffer_.EnterDirectlyIntoStoreBuffer(
4919               reinterpret_cast<Address>(slot));
4920         }
4921         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4922       } else if (record_slots &&
4923                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4924         mark_compact_collector()->RecordSlot(slot, slot, object);
4925       }
4926     }
4927     slot_address += kPointerSize;
4928   }
4929 }
4930
4931
4932 #ifdef DEBUG
4933 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4934
4935
4936 bool IsAMapPointerAddress(Object** addr) {
4937   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4938   int mod = a % Map::kSize;
4939   return mod >= Map::kPointerFieldsBeginOffset &&
4940          mod < Map::kPointerFieldsEndOffset;
4941 }
4942
4943
4944 bool EverythingsAPointer(Object** addr) { return true; }
4945
4946
4947 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4948                              Object**** store_buffer_position,
4949                              Object*** store_buffer_top,
4950                              CheckStoreBufferFilter filter,
4951                              Address special_garbage_start,
4952                              Address special_garbage_end) {
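  // Walk the given range and check that every pointer into new space has a
  // matching entry in the (sorted) store buffer, skipping free space blocks
  // and the current linear allocation area.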
4953   Map* free_space_map = heap->free_space_map();
4954   for (; current < limit; current++) {
4955     Object* o = *current;
4956     Address current_address = reinterpret_cast<Address>(current);
4957     // Skip free space.
4958     if (o == free_space_map) {
4959       // current_address, computed above, already points at this word.
4960       FreeSpace* free_space =
4961           FreeSpace::cast(HeapObject::FromAddress(current_address));
4962       int skip = free_space->Size();
4963       DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4964       DCHECK(skip > 0);
4965       current_address += skip - kPointerSize;
4966       current = reinterpret_cast<Object**>(current_address);
4967       continue;
4968     }
4969     // Skip the current linear allocation space between top and limit which is
4970     // unmarked with the free space map, but can contain junk.
4971     if (current_address == special_garbage_start &&
4972         special_garbage_end != special_garbage_start) {
4973       current_address = special_garbage_end - kPointerSize;
4974       current = reinterpret_cast<Object**>(current_address);
4975       continue;
4976     }
4977     if (!(*filter)(current)) continue;
4978     DCHECK(current_address < special_garbage_start ||
4979            current_address >= special_garbage_end);
4980     DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4981     // We have to check that the pointer does not point into new space
4982     // without trying to cast it to a heap object since the hash field of
4983     // a string can contain values like 1 and 3 which are tagged null
4984     // pointers.
4985     if (!heap->InNewSpace(o)) continue;
4986     while (**store_buffer_position < current &&
4987            *store_buffer_position < store_buffer_top) {
4988       (*store_buffer_position)++;
4989     }
4990     if (**store_buffer_position != current ||
4991         *store_buffer_position == store_buffer_top) {
4992       Object** obj_start = current;
4993       while (!(*obj_start)->IsMap()) obj_start--;
4994       UNREACHABLE();
4995     }
4996   }
4997 }
4998
4999
5000 // Check that the store buffer contains all intergenerational pointers by
5001 // scanning a page and ensuring that all pointers to young space are in the
5002 // store buffer.
5003 void Heap::OldPointerSpaceCheckStoreBuffer() {
5004   OldSpace* space = old_pointer_space();
5005   PageIterator pages(space);
5006
5007   store_buffer()->SortUniq();
5008
5009   while (pages.has_next()) {
5010     Page* page = pages.next();
5011     Object** current = reinterpret_cast<Object**>(page->area_start());
5012
5013     Address end = page->area_end();
5014
5015     Object*** store_buffer_position = store_buffer()->Start();
5016     Object*** store_buffer_top = store_buffer()->Top();
5017
5018     Object** limit = reinterpret_cast<Object**>(end);
5019     CheckStoreBuffer(this, current, limit, &store_buffer_position,
5020                      store_buffer_top, &EverythingsAPointer, space->top(),
5021                      space->limit());
5022   }
5023 }
5024
5025
5026 void Heap::MapSpaceCheckStoreBuffer() {
5027   MapSpace* space = map_space();
5028   PageIterator pages(space);
5029
5030   store_buffer()->SortUniq();
5031
5032   while (pages.has_next()) {
5033     Page* page = pages.next();
5034     Object** current = reinterpret_cast<Object**>(page->area_start());
5035
5036     Address end = page->area_end();
5037
5038     Object*** store_buffer_position = store_buffer()->Start();
5039     Object*** store_buffer_top = store_buffer()->Top();
5040
5041     Object** limit = reinterpret_cast<Object**>(end);
5042     CheckStoreBuffer(this, current, limit, &store_buffer_position,
5043                      store_buffer_top, &IsAMapPointerAddress, space->top(),
5044                      space->limit());
5045   }
5046 }
5047
5048
5049 void Heap::LargeObjectSpaceCheckStoreBuffer() {
5050   LargeObjectIterator it(lo_space());
5051   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
5052     // We only have code, sequential strings, or fixed arrays in large
5053     // object space, and only fixed arrays can possibly contain pointers to
5054     // the young generation.
5055     if (object->IsFixedArray()) {
5056       Object*** store_buffer_position = store_buffer()->Start();
5057       Object*** store_buffer_top = store_buffer()->Top();
5058       Object** current = reinterpret_cast<Object**>(object->address());
5059       Object** limit =
5060           reinterpret_cast<Object**>(object->address() + object->Size());
5061       CheckStoreBuffer(this, current, limit, &store_buffer_position,
5062                        store_buffer_top, &EverythingsAPointer, NULL, NULL);
5063     }
5064   }
5065 }
5066 #endif
5067
5068
5069 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
5070   IterateStrongRoots(v, mode);
5071   IterateWeakRoots(v, mode);
5072 }
5073
5074
5075 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
5076   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
5077   v->Synchronize(VisitorSynchronization::kStringTable);
5078   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
5079     // Scavenge collections have special processing for this.
5080     external_string_table_.Iterate(v);
5081   }
5082   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
5083 }
5084
5085
5086 void Heap::IterateSmiRoots(ObjectVisitor* v) {
5087   // Acquire execution access since we are going to read stack limit values.
5088   ExecutionAccess access(isolate());
5089   v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
5090   v->Synchronize(VisitorSynchronization::kSmiRootList);
5091 }
5092
5093
5094 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5095   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5096   v->Synchronize(VisitorSynchronization::kStrongRootList);
5097
5098   v->VisitPointer(bit_cast<Object**>(&hidden_string_));
5099   v->Synchronize(VisitorSynchronization::kInternalizedString);
5100
5101   isolate_->bootstrapper()->Iterate(v);
5102   v->Synchronize(VisitorSynchronization::kBootstrapper);
5103   isolate_->Iterate(v);
5104   v->Synchronize(VisitorSynchronization::kTop);
5105   Relocatable::Iterate(isolate_, v);
5106   v->Synchronize(VisitorSynchronization::kRelocatable);
5107
5108   if (isolate_->deoptimizer_data() != NULL) {
5109     isolate_->deoptimizer_data()->Iterate(v);
5110   }
5111   v->Synchronize(VisitorSynchronization::kDebug);
5112   isolate_->compilation_cache()->Iterate(v);
5113   v->Synchronize(VisitorSynchronization::kCompilationCache);
5114
5115   // Iterate over local handles in handle scopes.
5116   isolate_->handle_scope_implementer()->Iterate(v);
5117   isolate_->IterateDeferredHandles(v);
5118   v->Synchronize(VisitorSynchronization::kHandleScope);
5119
5120   // Iterate over the builtin code objects and code stubs in the
5121   // heap. Note that it is not necessary to iterate over code objects
5122   // on scavenge collections.
5123   if (mode != VISIT_ALL_IN_SCAVENGE) {
5124     isolate_->builtins()->IterateBuiltins(v);
5125   }
5126   v->Synchronize(VisitorSynchronization::kBuiltins);
5127
5128   // Iterate over global handles.
5129   switch (mode) {
5130     case VISIT_ONLY_STRONG:
5131       isolate_->global_handles()->IterateStrongRoots(v);
5132       break;
5133     case VISIT_ALL_IN_SCAVENGE:
5134       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5135       break;
5136     case VISIT_ALL_IN_SWEEP_NEWSPACE:
5137     case VISIT_ALL:
5138       isolate_->global_handles()->IterateAllRoots(v);
5139       break;
5140   }
5141   v->Synchronize(VisitorSynchronization::kGlobalHandles);
5142
5143   // Iterate over eternal handles.
5144   if (mode == VISIT_ALL_IN_SCAVENGE) {
5145     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
5146   } else {
5147     isolate_->eternal_handles()->IterateAllRoots(v);
5148   }
5149   v->Synchronize(VisitorSynchronization::kEternalHandles);
5150
5151   // Iterate over pointers being held by inactive threads.
5152   isolate_->thread_manager()->Iterate(v);
5153   v->Synchronize(VisitorSynchronization::kThreadManager);
5154
5155   // Iterate over the pointers the Serialization/Deserialization code is
5156   // holding.
5157   // During garbage collection this keeps the partial snapshot cache alive.
5158   // During deserialization of the startup snapshot this creates the partial
5159   // snapshot cache and deserializes the objects it refers to.  During
5160   // serialization this does nothing, since the partial snapshot cache is
5161   // empty.  However the next thing we do is create the partial snapshot,
5162   // filling up the partial snapshot cache with objects it needs as we go.
5163   SerializerDeserializer::Iterate(isolate_, v);
5164   // We don't do a v->Synchronize call here, because in debug mode that will
5165   // output a flag to the snapshot.  However at this point the serializer and
5166   // deserializer are deliberately a little unsynchronized (see above) so the
5167   // checking of the sync flag in the snapshot would fail.
5168 }
5169
5170
5171 // TODO(1236194): Since the heap size is configurable on the command line
5172 // and through the API, we should gracefully handle the case that the heap
5173 // size is not big enough to fit all the initial objects.
5174 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
5175                          int max_executable_size, size_t code_range_size) {
5176   if (HasBeenSetUp()) return false;
5177
5178   // Overwrite default configuration.
5179   if (max_semi_space_size > 0) {
5180     max_semi_space_size_ = max_semi_space_size * MB;
5181   }
5182   if (max_old_space_size > 0) {
5183     max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
5184   }
5185   if (max_executable_size > 0) {
5186     max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
5187   }
5188
5189   // If max space size flags are specified overwrite the configuration.
5190   if (FLAG_max_semi_space_size > 0) {
5191     max_semi_space_size_ = FLAG_max_semi_space_size * MB;
5192   }
5193   if (FLAG_max_old_space_size > 0) {
5194     max_old_generation_size_ =
5195         static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
5196   }
5197   if (FLAG_max_executable_size > 0) {
5198     max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
5199   }
5200
5201   if (FLAG_stress_compaction) {
5202     // This will cause more frequent GCs when stressing.
5203     max_semi_space_size_ = Page::kPageSize;
5204   }
5205
5206   if (Snapshot::HaveASnapshotToStartFrom()) {
5207     // If we are using a snapshot we always reserve the default amount
5208     // of memory for each semispace because code in the snapshot has
5209     // write-barrier code that relies on the size and alignment of new
5210     // space.  We therefore cannot use a larger max semispace size
5211     // than the default reserved semispace size.
5212     if (max_semi_space_size_ > reserved_semispace_size_) {
5213       max_semi_space_size_ = reserved_semispace_size_;
5214       if (FLAG_trace_gc) {
5215         PrintPID("Max semi-space size cannot be more than %d kbytes\n",
5216                  reserved_semispace_size_ >> 10);
5217       }
5218     }
5219   } else {
5220     // If we are not using snapshots we reserve space for the actual
5221     // max semispace size.
5222     reserved_semispace_size_ = max_semi_space_size_;
5223   }
5224
5225   // The max executable size must be less than or equal to the max old
5226   // generation size.
5227   if (max_executable_size_ > max_old_generation_size_) {
5228     max_executable_size_ = max_old_generation_size_;
5229   }
5230
5231   // The new space size must be a power of two to support single-bit testing
5232   // for containment.
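  // For example, a requested 6 MB semispace is rounded up to 8 MB.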
5233   max_semi_space_size_ =
5234       base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
5235   reserved_semispace_size_ =
5236       base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
5237
5238   if (FLAG_min_semi_space_size > 0) {
5239     int initial_semispace_size = FLAG_min_semi_space_size * MB;
5240     if (initial_semispace_size > max_semi_space_size_) {
5241       initial_semispace_size_ = max_semi_space_size_;
5242       if (FLAG_trace_gc) {
5243         PrintPID(
5244             "Min semi-space size cannot be more than the maximum "
5245             "semi-space size of %d MB\n",
5246             max_semi_space_size_ / MB);
5247       }
5248     } else {
5249       initial_semispace_size_ = initial_semispace_size;
5250     }
5251   }
5252
5253   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5254
5255   if (FLAG_target_semi_space_size > 0) {
5256     int target_semispace_size = FLAG_target_semi_space_size * MB;
5257     if (target_semispace_size < initial_semispace_size_) {
5258       target_semispace_size_ = initial_semispace_size_;
5259       if (FLAG_trace_gc) {
5260         PrintPID(
5261             "Target semi-space size cannot be less than the minimum "
5262             "semi-space size of %d MB\n",
5263             initial_semispace_size_ / MB);
5264       }
5265     } else if (target_semispace_size > max_semi_space_size_) {
5266       target_semispace_size_ = max_semi_space_size_;
5267       if (FLAG_trace_gc) {
5268         PrintPID(
5269             "Target semi-space size cannot be more than the maximum "
5270             "semi-space size of %d MB\n",
5271             max_semi_space_size_ / MB);
5272       }
5273     } else {
5274       target_semispace_size_ = target_semispace_size;
5275     }
5276   }
5277
5278   target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
5279
5280   if (FLAG_semi_space_growth_factor < 2) {
5281     FLAG_semi_space_growth_factor = 2;
5282   }
5283
5284   // The old generation is paged and needs at least one page for each space.
5285   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5286   max_old_generation_size_ =
5287       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
5288           max_old_generation_size_);
5289
5290   if (FLAG_initial_old_space_size > 0) {
5291     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5292   } else {
5293     initial_old_generation_size_ =
5294         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5295   }
5296   old_generation_allocation_limit_ = initial_old_generation_size_;
5297
5298   // We rely on being able to allocate new arrays in paged spaces.
5299   DCHECK(Page::kMaxRegularHeapObjectSize >=
5300          (JSArray::kSize +
5301           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
5302           AllocationMemento::kSize));
5303
5304   code_range_size_ = code_range_size * MB;
5305
5306   configured_ = true;
5307   return true;
5308 }
5309
5310
5311 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
5312
5313
5314 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5315   *stats->start_marker = HeapStats::kStartMarker;
5316   *stats->end_marker = HeapStats::kEndMarker;
5317   *stats->new_space_size = new_space_.SizeAsInt();
5318   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5319   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5320   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5321   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5322   *stats->old_data_space_capacity = old_data_space_->Capacity();
5323   *stats->code_space_size = code_space_->SizeOfObjects();
5324   *stats->code_space_capacity = code_space_->Capacity();
5325   *stats->map_space_size = map_space_->SizeOfObjects();
5326   *stats->map_space_capacity = map_space_->Capacity();
5327   *stats->cell_space_size = cell_space_->SizeOfObjects();
5328   *stats->cell_space_capacity = cell_space_->Capacity();
5329   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
5330   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
5331   *stats->lo_space_size = lo_space_->Size();
5332   isolate_->global_handles()->RecordStats(stats);
5333   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5334   *stats->memory_allocator_capacity =
5335       isolate()->memory_allocator()->Size() +
5336       isolate()->memory_allocator()->Available();
5337   *stats->os_error = base::OS::GetLastError();
5338   isolate()->memory_allocator()->Available();
5339   if (take_snapshot) {
5340     HeapIterator iterator(this);
5341     for (HeapObject* obj = iterator.next(); obj != NULL;
5342          obj = iterator.next()) {
5343       InstanceType type = obj->map()->instance_type();
5344       DCHECK(0 <= type && type <= LAST_TYPE);
5345       stats->objects_per_type[type]++;
5346       stats->size_per_type[type] += obj->Size();
5347     }
5348   }
5349 }
5350
5351
5352 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5353   return old_pointer_space_->SizeOfObjects() +
5354          old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5355          map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5356          property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5357 }
5358
5359
5360 int64_t Heap::PromotedExternalMemorySize() {
5361   if (amount_of_external_allocated_memory_ <=
5362       amount_of_external_allocated_memory_at_last_global_gc_)
5363     return 0;
5364   return amount_of_external_allocated_memory_ -
5365          amount_of_external_allocated_memory_at_last_global_gc_;
5366 }
5367
5368
5369 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5370                                                      intptr_t old_gen_size) {
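  // The limit is the current old generation size scaled by the growing factor
  // (but at least the minimum limit) plus the new space capacity, capped at
  // the halfway point between the current size and the maximum heap size.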
5371   CHECK(factor > 1.0);
5372   CHECK(old_gen_size > 0);
5373   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5374   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5375   limit += new_space_.Capacity();
5376   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5377   return Min(limit, halfway_to_the_max);
5378 }
5379
5380
5381 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5382                                            int freed_global_handles) {
5383   const int kMaxHandles = 1000;
5384   const int kMinHandles = 100;
5385   const double min_factor = 1.1;
5386   double max_factor = 4;
5387   const double idle_max_factor = 1.5;
5388   // Cap the maximum old generation growing factor at 2 so that the heap grows
5389   // more slowly on memory-constrained devices.
5390   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5391     max_factor = 2;
5392   }
5393
5394   // If there are many freed global handles, then the next full GC will
5395   // likely collect a lot of garbage. Choose the heap growing factor
5396   // depending on freed global handles.
5397   // TODO(ulan, hpayer): Take into account mutator utilization.
5398   // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
5399   // Look into that.
5400   double factor;
5401   if (freed_global_handles <= kMinHandles) {
5402     factor = max_factor;
5403   } else if (freed_global_handles >= kMaxHandles) {
5404     factor = min_factor;
5405   } else {
5406     // Compute factor using linear interpolation between points
5407     // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5408     factor = max_factor -
5409              (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5410                  (kMaxHandles - kMinHandles);
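    // For example, with max_factor == 4 and 550 freed handles this yields
    // factor == 4 - 450 * 2.9 / 900 == 2.55.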
5411   }
5412
5413   if (FLAG_stress_compaction ||
5414       mark_compact_collector()->reduce_memory_footprint_) {
5415     factor = min_factor;
5416   }
5417
5418   old_generation_allocation_limit_ =
5419       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5420   idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5421       Min(factor, idle_max_factor), old_gen_size);
5422 }
5423
5424
5425 void Heap::EnableInlineAllocation() {
5426   if (!inline_allocation_disabled_) return;
5427   inline_allocation_disabled_ = false;
5428
5429   // Update inline allocation limit for new space.
5430   new_space()->UpdateInlineAllocationLimit(0);
5431 }
5432
5433
5434 void Heap::DisableInlineAllocation() {
5435   if (inline_allocation_disabled_) return;
5436   inline_allocation_disabled_ = true;
5437
5438   // Update inline allocation limit for new space.
5439   new_space()->UpdateInlineAllocationLimit(0);
5440
5441   // Update inline allocation limit for old spaces.
5442   PagedSpaces spaces(this);
5443   for (PagedSpace* space = spaces.next(); space != NULL;
5444        space = spaces.next()) {
5445     space->EmptyAllocationInfo();
5446   }
5447 }
5448
5449
5450 V8_DECLARE_ONCE(initialize_gc_once);
5451
5452 static void InitializeGCOnce() {
5453   InitializeScavengingVisitorsTables();
5454   NewSpaceScavenger::Initialize();
5455   MarkCompactCollector::Initialize();
5456 }
5457
5458
5459 bool Heap::SetUp() {
5460 #ifdef DEBUG
5461   allocation_timeout_ = FLAG_gc_interval;
5462 #endif
5463
5464   // Initialize heap spaces and initial maps and objects. Whenever something
5465   // goes wrong, just return false. The caller should check the results and
5466   // call Heap::TearDown() to release allocated memory.
5467   //
5468   // If the heap is not yet configured (e.g. through the API), configure it.
5469   // Configuration is based on the flags new-space-size (really the semispace
5470   // size) and old-space-size if set or the initial values of semispace_size_
5471   // and old_generation_size_ otherwise.
5472   if (!configured_) {
5473     if (!ConfigureHeapDefault()) return false;
5474   }
5475
5476   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5477
5478   MarkMapPointersAsEncoded(false);
5479
5480   // Set up memory allocator.
5481   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5482     return false;
5483
5484   // Set up new space.
5485   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5486     return false;
5487   }
5488   new_space_top_after_last_gc_ = new_space()->top();
5489
5490   // Initialize old pointer space.
5491   old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5492                                     OLD_POINTER_SPACE, NOT_EXECUTABLE);
5493   if (old_pointer_space_ == NULL) return false;
5494   if (!old_pointer_space_->SetUp()) return false;
5495
5496   // Initialize old data space.
5497   old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5498                                  NOT_EXECUTABLE);
5499   if (old_data_space_ == NULL) return false;
5500   if (!old_data_space_->SetUp()) return false;
5501
5502   if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5503
5504   // Initialize the code space, set its maximum capacity to the old
5505   // generation size. It needs executable memory.
5506   code_space_ =
5507       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5508   if (code_space_ == NULL) return false;
5509   if (!code_space_->SetUp()) return false;
5510
5511   // Initialize map space.
5512   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5513   if (map_space_ == NULL) return false;
5514   if (!map_space_->SetUp()) return false;
5515
5516   // Initialize simple cell space.
5517   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5518   if (cell_space_ == NULL) return false;
5519   if (!cell_space_->SetUp()) return false;
5520
5521   // Initialize global property cell space.
5522   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5523                                                PROPERTY_CELL_SPACE);
5524   if (property_cell_space_ == NULL) return false;
5525   if (!property_cell_space_->SetUp()) return false;
5526
5527   // The large object space may contain code or data.  We set the memory
5528   // to be non-executable here for safety, but this means we need to enable it
5529   // explicitly when allocating large code objects.
5530   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5531   if (lo_space_ == NULL) return false;
5532   if (!lo_space_->SetUp()) return false;
5533
5534   // Set up the seed that is used to randomize the string hash function.
5535   DCHECK(hash_seed() == 0);
5536   if (FLAG_randomize_hashes) {
5537     if (FLAG_hash_seed == 0) {
5538       int rnd = isolate()->random_number_generator()->NextInt();
5539       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5540     } else {
5541       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5542     }
5543   }
5544
5545   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5546   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5547
5548   store_buffer()->SetUp();
5549
5550   mark_compact_collector()->SetUp();
5551
5552   return true;
5553 }
5554
5555
5556 bool Heap::CreateHeapObjects() {
5557   // Create initial maps.
5558   if (!CreateInitialMaps()) return false;
5559   CreateApiObjects();
5560
5561   // Create initial objects
5562   CreateInitialObjects();
5563   CHECK_EQ(0u, gc_count_);
5564
5565   set_native_contexts_list(undefined_value());
5566   set_array_buffers_list(undefined_value());
5567   set_allocation_sites_list(undefined_value());
5568   return true;
5569 }
5570
5571
5572 void Heap::SetStackLimits() {
5573   DCHECK(isolate_ != NULL);
5574   DCHECK(isolate_ == isolate());
5575   // On 64 bit machines, pointers are generally out of range of Smis.  We write
5576   // something that looks like an out of range Smi to the GC.
5577
5578   // Set up the special root array entries containing the stack limits.
5579   // These are actually addresses, but the tag makes the GC ignore it.
5580   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5581       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5582   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5583       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5584 }
5585
5586
5587 void Heap::NotifyDeserializationComplete() {
5588   deserialization_complete_ = true;
5589 #ifdef DEBUG
5590   // All pages right after bootstrapping must be marked as never-evacuate.
5591   PagedSpaces spaces(this);
5592   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5593     PageIterator it(s);
5594     while (it.has_next()) CHECK(it.next()->NeverEvacuate());
5595   }
5596 #endif  // DEBUG
5597 }
5598
5599
5600 void Heap::TearDown() {
5601 #ifdef VERIFY_HEAP
5602   if (FLAG_verify_heap) {
5603     Verify();
5604   }
5605 #endif
5606
5607   UpdateMaximumCommitted();
5608
5609   if (FLAG_print_cumulative_gc_stat) {
5610     PrintF("\n");
5611     PrintF("gc_count=%d ", gc_count_);
5612     PrintF("mark_sweep_count=%d ", ms_count_);
5613     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5614     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5615     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5616     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5617     PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5618     PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5619     PrintF("\n\n");
5620   }
5621
5622   if (FLAG_print_max_heap_committed) {
5623     PrintF("\n");
5624     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5625            MaximumCommittedMemory());
5626     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5627            new_space_.MaximumCommittedMemory());
5628     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5629            old_data_space_->MaximumCommittedMemory());
5630     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5631            old_pointer_space_->MaximumCommittedMemory());
5632     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5633            old_pointer_space_->MaximumCommittedMemory());
5634     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5635            code_space_->MaximumCommittedMemory());
5636     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5637            map_space_->MaximumCommittedMemory());
5638     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5639            cell_space_->MaximumCommittedMemory());
5640     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5641            property_cell_space_->MaximumCommittedMemory());
5642     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5643            lo_space_->MaximumCommittedMemory());
5644     PrintF("\n\n");
5645   }
5646
5647   if (FLAG_verify_predictable) {
5648     PrintAlloctionsHash();
5649   }
5650
5651   TearDownArrayBuffers();
5652
5653   isolate_->global_handles()->TearDown();
5654
5655   external_string_table_.TearDown();
5656
5657   mark_compact_collector()->TearDown();
5658
5659   new_space_.TearDown();
5660
5661   if (old_pointer_space_ != NULL) {
5662     old_pointer_space_->TearDown();
5663     delete old_pointer_space_;
5664     old_pointer_space_ = NULL;
5665   }
5666
5667   if (old_data_space_ != NULL) {
5668     old_data_space_->TearDown();
5669     delete old_data_space_;
5670     old_data_space_ = NULL;
5671   }
5672
5673   if (code_space_ != NULL) {
5674     code_space_->TearDown();
5675     delete code_space_;
5676     code_space_ = NULL;
5677   }
5678
5679   if (map_space_ != NULL) {
5680     map_space_->TearDown();
5681     delete map_space_;
5682     map_space_ = NULL;
5683   }
5684
5685   if (cell_space_ != NULL) {
5686     cell_space_->TearDown();
5687     delete cell_space_;
5688     cell_space_ = NULL;
5689   }
5690
5691   if (property_cell_space_ != NULL) {
5692     property_cell_space_->TearDown();
5693     delete property_cell_space_;
5694     property_cell_space_ = NULL;
5695   }
5696
5697   if (lo_space_ != NULL) {
5698     lo_space_->TearDown();
5699     delete lo_space_;
5700     lo_space_ = NULL;
5701   }
5702
5703   store_buffer()->TearDown();
5704
5705   isolate_->memory_allocator()->TearDown();
5706 }
5707
5708
5709 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5710                                  GCType gc_type, bool pass_isolate) {
5711   DCHECK(callback != NULL);
5712   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5713   DCHECK(!gc_prologue_callbacks_.Contains(pair));
5714   return gc_prologue_callbacks_.Add(pair);
5715 }
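     // Illustrative embedder-side registration (a sketch, not part of this
     // file; it goes through the public v8::Isolate API, which ends up in
     // Heap::AddGCPrologueCallback above):
     //
     //   void OnGCStart(v8::Isolate* isolate, v8::GCType type,
     //                  v8::GCCallbackFlags flags) {
     //     // e.g. record a timestamp or bump an embedder counter.
     //   }
     //   ...
     //   isolate->AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);
     //
     // The gc_type argument filters which kinds of collection invoke the
     // callback; pass_isolate distinguishes the isolate-taking callback
     // signature from the legacy one that omits the isolate.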
5716
5717
5718 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5719   DCHECK(callback != NULL);
5720   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5721     if (gc_prologue_callbacks_[i].callback == callback) {
5722       gc_prologue_callbacks_.Remove(i);
5723       return;
5724     }
5725   }
5726   UNREACHABLE();
5727 }
5728
5729
5730 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5731                                  GCType gc_type, bool pass_isolate) {
5732   DCHECK(callback != NULL);
5733   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5734   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5735   return gc_epilogue_callbacks_.Add(pair);
5736 }
5737
5738
5739 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5740   DCHECK(callback != NULL);
5741   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5742     if (gc_epilogue_callbacks_[i].callback == callback) {
5743       gc_epilogue_callbacks_.Remove(i);
5744       return;
5745     }
5746   }
5747   UNREACHABLE();
5748 }
5749
5750
5751 // TODO(ishell): Find a better place for this.
5752 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5753                                          Handle<DependentCode> dep) {
5754   DCHECK(!InNewSpace(*obj));
5755   DCHECK(!InNewSpace(*dep));
5756   Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5757   table = WeakHashTable::Put(table, obj, dep);
5758   if (*table != weak_object_to_code_table())
5759     set_weak_object_to_code_table(*table);
5760   DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5761 }
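     // This table associates a heap object with the DependentCode list that,
     // roughly speaking, has to be deoptimized if the object is ever collected;
     // WeakHashTable::Put may reallocate the table, hence the write-back above.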
5762
5763
5764 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5765   Object* dep = weak_object_to_code_table()->Lookup(obj);
5766   if (dep->IsDependentCode()) return DependentCode::cast(dep);
5767   return DependentCode::cast(empty_fixed_array());
5768 }
5769
5770
5771 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5772   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5773 }
5774
5775 #ifdef DEBUG
5776
5777 class PrintHandleVisitor : public ObjectVisitor {
5778  public:
5779   void VisitPointers(Object** start, Object** end) {
5780     for (Object** p = start; p < end; p++)
5781       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5782              reinterpret_cast<void*>(*p));
5783   }
5784 };
5785
5786
5787 void Heap::PrintHandles() {
5788   PrintF("Handles:\n");
5789   PrintHandleVisitor v;
5790   isolate_->handle_scope_implementer()->Iterate(&v);
5791 }
5792
5793 #endif
5794
5795
5796 Space* AllSpaces::next() {
5797   switch (counter_++) {
5798     case NEW_SPACE:
5799       return heap_->new_space();
5800     case OLD_POINTER_SPACE:
5801       return heap_->old_pointer_space();
5802     case OLD_DATA_SPACE:
5803       return heap_->old_data_space();
5804     case CODE_SPACE:
5805       return heap_->code_space();
5806     case MAP_SPACE:
5807       return heap_->map_space();
5808     case CELL_SPACE:
5809       return heap_->cell_space();
5810     case PROPERTY_CELL_SPACE:
5811       return heap_->property_cell_space();
5812     case LO_SPACE:
5813       return heap_->lo_space();
5814     default:
5815       return NULL;
5816   }
5817 }
5818
5819
5820 PagedSpace* PagedSpaces::next() {
5821   switch (counter_++) {
5822     case OLD_POINTER_SPACE:
5823       return heap_->old_pointer_space();
5824     case OLD_DATA_SPACE:
5825       return heap_->old_data_space();
5826     case CODE_SPACE:
5827       return heap_->code_space();
5828     case MAP_SPACE:
5829       return heap_->map_space();
5830     case CELL_SPACE:
5831       return heap_->cell_space();
5832     case PROPERTY_CELL_SPACE:
5833       return heap_->property_cell_space();
5834     default:
5835       return NULL;
5836   }
5837 }
5838
5839
5840 OldSpace* OldSpaces::next() {
5841   switch (counter_++) {
5842     case OLD_POINTER_SPACE:
5843       return heap_->old_pointer_space();
5844     case OLD_DATA_SPACE:
5845       return heap_->old_data_space();
5846     case CODE_SPACE:
5847       return heap_->code_space();
5848     default:
5849       return NULL;
5850   }
5851 }
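     // These helpers are usually driven in a loop of the form
     //
     //   PagedSpaces spaces(heap);
     //   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
     //     // ... per-space work ...
     //   }
     //
     // (see Heap::NotifyDeserializationComplete above); next() returns NULL once
     // the counter has advanced past the last space the helper covers.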
5852
5853
5854 SpaceIterator::SpaceIterator(Heap* heap)
5855     : heap_(heap),
5856       current_space_(FIRST_SPACE),
5857       iterator_(NULL),
5858       size_func_(NULL) {}
5859
5860
5861 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5862     : heap_(heap),
5863       current_space_(FIRST_SPACE),
5864       iterator_(NULL),
5865       size_func_(size_func) {}
5866
5867
5868 SpaceIterator::~SpaceIterator() {
5869   // Delete active iterator if any.
5870   delete iterator_;
5871 }
5872
5873
5874 bool SpaceIterator::has_next() {
5875   // Iterate until no more spaces.
5876   return current_space_ != LAST_SPACE;
5877 }
5878
5879
5880 ObjectIterator* SpaceIterator::next() {
5881   if (iterator_ != NULL) {
5882     delete iterator_;
5883     iterator_ = NULL;
5884     // Move to the next space.
5885     current_space_++;
5886     if (current_space_ > LAST_SPACE) {
5887       return NULL;
5888     }
5889   }
5890
5891   // Return iterator for the new current space.
5892   return CreateIterator();
5893 }
5894
5895
5896 // Create an object iterator for the current space.
5897 ObjectIterator* SpaceIterator::CreateIterator() {
5898   DCHECK(iterator_ == NULL);
5899
5900   switch (current_space_) {
5901     case NEW_SPACE:
5902       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5903       break;
5904     case OLD_POINTER_SPACE:
5905       iterator_ =
5906           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5907       break;
5908     case OLD_DATA_SPACE:
5909       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5910       break;
5911     case CODE_SPACE:
5912       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5913       break;
5914     case MAP_SPACE:
5915       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5916       break;
5917     case CELL_SPACE:
5918       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5919       break;
5920     case PROPERTY_CELL_SPACE:
5921       iterator_ =
5922           new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5923       break;
5924     case LO_SPACE:
5925       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5926       break;
5927   }
5928
5929   // Return the newly allocated iterator.
5930   DCHECK(iterator_ != NULL);
5931   return iterator_;
5932 }
5933
5934
5935 class HeapObjectsFilter {
5936  public:
5937   virtual ~HeapObjectsFilter() {}
5938   virtual bool SkipObject(HeapObject* object) = 0;
5939 };
5940
5941
5942 class UnreachableObjectsFilter : public HeapObjectsFilter {
5943  public:
5944   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5945     MarkReachableObjects();
5946   }
5947
5948   ~UnreachableObjectsFilter() {
5949     heap_->mark_compact_collector()->ClearMarkbits();
5950   }
5951
5952   bool SkipObject(HeapObject* object) {
5953     MarkBit mark_bit = Marking::MarkBitFrom(object);
5954     return !mark_bit.Get();
5955   }
5956
5957  private:
5958   class MarkingVisitor : public ObjectVisitor {
5959    public:
5960     MarkingVisitor() : marking_stack_(10) {}
5961
5962     void VisitPointers(Object** start, Object** end) {
5963       for (Object** p = start; p < end; p++) {
5964         if (!(*p)->IsHeapObject()) continue;
5965         HeapObject* obj = HeapObject::cast(*p);
5966         MarkBit mark_bit = Marking::MarkBitFrom(obj);
5967         if (!mark_bit.Get()) {
5968           mark_bit.Set();
5969           marking_stack_.Add(obj);
5970         }
5971       }
5972     }
5973
5974     void TransitiveClosure() {
5975       while (!marking_stack_.is_empty()) {
5976         HeapObject* obj = marking_stack_.RemoveLast();
5977         obj->Iterate(this);
5978       }
5979     }
5980
5981    private:
5982     List<HeapObject*> marking_stack_;
5983   };
5984
5985   void MarkReachableObjects() {
5986     MarkingVisitor visitor;
5987     heap_->IterateRoots(&visitor, VISIT_ALL);
5988     visitor.TransitiveClosure();
5989   }
5990
5991   Heap* heap_;
5992   DisallowHeapAllocation no_allocation_;
5993 };
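     // Note: the filter above performs a full transitive marking pass from the
     // roots in its constructor and clears all mark bits again in its
     // destructor, so it is only meant to be used while no GC can run; hence
     // the DisallowHeapAllocation member.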
5994
5995
5996 HeapIterator::HeapIterator(Heap* heap)
5997     : make_heap_iterable_helper_(heap),
5998       no_heap_allocation_(),
5999       heap_(heap),
6000       filtering_(HeapIterator::kNoFiltering),
6001       filter_(NULL) {
6002   Init();
6003 }
6004
6005
6006 HeapIterator::HeapIterator(Heap* heap,
6007                            HeapIterator::HeapObjectsFiltering filtering)
6008     : make_heap_iterable_helper_(heap),
6009       no_heap_allocation_(),
6010       heap_(heap),
6011       filtering_(filtering),
6012       filter_(NULL) {
6013   Init();
6014 }
6015
6016
6017 HeapIterator::~HeapIterator() { Shutdown(); }
6018
6019
6020 void HeapIterator::Init() {
6021   // Start the iteration.
6022   space_iterator_ = new SpaceIterator(heap_);
6023   switch (filtering_) {
6024     case kFilterUnreachable:
6025       filter_ = new UnreachableObjectsFilter(heap_);
6026       break;
6027     default:
6028       break;
6029   }
6030   object_iterator_ = space_iterator_->next();
6031 }
6032
6033
6034 void HeapIterator::Shutdown() {
6035 #ifdef DEBUG
6036   // Assert that in filtering mode we have iterated through all
6037   // objects. Otherwise, the heap will be left in an inconsistent state.
6038   if (filtering_ != kNoFiltering) {
6039     DCHECK(object_iterator_ == NULL);
6040   }
6041 #endif
6042   // Make sure the last iterator is deallocated.
6043   delete space_iterator_;
6044   space_iterator_ = NULL;
6045   object_iterator_ = NULL;
6046   delete filter_;
6047   filter_ = NULL;
6048 }
6049
6050
6051 HeapObject* HeapIterator::next() {
6052   if (filter_ == NULL) return NextObject();
6053
6054   HeapObject* obj = NextObject();
6055   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6056   return obj;
6057 }
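     // Typical use of the iterator (sketch):
     //
     //   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
     //   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
     //     // ... inspect obj ...
     //   }
     //
     // With kFilterUnreachable, next() consults the UnreachableObjectsFilter
     // above and silently skips objects the marking pass did not reach.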
6058
6059
6060 HeapObject* HeapIterator::NextObject() {
6061   // No iterator means we are done.
6062   if (object_iterator_ == NULL) return NULL;
6063
6064   if (HeapObject* obj = object_iterator_->next_object()) {
6065     // If the current iterator has more objects we are fine.
6066     return obj;
6067   } else {
6068     // Go through the spaces looking for one that has objects.
6069     while (space_iterator_->has_next()) {
6070       object_iterator_ = space_iterator_->next();
6071       if (HeapObject* obj = object_iterator_->next_object()) {
6072         return obj;
6073       }
6074     }
6075   }
6076   // Done with the last space.
6077   object_iterator_ = NULL;
6078   return NULL;
6079 }
6080
6081
6082 void HeapIterator::reset() {
6083   // Restart the iterator.
6084   Shutdown();
6085   Init();
6086 }
6087
6088
6089 #ifdef DEBUG
6090
6091 Object* const PathTracer::kAnyGlobalObject = NULL;
6092
6093 class PathTracer::MarkVisitor : public ObjectVisitor {
6094  public:
6095   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6096   void VisitPointers(Object** start, Object** end) {
6097     // Scan all HeapObject pointers in [start, end)
6098     for (Object** p = start; !tracer_->found() && (p < end); p++) {
6099       if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
6100     }
6101   }
6102
6103  private:
6104   PathTracer* tracer_;
6105 };
6106
6107
6108 class PathTracer::UnmarkVisitor : public ObjectVisitor {
6109  public:
6110   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6111   void VisitPointers(Object** start, Object** end) {
6112     // Scan all HeapObject pointers in [start, end)
6113     for (Object** p = start; p < end; p++) {
6114       if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
6115     }
6116   }
6117
6118  private:
6119   PathTracer* tracer_;
6120 };
6121
6122
6123 void PathTracer::VisitPointers(Object** start, Object** end) {
6124   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6125   // Visit all HeapObject pointers in [start, end)
6126   for (Object** p = start; !done && (p < end); p++) {
6127     if ((*p)->IsHeapObject()) {
6128       TracePathFrom(p);
6129       done = ((what_to_find_ == FIND_FIRST) && found_target_);
6130     }
6131   }
6132 }
6133
6134
6135 void PathTracer::Reset() {
6136   found_target_ = false;
6137   object_stack_.Clear();
6138 }
6139
6140
6141 void PathTracer::TracePathFrom(Object** root) {
6142   DCHECK((search_target_ == kAnyGlobalObject) ||
6143          search_target_->IsHeapObject());
6144   found_target_in_trace_ = false;
6145   Reset();
6146
6147   MarkVisitor mark_visitor(this);
6148   MarkRecursively(root, &mark_visitor);
6149
6150   UnmarkVisitor unmark_visitor(this);
6151   UnmarkRecursively(root, &unmark_visitor);
6152
6153   ProcessResults();
6154 }
6155
6156
6157 static bool SafeIsNativeContext(HeapObject* obj) {
6158   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6159 }
6160
6161
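     // MarkRecursively/UnmarkRecursively below "mark" an object by adding
     // kMarkTag to its raw map word.  The tagged word no longer passes the
     // IsHeapObject() check on map_word.ToMap(), which is how already-visited
     // objects are detected; UnmarkRecursively subtracts the tag again to
     // restore the original map word.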
6162 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6163   if (!(*p)->IsHeapObject()) return;
6164
6165   HeapObject* obj = HeapObject::cast(*p);
6166
6167   MapWord map_word = obj->map_word();
6168   if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
6169
6170   if (found_target_in_trace_) return;  // stop if target found
6171   object_stack_.Add(obj);
6172   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6173       (obj == search_target_)) {
6174     found_target_in_trace_ = true;
6175     found_target_ = true;
6176     return;
6177   }
6178
6179   bool is_native_context = SafeIsNativeContext(obj);
6180
6181   // not visited yet
6182   Map* map = Map::cast(map_word.ToMap());
6183
6184   MapWord marked_map_word =
6185       MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6186   obj->set_map_word(marked_map_word);
6187
6188   // Scan the object body.
6189   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6190     // This is specialized to scan Contexts properly.
6191     Object** start =
6192         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6193     Object** end =
6194         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6195                                    Context::FIRST_WEAK_SLOT * kPointerSize);
6196     mark_visitor->VisitPointers(start, end);
6197   } else {
6198     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6199   }
6200
6201   // Scan the map after the body because the body is a lot more interesting
6202   // when doing leak detection.
6203   MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6204
6205   if (!found_target_in_trace_) {  // don't pop if found the target
6206     object_stack_.RemoveLast();
6207   }
6208 }
6209
6210
6211 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6212   if (!(*p)->IsHeapObject()) return;
6213
6214   HeapObject* obj = HeapObject::cast(*p);
6215
6216   MapWord map_word = obj->map_word();
6217   if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
6218
6219   MapWord unmarked_map_word =
6220       MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6221   obj->set_map_word(unmarked_map_word);
6222
6223   Map* map = Map::cast(unmarked_map_word.ToMap());
6224
6225   UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6226
6227   obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6228 }
6229
6230
6231 void PathTracer::ProcessResults() {
6232   if (found_target_) {
6233     OFStream os(stdout);
6234     os << "=====================================\n"
6235        << "====        Path to object       ====\n"
6236        << "=====================================\n\n";
6237
6238     DCHECK(!object_stack_.is_empty());
6239     for (int i = 0; i < object_stack_.length(); i++) {
6240       if (i > 0) os << "\n     |\n     |\n     V\n\n";
6241       object_stack_[i]->Print(os);
6242     }
6243     os << "=====================================\n";
6244   }
6245 }
6246
6247
6248 // Triggers a depth-first traversal of reachable objects from one
6249 // given root object and finds a path to a specific heap object and
6250 // prints it.
6251 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6252   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6253   tracer.VisitPointer(&root);
6254 }
6255
6256
6257 // Triggers a depth-first traversal of reachable objects from roots
6258 // and finds a path to a specific heap object and prints it.
6259 void Heap::TracePathToObject(Object* target) {
6260   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6261   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6262 }
6263
6264
6265 // Triggers a depth-first traversal of reachable objects from roots
6266 // and finds a path to any global object and prints it. Useful for
6267 // determining the source for leaks of global objects.
6268 void Heap::TracePathToGlobal() {
6269   PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6270                     VISIT_ALL);
6271   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6272 }
6273 #endif
6274
6275
6276 void Heap::UpdateCumulativeGCStatistics(double duration,
6277                                         double spent_in_mutator,
6278                                         double marking_time) {
6279   if (FLAG_print_cumulative_gc_stat) {
6280     total_gc_time_ms_ += duration;
6281     max_gc_pause_ = Max(max_gc_pause_, duration);
6282     max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6283     min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6284   } else if (FLAG_trace_gc_verbose) {
6285     total_gc_time_ms_ += duration;
6286   }
6287
6288   marking_time_ += marking_time;
6289 }
6290
6291
6292 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6293   DisallowHeapAllocation no_gc;
6294   // Uses only lower 32 bits if pointers are larger.
6295   uintptr_t addr_hash =
6296       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6297   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6298 }
6299
6300
6301 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6302   DisallowHeapAllocation no_gc;
6303   int index = (Hash(map, name) & kHashMask);
6304   for (int i = 0; i < kEntriesPerBucket; i++) {
6305     Key& key = keys_[index + i];
6306     if ((key.map == *map) && key.name->Equals(*name)) {
6307       return field_offsets_[index + i];
6308     }
6309   }
6310   return kNotFound;
6311 }
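     // The cache is a set of buckets of kEntriesPerBucket (map, name) keys that
     // are probed linearly; Update() below keeps each bucket in roughly
     // most-recently-used order by shifting entries down and writing the new
     // entry at position zero when no free slot is found.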
6312
6313
6314 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6315                               int field_offset) {
6316   DisallowHeapAllocation no_gc;
6317   if (!name->IsUniqueName()) {
6318     if (!StringTable::InternalizeStringIfExists(
6319              name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6320       return;
6321     }
6322   }
6323   // This cache is cleared only between mark compact passes, so we expect the
6324   // cache to only contain old space names.
6325   DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6326
6327   int index = (Hash(map, name) & kHashMask);
6328   // After a GC there will be free slots, so we use them in order (this may
6329   // help to get the most frequently used one in position 0).
6330   for (int i = 0; i < kEntriesPerBucket; i++) {
6331     Key& key = keys_[index];
6332     Object* free_entry_indicator = NULL;
6333     if (key.map == free_entry_indicator) {
6334       key.map = *map;
6335       key.name = *name;
6336       field_offsets_[index + i] = field_offset;
6337       return;
6338     }
6339   }
6340   // No free entry found in this bucket, so we move them all down one and
6341   // put the new entry at position zero.
6342   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6343     Key& key = keys_[index + i];
6344     Key& key2 = keys_[index + i - 1];
6345     key = key2;
6346     field_offsets_[index + i] = field_offsets_[index + i - 1];
6347   }
6348
6349   // Write the new first entry.
6350   Key& key = keys_[index];
6351   key.map = *map;
6352   key.name = *name;
6353   field_offsets_[index] = field_offset;
6354 }
6355
6356
6357 void KeyedLookupCache::Clear() {
6358   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6359 }
6360
6361
6362 void DescriptorLookupCache::Clear() {
6363   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6364 }
6365
6366
6367 void ExternalStringTable::CleanUp() {
6368   int last = 0;
6369   for (int i = 0; i < new_space_strings_.length(); ++i) {
6370     if (new_space_strings_[i] == heap_->the_hole_value()) {
6371       continue;
6372     }
6373     DCHECK(new_space_strings_[i]->IsExternalString());
6374     if (heap_->InNewSpace(new_space_strings_[i])) {
6375       new_space_strings_[last++] = new_space_strings_[i];
6376     } else {
6377       old_space_strings_.Add(new_space_strings_[i]);
6378     }
6379   }
6380   new_space_strings_.Rewind(last);
6381   new_space_strings_.Trim();
6382
6383   last = 0;
6384   for (int i = 0; i < old_space_strings_.length(); ++i) {
6385     if (old_space_strings_[i] == heap_->the_hole_value()) {
6386       continue;
6387     }
6388     DCHECK(old_space_strings_[i]->IsExternalString());
6389     DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6390     old_space_strings_[last++] = old_space_strings_[i];
6391   }
6392   old_space_strings_.Rewind(last);
6393   old_space_strings_.Trim();
6394 #ifdef VERIFY_HEAP
6395   if (FLAG_verify_heap) {
6396     Verify();
6397   }
6398 #endif
6399 }
6400
6401
6402 void ExternalStringTable::TearDown() {
6403   for (int i = 0; i < new_space_strings_.length(); ++i) {
6404     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6405   }
6406   new_space_strings_.Free();
6407   for (int i = 0; i < old_space_strings_.length(); ++i) {
6408     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6409   }
6410   old_space_strings_.Free();
6411 }
6412
6413
6414 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6415   chunk->set_next_chunk(chunks_queued_for_free_);
6416   chunks_queued_for_free_ = chunk;
6417 }
6418
6419
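     // FreeQueuedChunks works in two passes: the first pass flags every queued
     // chunk as ABOUT_TO_BE_FREED (splitting large chunks into page-sized fake
     // headers so the store buffer can still classify stale slots), then the
     // store buffer is compacted and filtered, and only the second pass returns
     // the chunks to the memory allocator.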
6420 void Heap::FreeQueuedChunks() {
6421   if (chunks_queued_for_free_ == NULL) return;
6422   MemoryChunk* next;
6423   MemoryChunk* chunk;
6424   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6425     next = chunk->next_chunk();
6426     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6427
6428     if (chunk->owner()->identity() == LO_SPACE) {
6429       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6430       // If FromAnyPointerAddress encounters a slot that belongs to a large
6431       // chunk queued for deletion, it will fail to find the chunk because it
6432       // searches the list of pages owned by the large object space, and the
6433       // queued chunks have been detached from that list.  To work around
6434       // this we split the large chunk into normal kPageSize-aligned pieces
6435       // and initialize the size, owner and flags fields of every piece.  If
6436       // FromAnyPointerAddress then encounters a slot that belongs to one of
6437       // these smaller pieces, it will treat it as a slot on a normal Page.
6438       Address chunk_end = chunk->address() + chunk->size();
6439       MemoryChunk* inner =
6440           MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6441       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6442       while (inner <= inner_last) {
6443         // Size of a large chunk is always a multiple of
6444         // OS::AllocateAlignment() so there is always
6445         // enough space for a fake MemoryChunk header.
6446         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6447         // Guard against overflow.
6448         if (area_end < inner->address()) area_end = chunk_end;
6449         inner->SetArea(inner->address(), area_end);
6450         inner->set_size(Page::kPageSize);
6451         inner->set_owner(lo_space());
6452         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6453         inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6454       }
6455     }
6456   }
6457   isolate_->heap()->store_buffer()->Compact();
6458   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6459   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6460     next = chunk->next_chunk();
6461     isolate_->memory_allocator()->Free(chunk);
6462   }
6463   chunks_queued_for_free_ = NULL;
6464 }
6465
6466
6467 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6468   uintptr_t p = reinterpret_cast<uintptr_t>(page);
6469   // Tag the page pointer to make it findable in the dump file.
6470   if (compacted) {
6471     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
6472   } else {
6473     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
6474   }
6475   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6476       reinterpret_cast<Address>(p);
6477   remembered_unmapped_pages_index_++;
6478   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6479 }
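     // The ring buffer above records the addresses of the last
     // kRememberedUnmappedPages unmapped pages, XOR-tagged with a recognizable
     // bit pattern so they stand out when inspecting a crash dump.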
6480
6481
6482 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6483   memset(object_counts_, 0, sizeof(object_counts_));
6484   memset(object_sizes_, 0, sizeof(object_sizes_));
6485   if (clear_last_time_stats) {
6486     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6487     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6488   }
6489 }
6490
6491
6492 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6493
6494
6495 void Heap::CheckpointObjectStats() {
6496   base::LockGuard<base::Mutex> lock_guard(
6497       checkpoint_object_stats_mutex.Pointer());
6498   Counters* counters = isolate()->counters();
6499 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
6500   counters->count_of_##name()->Increment(                \
6501       static_cast<int>(object_counts_[name]));           \
6502   counters->count_of_##name()->Decrement(                \
6503       static_cast<int>(object_counts_last_time_[name])); \
6504   counters->size_of_##name()->Increment(                 \
6505       static_cast<int>(object_sizes_[name]));            \
6506   counters->size_of_##name()->Decrement(                 \
6507       static_cast<int>(object_sizes_last_time_[name]));
6508   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6509 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6510   int index;
6511 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6512   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
6513   counters->count_of_CODE_TYPE_##name()->Increment(       \
6514       static_cast<int>(object_counts_[index]));           \
6515   counters->count_of_CODE_TYPE_##name()->Decrement(       \
6516       static_cast<int>(object_counts_last_time_[index])); \
6517   counters->size_of_CODE_TYPE_##name()->Increment(        \
6518       static_cast<int>(object_sizes_[index]));            \
6519   counters->size_of_CODE_TYPE_##name()->Decrement(        \
6520       static_cast<int>(object_sizes_last_time_[index]));
6521   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6522 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6523 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6524   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
6525   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
6526       static_cast<int>(object_counts_[index]));           \
6527   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
6528       static_cast<int>(object_counts_last_time_[index])); \
6529   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
6530       static_cast<int>(object_sizes_[index]));            \
6531   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
6532       static_cast<int>(object_sizes_last_time_[index]));
6533   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6534 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6535 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
6536   index =                                                                     \
6537       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6538   counters->count_of_CODE_AGE_##name()->Increment(                            \
6539       static_cast<int>(object_counts_[index]));                               \
6540   counters->count_of_CODE_AGE_##name()->Decrement(                            \
6541       static_cast<int>(object_counts_last_time_[index]));                     \
6542   counters->size_of_CODE_AGE_##name()->Increment(                             \
6543       static_cast<int>(object_sizes_[index]));                                \
6544   counters->size_of_CODE_AGE_##name()->Decrement(                             \
6545       static_cast<int>(object_sizes_last_time_[index]));
6546   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6547 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6548
6549   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6550   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6551   ClearObjectStats();
6552 }
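     // Each counter above is adjusted by the delta since the previous
     // checkpoint: incremented by the freshly gathered totals and decremented by
     // the values recorded last time, after which the "last time" arrays are
     // refreshed and the working arrays cleared.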
6553 }
6554 }  // namespace v8::internal