deps: update v8 to 4.3.61.21
deps/v8/src/heap/heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/runtime-profiler.h"
29 #include "src/scopeinfo.h"
30 #include "src/snapshot/natives.h"
31 #include "src/snapshot/serialize.h"
32 #include "src/snapshot/snapshot.h"
33 #include "src/utils.h"
34 #include "src/v8threads.h"
35 #include "src/vm-state-inl.h"
36
37 #if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP
38 #include "src/regexp-macro-assembler.h"          // NOLINT
39 #include "src/ppc/regexp-macro-assembler-ppc.h"  // NOLINT
40 #endif
41 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
42 #include "src/regexp-macro-assembler.h"          // NOLINT
43 #include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
44 #endif
45 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
46 #include "src/regexp-macro-assembler.h"            // NOLINT
47 #include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
48 #endif
49 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
50 #include "src/regexp-macro-assembler.h"
51 #include "src/mips64/regexp-macro-assembler-mips64.h"
52 #endif
53
54 namespace v8 {
55 namespace internal {
56
57
58 Heap::Heap()
59     : amount_of_external_allocated_memory_(0),
60       amount_of_external_allocated_memory_at_last_global_gc_(0),
61       isolate_(NULL),
62       code_range_size_(0),
63       // semispace_size_ should be a power of 2 and old_generation_size_ should
64       // be a multiple of Page::kPageSize.
65       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
66       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
67       initial_semispace_size_(Page::kPageSize),
68       target_semispace_size_(Page::kPageSize),
69       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
70       initial_old_generation_size_(max_old_generation_size_ /
71                                    kInitalOldGenerationLimitFactor),
72       old_generation_size_configured_(false),
73       max_executable_size_(256ul * (kPointerSize / 4) * MB),
74       // Variables set based on semispace_size_ and old_generation_size_ in
75       // ConfigureHeap.
76       // Will be 4 * reserved_semispace_size_ to ensure that young
77       // generation can be aligned to its size.
78       maximum_committed_(0),
79       survived_since_last_expansion_(0),
80       survived_last_scavenge_(0),
81       sweep_generation_(0),
82       always_allocate_scope_depth_(0),
83       contexts_disposed_(0),
84       global_ic_age_(0),
85       scan_on_scavenge_pages_(0),
86       new_space_(this),
87       old_pointer_space_(NULL),
88       old_data_space_(NULL),
89       code_space_(NULL),
90       map_space_(NULL),
91       cell_space_(NULL),
92       lo_space_(NULL),
93       gc_state_(NOT_IN_GC),
94       gc_post_processing_depth_(0),
95       allocations_count_(0),
96       raw_allocations_hash_(0),
97       dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
98       ms_count_(0),
99       gc_count_(0),
100       remembered_unmapped_pages_index_(0),
101       unflattened_strings_length_(0),
102 #ifdef DEBUG
103       allocation_timeout_(0),
104 #endif  // DEBUG
105       old_generation_allocation_limit_(initial_old_generation_size_),
106       idle_old_generation_allocation_limit_(
107           kMinimumOldGenerationAllocationLimit),
108       old_gen_exhausted_(false),
109       inline_allocation_disabled_(false),
110       store_buffer_rebuilder_(store_buffer()),
111       hidden_string_(NULL),
112       gc_safe_size_of_old_object_(NULL),
113       total_regexp_code_generated_(0),
114       tracer_(this),
115       high_survival_rate_period_length_(0),
116       promoted_objects_size_(0),
117       promotion_ratio_(0),
118       semi_space_copied_object_size_(0),
119       previous_semi_space_copied_object_size_(0),
120       semi_space_copied_rate_(0),
121       nodes_died_in_new_space_(0),
122       nodes_copied_in_new_space_(0),
123       nodes_promoted_(0),
124       maximum_size_scavenges_(0),
125       max_gc_pause_(0.0),
126       total_gc_time_ms_(0.0),
127       max_alive_after_gc_(0),
128       min_in_mutator_(kMaxInt),
129       marking_time_(0.0),
130       sweeping_time_(0.0),
131       last_idle_notification_time_(0.0),
132       mark_compact_collector_(this),
133       store_buffer_(this),
134       marking_(this),
135       incremental_marking_(this),
136       gc_count_at_last_idle_gc_(0),
137       full_codegen_bytes_generated_(0),
138       crankshaft_codegen_bytes_generated_(0),
139       gcs_since_last_deopt_(0),
140       allocation_sites_scratchpad_length_(0),
141       promotion_queue_(this),
142       configured_(false),
143       external_string_table_(this),
144       chunks_queued_for_free_(NULL),
145       gc_callbacks_depth_(0),
146       deserialization_complete_(false),
147       concurrent_sweeping_enabled_(false),
148       migration_failure_(false),
149       previous_migration_failure_(false) {
150 // Allow build-time customization of the max semispace size. Building
151 // V8 with snapshots and a non-default max semispace size is much
152 // easier if you can define it as part of the build environment.
153 #if defined(V8_MAX_SEMISPACE_SIZE)
154   max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
155 #endif
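  // With the defaults above, kPointerSize / 4 is 1 on 32-bit targets and 2 on
  // 64-bit targets, so the maximum semispace size starts out at roughly 8 MB
  // or 16 MB and the maximum old generation size at roughly 700 MB or
  // 1400 MB, before ConfigureHeap applies any flag-based adjustments.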
156
157   // Ensure old_generation_size_ is a multiple of kPageSize.
158   DCHECK(MB >= Page::kPageSize);
159
160   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
161   set_native_contexts_list(NULL);
162   set_array_buffers_list(Smi::FromInt(0));
163   set_allocation_sites_list(Smi::FromInt(0));
164   set_encountered_weak_collections(Smi::FromInt(0));
165   set_encountered_weak_cells(Smi::FromInt(0));
166   // Put a dummy entry in the remembered pages so we can find the list in
167   // the minidump even if there are no real unmapped pages.
168   RememberUnmappedPage(NULL, false);
169
170   ClearObjectStats(true);
171 }
172
173
174 intptr_t Heap::Capacity() {
175   if (!HasBeenSetUp()) return 0;
176
177   return new_space_.Capacity() + old_pointer_space_->Capacity() +
178          old_data_space_->Capacity() + code_space_->Capacity() +
179          map_space_->Capacity() + cell_space_->Capacity();
180 }
181
182
183 intptr_t Heap::CommittedOldGenerationMemory() {
184   if (!HasBeenSetUp()) return 0;
185
186   return old_pointer_space_->CommittedMemory() +
187          old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
188          map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
189          lo_space_->Size();
190 }
191
192
193 intptr_t Heap::CommittedMemory() {
194   if (!HasBeenSetUp()) return 0;
195
196   return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
197 }
198
199
200 size_t Heap::CommittedPhysicalMemory() {
201   if (!HasBeenSetUp()) return 0;
202
203   return new_space_.CommittedPhysicalMemory() +
204          old_pointer_space_->CommittedPhysicalMemory() +
205          old_data_space_->CommittedPhysicalMemory() +
206          code_space_->CommittedPhysicalMemory() +
207          map_space_->CommittedPhysicalMemory() +
208          cell_space_->CommittedPhysicalMemory() +
209          lo_space_->CommittedPhysicalMemory();
210 }
211
212
213 intptr_t Heap::CommittedMemoryExecutable() {
214   if (!HasBeenSetUp()) return 0;
215
216   return isolate()->memory_allocator()->SizeExecutable();
217 }
218
219
220 void Heap::UpdateMaximumCommitted() {
221   if (!HasBeenSetUp()) return;
222
223   intptr_t current_committed_memory = CommittedMemory();
224   if (current_committed_memory > maximum_committed_) {
225     maximum_committed_ = current_committed_memory;
226   }
227 }
228
229
230 intptr_t Heap::Available() {
231   if (!HasBeenSetUp()) return 0;
232
233   return new_space_.Available() + old_pointer_space_->Available() +
234          old_data_space_->Available() + code_space_->Available() +
235          map_space_->Available() + cell_space_->Available();
236 }
237
238
239 bool Heap::HasBeenSetUp() {
240   return old_pointer_space_ != NULL && old_data_space_ != NULL &&
241          code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
242          lo_space_ != NULL;
243 }
244
245
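// Returns the size of an old-space object in a GC-safe way: if the object has
// been intrusively marked, its map word may not be usable directly, so the
// size is recovered via IntrusiveMarking::SizeOfMarkedObject; otherwise it is
// computed from the object's map.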
246 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
247   if (IntrusiveMarking::IsMarked(object)) {
248     return IntrusiveMarking::SizeOfMarkedObject(object);
249   }
250   return object->SizeFromMap(object->map());
251 }
252
253
254 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
255                                               const char** reason) {
256   // Is global GC requested?
257   if (space != NEW_SPACE) {
258     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
259     *reason = "GC in old space requested";
260     return MARK_COMPACTOR;
261   }
262
263   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
264     *reason = "GC in old space forced by flags";
265     return MARK_COMPACTOR;
266   }
267
268   // Is enough data promoted to justify a global GC?
269   if (OldGenerationAllocationLimitReached()) {
270     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
271     *reason = "promotion limit reached";
272     return MARK_COMPACTOR;
273   }
274
275   // Have allocation in OLD and LO failed?
276   if (old_gen_exhausted_) {
277     isolate_->counters()
278         ->gc_compactor_caused_by_oldspace_exhaustion()
279         ->Increment();
280     *reason = "old generations exhausted";
281     return MARK_COMPACTOR;
282   }
283
284   // Is there enough space left in OLD to guarantee that a scavenge can
285   // succeed?
286   //
287   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
288   // for object promotion. It counts only the bytes that the memory
289   // allocator has not yet allocated from the OS and assigned to any space,
290   // and does not count available bytes already in the old space or code
291   // space.  Undercounting is safe---we may get an unrequested full GC when
292   // a scavenge would have succeeded.
293   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
294     isolate_->counters()
295         ->gc_compactor_caused_by_oldspace_exhaustion()
296         ->Increment();
297     *reason = "scavenge might not succeed";
298     return MARK_COMPACTOR;
299   }
300
301   // Default
302   *reason = NULL;
303   return SCAVENGER;
304 }
305
306
307 // TODO(1238405): Combine the infrastructure for --heap-stats and
308 // --log-gc to avoid the complicated preprocessor and flag testing.
309 void Heap::ReportStatisticsBeforeGC() {
310 // In DEBUG builds, Heap::ReportHeapStatistics will also log NewSpace
311 // statistics when --log-gc is set.  The following logic is used to avoid
312 // double logging.
313 #ifdef DEBUG
314   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
315   if (FLAG_heap_stats) {
316     ReportHeapStatistics("Before GC");
317   } else if (FLAG_log_gc) {
318     new_space_.ReportStatistics();
319   }
320   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
321 #else
322   if (FLAG_log_gc) {
323     new_space_.CollectStatistics();
324     new_space_.ReportStatistics();
325     new_space_.ClearHistograms();
326   }
327 #endif  // DEBUG
328 }
329
330
331 void Heap::PrintShortHeapStatistics() {
332   if (!FLAG_trace_gc_verbose) return;
333   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
334            "d KB"
335            ", available: %6" V8_PTR_PREFIX "d KB\n",
336            isolate_->memory_allocator()->Size() / KB,
337            isolate_->memory_allocator()->Available() / KB);
338   PrintPID("New space,          used: %6" V8_PTR_PREFIX
339            "d KB"
340            ", available: %6" V8_PTR_PREFIX
341            "d KB"
342            ", committed: %6" V8_PTR_PREFIX "d KB\n",
343            new_space_.Size() / KB, new_space_.Available() / KB,
344            new_space_.CommittedMemory() / KB);
345   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
346            "d KB"
347            ", available: %6" V8_PTR_PREFIX
348            "d KB"
349            ", committed: %6" V8_PTR_PREFIX "d KB\n",
350            old_pointer_space_->SizeOfObjects() / KB,
351            old_pointer_space_->Available() / KB,
352            old_pointer_space_->CommittedMemory() / KB);
353   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
354            "d KB"
355            ", available: %6" V8_PTR_PREFIX
356            "d KB"
357            ", committed: %6" V8_PTR_PREFIX "d KB\n",
358            old_data_space_->SizeOfObjects() / KB,
359            old_data_space_->Available() / KB,
360            old_data_space_->CommittedMemory() / KB);
361   PrintPID("Code space,         used: %6" V8_PTR_PREFIX
362            "d KB"
363            ", available: %6" V8_PTR_PREFIX
364            "d KB"
365            ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
367            code_space_->CommittedMemory() / KB);
368   PrintPID("Map space,          used: %6" V8_PTR_PREFIX
369            "d KB"
370            ", available: %6" V8_PTR_PREFIX
371            "d KB"
372            ", committed: %6" V8_PTR_PREFIX "d KB\n",
373            map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
374            map_space_->CommittedMemory() / KB);
375   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
376            "d KB"
377            ", available: %6" V8_PTR_PREFIX
378            "d KB"
379            ", committed: %6" V8_PTR_PREFIX "d KB\n",
380            cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
381            cell_space_->CommittedMemory() / KB);
382   PrintPID("Large object space, used: %6" V8_PTR_PREFIX
383            "d KB"
384            ", available: %6" V8_PTR_PREFIX
385            "d KB"
386            ", committed: %6" V8_PTR_PREFIX "d KB\n",
387            lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
388            lo_space_->CommittedMemory() / KB);
389   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
390            "d KB"
391            ", available: %6" V8_PTR_PREFIX
392            "d KB"
393            ", committed: %6" V8_PTR_PREFIX "d KB\n",
394            this->SizeOfObjects() / KB, this->Available() / KB,
395            this->CommittedMemory() / KB);
396   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
397            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
398   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
399 }
400
401
402 // TODO(1238405): Combine the infrastructure for --heap-stats and
403 // --log-gc to avoid the complicated preprocessor and flag testing.
404 void Heap::ReportStatisticsAfterGC() {
405 // As before the GC, we use some complicated logic to ensure that NewSpace
406 // statistics are logged exactly once when --log-gc is turned on.
407 #if defined(DEBUG)
408   if (FLAG_heap_stats) {
409     new_space_.CollectStatistics();
410     ReportHeapStatistics("After GC");
411   } else if (FLAG_log_gc) {
412     new_space_.ReportStatistics();
413   }
414 #else
415   if (FLAG_log_gc) new_space_.ReportStatistics();
416 #endif  // DEBUG
417 }
418
419
420 void Heap::GarbageCollectionPrologue() {
421   {
422     AllowHeapAllocation for_the_first_part_of_prologue;
423     ClearJSFunctionResultCaches();
424     gc_count_++;
425     unflattened_strings_length_ = 0;
426
427     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
428       mark_compact_collector()->EnableCodeFlushing(true);
429     }
430
431 #ifdef VERIFY_HEAP
432     if (FLAG_verify_heap) {
433       Verify();
434     }
435 #endif
436   }
437
438   // Reset GC statistics.
439   promoted_objects_size_ = 0;
440   previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
441   semi_space_copied_object_size_ = 0;
442   nodes_died_in_new_space_ = 0;
443   nodes_copied_in_new_space_ = 0;
444   nodes_promoted_ = 0;
445
446   UpdateMaximumCommitted();
447
448 #ifdef DEBUG
449   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
450
451   if (FLAG_gc_verbose) Print();
452
453   ReportStatisticsBeforeGC();
454 #endif  // DEBUG
455
456   store_buffer()->GCPrologue();
457
458   if (isolate()->concurrent_osr_enabled()) {
459     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
460   }
461
462   if (new_space_.IsAtMaximumCapacity()) {
463     maximum_size_scavenges_++;
464   } else {
465     maximum_size_scavenges_ = 0;
466   }
467   CheckNewSpaceExpansionCriteria();
468 }
469
470
471 intptr_t Heap::SizeOfObjects() {
472   intptr_t total = 0;
473   AllSpaces spaces(this);
474   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
475     total += space->SizeOfObjects();
476   }
477   return total;
478 }
479
480
481 void Heap::ClearAllICsByKind(Code::Kind kind) {
482   HeapObjectIterator it(code_space());
483
484   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
485     Code* code = Code::cast(object);
486     Code::Kind current_kind = code->kind();
487     if (current_kind == Code::FUNCTION ||
488         current_kind == Code::OPTIMIZED_FUNCTION) {
489       code->ClearInlineCaches(kind);
490     }
491   }
492 }
493
494
495 void Heap::RepairFreeListsAfterDeserialization() {
496   PagedSpaces spaces(this);
497   for (PagedSpace* space = spaces.next(); space != NULL;
498        space = spaces.next()) {
499     space->RepairFreeListsAfterDeserialization();
500   }
501 }
502
503
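// Digests the allocation-memento feedback gathered during the last scavenge:
// walks the allocation-site scratchpad (or, if it overflowed, the whole
// allocation sites list), lets each site update its pretenuring decision, and
// requests deoptimization of code depending on sites whose decision changed.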
504 void Heap::ProcessPretenuringFeedback() {
505   if (FLAG_allocation_site_pretenuring) {
506     int tenure_decisions = 0;
507     int dont_tenure_decisions = 0;
508     int allocation_mementos_found = 0;
509     int allocation_sites = 0;
510     int active_allocation_sites = 0;
511
512     // If the scratchpad overflowed, we have to iterate over the allocation
513     // sites list.
514     // TODO(hpayer): We iterate over the whole list of allocation sites
515     // once we have grown to the maximum semi-space size to deopt maybe
516     // tenured allocation sites. We could hold the maybe tenured allocation
517     // sites in a separate data structure if this is a performance problem.
518     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
519     bool use_scratchpad =
520         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
521         !deopt_maybe_tenured;
522
523     int i = 0;
524     Object* list_element = allocation_sites_list();
525     bool trigger_deoptimization = false;
526     bool maximum_size_scavenge = MaximumSizeScavenge();
527     while (use_scratchpad ? i < allocation_sites_scratchpad_length_
528                           : list_element->IsAllocationSite()) {
529       AllocationSite* site =
530           use_scratchpad
531               ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
532               : AllocationSite::cast(list_element);
533       allocation_mementos_found += site->memento_found_count();
534       if (site->memento_found_count() > 0) {
535         active_allocation_sites++;
536         if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
537           trigger_deoptimization = true;
538         }
539         if (site->GetPretenureMode() == TENURED) {
540           tenure_decisions++;
541         } else {
542           dont_tenure_decisions++;
543         }
544         allocation_sites++;
545       }
546
547       if (deopt_maybe_tenured && site->IsMaybeTenure()) {
548         site->set_deopt_dependent_code(true);
549         trigger_deoptimization = true;
550       }
551
552       if (use_scratchpad) {
553         i++;
554       } else {
555         list_element = site->weak_next();
556       }
557     }
558
559     if (trigger_deoptimization) {
560       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
561     }
562
563     FlushAllocationSitesScratchpad();
564
565     if (FLAG_trace_pretenuring_statistics &&
566         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
567          dont_tenure_decisions > 0)) {
568       PrintF(
569           "GC: (mode, #visited allocation sites, #active allocation sites, "
570           "#mementos, #tenure decisions, #donttenure decisions) "
571           "(%s, %d, %d, %d, %d, %d)\n",
572           use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
573           active_allocation_sites, allocation_mementos_found, tenure_decisions,
574           dont_tenure_decisions);
575     }
576   }
577 }
578
579
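// Deoptimizes all code that was marked as dependent on allocation sites whose
// pretenuring decision changed (see ProcessPretenuringFeedback above).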
580 void Heap::DeoptMarkedAllocationSites() {
581   // TODO(hpayer): If iterating over the allocation sites list becomes a
582   // performance issue, use a cached heap data structure instead (similar to
583   // the allocation sites scratchpad).
584   Object* list_element = allocation_sites_list();
585   while (list_element->IsAllocationSite()) {
586     AllocationSite* site = AllocationSite::cast(list_element);
587     if (site->deopt_dependent_code()) {
588       site->dependent_code()->MarkCodeForDeoptimization(
589           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
590       site->set_deopt_dependent_code(false);
591     }
592     list_element = site->weak_next();
593   }
594   Deoptimizer::DeoptimizeMarkedCode(isolate_);
595 }
596
597
598 void Heap::GarbageCollectionEpilogue() {
599   store_buffer()->GCEpilogue();
600
601   // In release mode, we only zap the from space under heap verification.
602   if (Heap::ShouldZapGarbage()) {
603     ZapFromSpace();
604   }
605
606   // Process pretenuring feedback and update allocation sites.
607   ProcessPretenuringFeedback();
608
609 #ifdef VERIFY_HEAP
610   if (FLAG_verify_heap) {
611     Verify();
612   }
613 #endif
614
615   AllowHeapAllocation for_the_rest_of_the_epilogue;
616
617 #ifdef DEBUG
618   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
619   if (FLAG_print_handles) PrintHandles();
620   if (FLAG_gc_verbose) Print();
621   if (FLAG_code_stats) ReportCodeStatistics("After GC");
622 #endif
623   if (FLAG_deopt_every_n_garbage_collections > 0) {
624     // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
625     // the topmost optimized frame can be deoptimized safely, because it
626     // might not have a lazy bailout point right after its current PC.
627     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
628       Deoptimizer::DeoptimizeAll(isolate());
629       gcs_since_last_deopt_ = 0;
630     }
631   }
632
633   UpdateMaximumCommitted();
634
635   isolate_->counters()->alive_after_last_gc()->Set(
636       static_cast<int>(SizeOfObjects()));
637
638   isolate_->counters()->string_table_capacity()->Set(
639       string_table()->Capacity());
640   isolate_->counters()->number_of_symbols()->Set(
641       string_table()->NumberOfElements());
642
643   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
644     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
645         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
646                          (crankshaft_codegen_bytes_generated_ +
647                           full_codegen_bytes_generated_)));
648   }
649
650   if (CommittedMemory() > 0) {
651     isolate_->counters()->external_fragmentation_total()->AddSample(
652         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
653
654     isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
655         (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
657         static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
658                          CommittedMemory()));
659     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
660         static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
661                          CommittedMemory()));
662     isolate_->counters()->heap_fraction_code_space()->AddSample(
663         static_cast<int>((code_space()->CommittedMemory() * 100.0) /
664                          CommittedMemory()));
665     isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
666         (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
667     isolate_->counters()->heap_fraction_cell_space()->AddSample(
668         static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
669                          CommittedMemory()));
670     isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
671         (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
672
673     isolate_->counters()->heap_sample_total_committed()->AddSample(
674         static_cast<int>(CommittedMemory() / KB));
675     isolate_->counters()->heap_sample_total_used()->AddSample(
676         static_cast<int>(SizeOfObjects() / KB));
677     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
678         static_cast<int>(map_space()->CommittedMemory() / KB));
679     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
680         static_cast<int>(cell_space()->CommittedMemory() / KB));
681     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
682         static_cast<int>(code_space()->CommittedMemory() / KB));
683
684     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
685         static_cast<int>(MaximumCommittedMemory() / KB));
686   }
687
688 #define UPDATE_COUNTERS_FOR_SPACE(space)                \
689   isolate_->counters()->space##_bytes_available()->Set( \
690       static_cast<int>(space()->Available()));          \
691   isolate_->counters()->space##_bytes_committed()->Set( \
692       static_cast<int>(space()->CommittedMemory()));    \
693   isolate_->counters()->space##_bytes_used()->Set(      \
694       static_cast<int>(space()->SizeOfObjects()));
695 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
696   if (space()->CommittedMemory() > 0) {                                \
697     isolate_->counters()->external_fragmentation_##space()->AddSample( \
698         static_cast<int>(100 -                                         \
699                          (space()->SizeOfObjects() * 100.0) /          \
700                              space()->CommittedMemory()));             \
701   }
702 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
703   UPDATE_COUNTERS_FOR_SPACE(space)                         \
704   UPDATE_FRAGMENTATION_FOR_SPACE(space)
705
706   UPDATE_COUNTERS_FOR_SPACE(new_space)
707   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
708   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
709   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
710   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
711   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
712   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
713 #undef UPDATE_COUNTERS_FOR_SPACE
714 #undef UPDATE_FRAGMENTATION_FOR_SPACE
715 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
716
717 #ifdef DEBUG
718   ReportStatisticsAfterGC();
719 #endif  // DEBUG
720
721   // Remember the last top pointer so that we can later find out
722   // whether we allocated in new space since the last GC.
723   new_space_top_after_last_gc_ = new_space()->top();
724
725   if (migration_failure_) {
726     set_previous_migration_failure(true);
727   } else {
728     set_previous_migration_failure(false);
729   }
730   set_migration_failure(false);
731 }
732
733
734 void Heap::HandleGCRequest() {
735   if (incremental_marking()->request_type() ==
736       IncrementalMarking::COMPLETE_MARKING) {
737     CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
738     return;
739   }
740   DCHECK(FLAG_overapproximate_weak_closure);
741   if (!incremental_marking()->weak_closure_was_overapproximated()) {
742     OverApproximateWeakClosure("GC interrupt");
743   }
744 }
745
746
747 void Heap::OverApproximateWeakClosure(const char* gc_reason) {
748   if (FLAG_trace_incremental_marking) {
749     PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
750            gc_reason);
751   }
752
753   GCTracer::Scope gc_scope(tracer(),
754                            GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
755
756   {
757     GCCallbacksScope scope(this);
758     if (scope.CheckReenter()) {
759       AllowHeapAllocation allow_allocation;
760       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
761       VMState<EXTERNAL> state(isolate_);
762       HandleScope handle_scope(isolate_);
763       CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
764     }
765   }
766   incremental_marking()->MarkObjectGroups();
767   {
768     GCCallbacksScope scope(this);
769     if (scope.CheckReenter()) {
770       AllowHeapAllocation allow_allocation;
771       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
772       VMState<EXTERNAL> state(isolate_);
773       HandleScope handle_scope(isolate_);
774       CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
775     }
776   }
777 }
778
779
780 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
781                              const v8::GCCallbackFlags gc_callback_flags) {
782   // Since we are ignoring the return value, the exact choice of space does
783   // not matter, so long as we do not specify NEW_SPACE, which would not
784   // cause a full GC.
785   mark_compact_collector_.SetFlags(flags);
786   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
787   mark_compact_collector_.SetFlags(kNoGCFlags);
788 }
789
790
791 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
792   // Since we are ignoring the return value, the exact choice of space does
793   // not matter, so long as we do not specify NEW_SPACE, which would not
794   // cause a full GC.
795   // Major GC would invoke weak handle callbacks on weakly reachable
796   // handles, but won't collect weakly reachable objects until next
797   // major GC.  Therefore if we collect aggressively and weak handle callback
798   // has been invoked, we rerun major GC to release objects which become
799   // garbage.
800   // Note: as weak callbacks can execute arbitrary code, we cannot
801   // hope that eventually there will be no weak callback invocations.
802   // Therefore stop recollecting after several attempts.
803   if (isolate()->concurrent_recompilation_enabled()) {
804     // The optimizing compiler may be unnecessarily holding on to memory.
805     DisallowHeapAllocation no_recursive_gc;
806     isolate()->optimizing_compiler_thread()->Flush();
807   }
808   isolate()->ClearSerializerData();
809   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
810                                      kReduceMemoryFootprintMask);
811   isolate_->compilation_cache()->Clear();
812   const int kMaxNumberOfAttempts = 7;
813   const int kMinNumberOfAttempts = 2;
814   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
815     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
816         attempt + 1 >= kMinNumberOfAttempts) {
817       break;
818     }
819   }
820   mark_compact_collector()->SetFlags(kNoGCFlags);
821   new_space_.Shrink();
822   UncommitFromSpace();
823 }
824
825
826 void Heap::EnsureFillerObjectAtTop() {
827   // There may be an allocation memento behind every object in new space.
828   // If we evacuate a new space that is not full, or if we are on the last
829   // page of the new space, then there may be uninitialized memory behind
830   // the top pointer of the new space page. We store a filler object there
831   // to identify the unused space.
832   Address from_top = new_space_.top();
833   // Check that from_top is inside its page (i.e., not at the end).
834   Address space_end = new_space_.ToSpaceEnd();
835   if (from_top < space_end) {
836     Page* page = Page::FromAddress(from_top);
837     if (page->Contains(from_top)) {
838       int remaining_in_page = static_cast<int>(page->area_end() - from_top);
839       CreateFillerObjectAt(from_top, remaining_in_page);
840     }
841   }
842 }
843
844
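// Performs one garbage collection in the requested collector mode. A
// requested mark-compact may be downgraded to a scavenge while incremental
// marking is still making progress; the return value indicates whether the
// next GC is likely to collect more garbage (i.e. weak global handles were
// freed during post-processing).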
845 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
846                           const char* collector_reason,
847                           const v8::GCCallbackFlags gc_callback_flags) {
848   // The VM is in the GC state until exiting this function.
849   VMState<GC> state(isolate_);
850
851 #ifdef DEBUG
852   // Reset the allocation timeout to the GC interval, but make sure to
853   // allow at least a few allocations after a collection. The reason
854   // for this is that we have a lot of allocation sequences and we
855   // assume that a garbage collection will allow the subsequent
856   // allocation attempts to go through.
857   allocation_timeout_ = Max(6, FLAG_gc_interval);
858 #endif
859
860   EnsureFillerObjectAtTop();
861
862   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
863     if (FLAG_trace_incremental_marking) {
864       PrintF("[IncrementalMarking] Scavenge during marking.\n");
865     }
866   }
867
868   if (collector == MARK_COMPACTOR &&
869       !mark_compact_collector()->abort_incremental_marking() &&
870       !incremental_marking()->IsStopped() &&
871       !incremental_marking()->should_hurry() &&
872       FLAG_incremental_marking_steps) {
873     // Make progress in incremental marking.
874     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
875     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
876                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
877     if (!incremental_marking()->IsComplete() &&
878         !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
879       if (FLAG_trace_incremental_marking) {
880         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
881       }
882       collector = SCAVENGER;
883       collector_reason = "incremental marking delaying mark-sweep";
884     }
885   }
886
887   bool next_gc_likely_to_collect_more = false;
888
889   {
890     tracer()->Start(collector, gc_reason, collector_reason);
891     DCHECK(AllowHeapAllocation::IsAllowed());
892     DisallowHeapAllocation no_allocation_during_gc;
893     GarbageCollectionPrologue();
894
895     {
896       HistogramTimerScope histogram_timer_scope(
897           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
898                                    : isolate_->counters()->gc_compactor());
899       next_gc_likely_to_collect_more =
900           PerformGarbageCollection(collector, gc_callback_flags);
901     }
902
903     GarbageCollectionEpilogue();
904     if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) {
905       isolate()->CheckDetachedContextsAfterGC();
906     }
907     tracer()->Stop(collector);
908   }
909
910   // Start incremental marking for the next cycle. The heap snapshot
911   // generator needs incremental marking to stay off after it aborted.
912   if (!mark_compact_collector()->abort_incremental_marking() &&
913       WorthActivatingIncrementalMarking()) {
914     incremental_marking()->Start();
915   }
916
917   return next_gc_likely_to_collect_more;
918 }
919
920
921 int Heap::NotifyContextDisposed(bool dependant_context) {
922   if (!dependant_context) {
923     tracer()->ResetSurvivalEvents();
924     old_generation_size_configured_ = false;
925   }
926   if (isolate()->concurrent_recompilation_enabled()) {
927     // Flush the queued recompilation tasks.
928     isolate()->optimizing_compiler_thread()->Flush();
929   }
930   AgeInlineCaches();
931   set_retained_maps(ArrayList::cast(empty_fixed_array()));
932   tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
933   return ++contexts_disposed_;
934 }
935
936
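// Moves |len| elements of |array| from |src_index| to |dst_index|, emitting
// write-barrier entries for any new-space pointers that end up in an array
// that itself lives outside new space.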
937 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
938                         int len) {
939   if (len == 0) return;
940
941   DCHECK(array->map() != fixed_cow_array_map());
942   Object** dst_objects = array->data_start() + dst_index;
943   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
944   if (!InNewSpace(array)) {
945     for (int i = 0; i < len; i++) {
946       // TODO(hpayer): check store buffer for entries
947       if (InNewSpace(dst_objects[i])) {
948         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
949       }
950     }
951   }
952   incremental_marking()->RecordWrites(array);
953 }
954
955
956 #ifdef VERIFY_HEAP
957 // Helper class for verifying the string table.
958 class StringTableVerifier : public ObjectVisitor {
959  public:
960   void VisitPointers(Object** start, Object** end) {
961     // Visit all HeapObject pointers in [start, end).
962     for (Object** p = start; p < end; p++) {
963       if ((*p)->IsHeapObject()) {
964         // Check that the string is actually internalized.
965         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
966               (*p)->IsInternalizedString());
967       }
968     }
969   }
970 };
971
972
973 static void VerifyStringTable(Heap* heap) {
974   StringTableVerifier verifier;
975   heap->string_table()->IterateElements(&verifier);
976 }
977 #endif  // VERIFY_HEAP
978
979
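// Tries to satisfy the space reservations requested by the deserializer,
// falling back to increasingly aggressive garbage collections; returns false
// if the reservations still cannot be met after kThreshold attempts.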
980 bool Heap::ReserveSpace(Reservation* reservations) {
981   bool gc_performed = true;
982   int counter = 0;
983   static const int kThreshold = 20;
984   while (gc_performed && counter++ < kThreshold) {
985     gc_performed = false;
986     for (int space = NEW_SPACE; space < Serializer::kNumberOfSpaces; space++) {
987       Reservation* reservation = &reservations[space];
988       DCHECK_LE(1, reservation->length());
989       if (reservation->at(0).size == 0) continue;
990       bool perform_gc = false;
991       if (space == LO_SPACE) {
992         DCHECK_EQ(1, reservation->length());
993         perform_gc = !lo_space()->CanAllocateSize(reservation->at(0).size);
994       } else {
995         for (auto& chunk : *reservation) {
996           AllocationResult allocation;
997           int size = chunk.size;
998           DCHECK_LE(size, MemoryAllocator::PageAreaSize(
999                               static_cast<AllocationSpace>(space)));
1000           if (space == NEW_SPACE) {
1001             allocation = new_space()->AllocateRaw(size);
1002           } else {
1003             allocation = paged_space(space)->AllocateRaw(size);
1004           }
1005           HeapObject* free_space;
1006           if (allocation.To(&free_space)) {
1007             // Mark with a free list node, in case we have a GC before
1008             // deserializing.
1009             Address free_space_address = free_space->address();
1010             CreateFillerObjectAt(free_space_address, size);
1011             DCHECK(space < Serializer::kNumberOfPreallocatedSpaces);
1012             chunk.start = free_space_address;
1013             chunk.end = free_space_address + size;
1014           } else {
1015             perform_gc = true;
1016             break;
1017           }
1018         }
1019       }
1020       if (perform_gc) {
1021         if (space == NEW_SPACE) {
1022           CollectGarbage(NEW_SPACE, "failed to reserve space in the new space");
1023         } else {
1024           if (counter > 1) {
1025             CollectAllGarbage(
1026                 kReduceMemoryFootprintMask,
1027                 "failed to reserve space in paged or large "
1028                 "object space, trying to reduce memory footprint");
1029           } else {
1030             CollectAllGarbage(
1031                 kAbortIncrementalMarkingMask,
1032                 "failed to reserve space in paged or large object space");
1033           }
1034         }
1035         gc_performed = true;
1036         break;  // Abort for-loop over spaces and retry.
1037       }
1038     }
1039   }
1040
1041   return !gc_performed;
1042 }
1043
1044
1045 void Heap::EnsureFromSpaceIsCommitted() {
1046   if (new_space_.CommitFromSpaceIfNeeded()) return;
1047
1048   // Committing memory to from space failed.
1049   // Memory is exhausted and we will die.
1050   V8::FatalProcessOutOfMemory("Committing semi space failed.");
1051 }
1052
1053
1054 void Heap::ClearJSFunctionResultCaches() {
1055   if (isolate_->bootstrapper()->IsActive()) return;
1056
1057   Object* context = native_contexts_list();
1058   while (!context->IsUndefined()) {
1059     // Get the caches for this context. GC can happen when the context
1060     // is not fully initialized, so the caches can be undefined.
1061     Object* caches_or_undefined =
1062         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
1063     if (!caches_or_undefined->IsUndefined()) {
1064       FixedArray* caches = FixedArray::cast(caches_or_undefined);
1065       // Clear the caches:
1066       int length = caches->length();
1067       for (int i = 0; i < length; i++) {
1068         JSFunctionResultCache::cast(caches->get(i))->Clear();
1069       }
1070     }
1071     // Get the next context:
1072     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1073   }
1074 }
1075
1076
1077 void Heap::ClearNormalizedMapCaches() {
1078   if (isolate_->bootstrapper()->IsActive() &&
1079       !incremental_marking()->IsMarking()) {
1080     return;
1081   }
1082
1083   Object* context = native_contexts_list();
1084   while (!context->IsUndefined()) {
1085     // GC can happen when the context is not fully initialized,
1086     // so the cache can be undefined.
1087     Object* cache =
1088         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1089     if (!cache->IsUndefined()) {
1090       NormalizedMapCache::cast(cache)->Clear();
1091     }
1092     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1093   }
1094 }
1095
1096
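// Recomputes the promotion and semi-space copy ratios as percentages of the
// new-space size before the GC and feeds the combined survival rate into the
// GC tracer.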
1097 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1098   if (start_new_space_size == 0) return;
1099
1100   promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
1101                       static_cast<double>(start_new_space_size) * 100);
1102
1103   if (previous_semi_space_copied_object_size_ > 0) {
1104     promotion_rate_ =
1105         (static_cast<double>(promoted_objects_size_) /
1106          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
1107   } else {
1108     promotion_rate_ = 0;
1109   }
1110
1111   semi_space_copied_rate_ =
1112       (static_cast<double>(semi_space_copied_object_size_) /
1113        static_cast<double>(start_new_space_size) * 100);
1114
1115   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
1116   tracer()->AddSurvivalRatio(survival_rate);
1117   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1118     high_survival_rate_period_length_++;
1119   } else {
1120     high_survival_rate_period_length_ = 0;
1121   }
1122 }
1123
1124 bool Heap::PerformGarbageCollection(
1125     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1126   int freed_global_handles = 0;
1127
1128   if (collector != SCAVENGER) {
1129     PROFILE(isolate_, CodeMovingGCEvent());
1130   }
1131
1132 #ifdef VERIFY_HEAP
1133   if (FLAG_verify_heap) {
1134     VerifyStringTable(this);
1135   }
1136 #endif
1137
1138   GCType gc_type =
1139       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1140
1141   {
1142     GCCallbacksScope scope(this);
1143     if (scope.CheckReenter()) {
1144       AllowHeapAllocation allow_allocation;
1145       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1146       VMState<EXTERNAL> state(isolate_);
1147       HandleScope handle_scope(isolate_);
1148       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1149     }
1150   }
1151
1152   EnsureFromSpaceIsCommitted();
1153
1154   int start_new_space_size = Heap::new_space()->SizeAsInt();
1155
1156   if (IsHighSurvivalRate()) {
1157     // We speed up the incremental marker if it is running so that it
1158     // does not fall behind the rate of promotion, which would cause a
1159     // constantly growing old space.
1160     incremental_marking()->NotifyOfHighPromotionRate();
1161   }
1162
1163   if (collector == MARK_COMPACTOR) {
1164     // Perform mark-sweep with optional compaction.
1165     MarkCompact();
1166     sweep_generation_++;
1167     // Temporarily set the limit for the case when
1168     // PostGarbageCollectionProcessing allocates and triggers GC. The real
1169     // limit is set after PostGarbageCollectionProcessing.
1170     SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1171     old_gen_exhausted_ = false;
1172     old_generation_size_configured_ = true;
1173   } else {
1174     Scavenge();
1175   }
1176
1177   UpdateSurvivalStatistics(start_new_space_size);
1178   ConfigureInitialOldGenerationSize();
1179
1180   isolate_->counters()->objs_since_last_young()->Set(0);
1181
1182   // Callbacks that fire after this point might trigger nested GCs and
1183   // restart incremental marking, so the assertion can't be moved down.
1184   DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1185
1186   gc_post_processing_depth_++;
1187   {
1188     AllowHeapAllocation allow_allocation;
1189     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1190     freed_global_handles =
1191         isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1192   }
1193   gc_post_processing_depth_--;
1194
1195   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1196
1197   // Update relocatables.
1198   Relocatable::PostGarbageCollectionProcessing(isolate_);
1199
1200   if (collector == MARK_COMPACTOR) {
1201     // Register the amount of external allocated memory.
1202     amount_of_external_allocated_memory_at_last_global_gc_ =
1203         amount_of_external_allocated_memory_;
1204     SetOldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1205                                     freed_global_handles);
1206     // We finished a marking cycle. We can uncommit the marking deque until
1207     // we start marking again.
1208     mark_compact_collector_.UncommitMarkingDeque();
1209   }
1210
1211   {
1212     GCCallbacksScope scope(this);
1213     if (scope.CheckReenter()) {
1214       AllowHeapAllocation allow_allocation;
1215       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1216       VMState<EXTERNAL> state(isolate_);
1217       HandleScope handle_scope(isolate_);
1218       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1219     }
1220   }
1221
1222 #ifdef VERIFY_HEAP
1223   if (FLAG_verify_heap) {
1224     VerifyStringTable(this);
1225   }
1226 #endif
1227
1228   return freed_global_handles > 0;
1229 }
1230
1231
1232 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1233   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1234     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1235       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1236         v8::GCPrologueCallback callback =
1237             reinterpret_cast<v8::GCPrologueCallback>(
1238                 gc_prologue_callbacks_[i].callback);
1239         callback(gc_type, flags);
1240       } else {
1241         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1242         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1243       }
1244     }
1245   }
1246 }
1247
1248
1249 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1250                                    GCCallbackFlags gc_callback_flags) {
1251   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1252     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1253       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1254         v8::GCPrologueCallback callback =
1255             reinterpret_cast<v8::GCPrologueCallback>(
1256                 gc_epilogue_callbacks_[i].callback);
1257         callback(gc_type, gc_callback_flags);
1258       } else {
1259         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1260         gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1261       }
1262     }
1263   }
1264 }
1265
1266
1267 void Heap::MarkCompact() {
1268   gc_state_ = MARK_COMPACT;
1269   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1270
1271   uint64_t size_of_objects_before_gc = SizeOfObjects();
1272
1273   mark_compact_collector_.Prepare();
1274
1275   ms_count_++;
1276
1277   MarkCompactPrologue();
1278
1279   mark_compact_collector_.CollectGarbage();
1280
1281   LOG(isolate_, ResourceEvent("markcompact", "end"));
1282
1283   MarkCompactEpilogue();
1284
1285   if (FLAG_allocation_site_pretenuring) {
1286     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1287   }
1288 }
1289
1290
1291 void Heap::MarkCompactEpilogue() {
1292   gc_state_ = NOT_IN_GC;
1293
1294   isolate_->counters()->objs_since_last_full()->Set(0);
1295
1296   incremental_marking()->Epilogue();
1297 }
1298
1299
1300 void Heap::MarkCompactPrologue() {
1301   // At any old-generation GC, clear the keyed lookup cache to enable
1302   // collection of unused maps.
1303   isolate_->keyed_lookup_cache()->Clear();
1304   isolate_->context_slot_cache()->Clear();
1305   isolate_->descriptor_lookup_cache()->Clear();
1306   RegExpResultsCache::Clear(string_split_cache());
1307   RegExpResultsCache::Clear(regexp_multiple_cache());
1308
1309   isolate_->compilation_cache()->MarkCompactPrologue();
1310
1311   CompletelyClearInstanceofCache();
1312
1313   FlushNumberStringCache();
1314   if (FLAG_cleanup_code_caches_at_gc) {
1315     polymorphic_code_cache()->set_cache(undefined_value());
1316   }
1317
1318   ClearNormalizedMapCaches();
1319 }
1320
1321
1322 // Helper class for copying HeapObjects
1323 class ScavengeVisitor : public ObjectVisitor {
1324  public:
1325   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1326
1327   void VisitPointer(Object** p) { ScavengePointer(p); }
1328
1329   void VisitPointers(Object** start, Object** end) {
1330     // Copy all HeapObject pointers in [start, end)
1331     for (Object** p = start; p < end; p++) ScavengePointer(p);
1332   }
1333
1334  private:
1335   void ScavengePointer(Object** p) {
1336     Object* object = *p;
1337     if (!heap_->InNewSpace(object)) return;
1338     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1339                          reinterpret_cast<HeapObject*>(object));
1340   }
1341
1342   Heap* heap_;
1343 };
1344
1345
1346 #ifdef VERIFY_HEAP
1347 // Visitor class to verify pointers in code or data space do not point into
1348 // new space.
1349 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1350  public:
1351   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1352   void VisitPointers(Object** start, Object** end) {
1353     for (Object** current = start; current < end; current++) {
1354       if ((*current)->IsHeapObject()) {
1355         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1356       }
1357     }
1358   }
1359
1360  private:
1361   Heap* heap_;
1362 };
1363
1364
1365 static void VerifyNonPointerSpacePointers(Heap* heap) {
1366   // Verify that there are no pointers to new space in spaces where we
1367   // do not expect them.
1368   VerifyNonPointerSpacePointersVisitor v(heap);
1369   HeapObjectIterator code_it(heap->code_space());
1370   for (HeapObject* object = code_it.Next(); object != NULL;
1371        object = code_it.Next())
1372     object->Iterate(&v);
1373
1374   HeapObjectIterator data_it(heap->old_data_space());
1375   for (HeapObject* object = data_it.Next(); object != NULL;
1376        object = data_it.Next())
1377     object->Iterate(&v);
1378 }
1379 #endif  // VERIFY_HEAP
1380
1381
1382 void Heap::CheckNewSpaceExpansionCriteria() {
1383   if (FLAG_experimental_new_space_growth_heuristic) {
1384     if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1385         survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
1386       // Grow the size of new space if there is room to grow, and more than 10%
1387       // have survived the last scavenge.
1388       new_space_.Grow();
1389       survived_since_last_expansion_ = 0;
1390     }
1391   } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1392              survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1393     // Grow the size of new space if there is room to grow, and enough data
1394     // has survived scavenge since the last expansion.
1395     new_space_.Grow();
1396     survived_since_last_expansion_ = 0;
1397   }
1398 }
1399
1400
1401 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1402   return heap->InNewSpace(*p) &&
1403          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1404 }
1405
1406
1407 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1408                                        StoreBufferEvent event) {
1409   heap->store_buffer_rebuilder_.Callback(page, event);
1410 }
1411
1412
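// Tracks per-page store-buffer usage while the store buffer is scanned during
// a scavenge: pages that overflow the buffer, or that contribute too many
// slots, are switched to scan-on-scavenge mode and their entries are dropped
// from the buffer.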
1413 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1414   if (event == kStoreBufferStartScanningPagesEvent) {
1415     start_of_current_page_ = NULL;
1416     current_page_ = NULL;
1417   } else if (event == kStoreBufferScanningPageEvent) {
1418     if (current_page_ != NULL) {
1419       // If this page already overflowed the store buffer during this iteration.
1420       if (current_page_->scan_on_scavenge()) {
1421         // Then we should wipe out the entries that have been added for it.
1422         store_buffer_->SetTop(start_of_current_page_);
1423       } else if (store_buffer_->Top() - start_of_current_page_ >=
1424                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1425         // Did we find too many pointers in the previous page?  The heuristic is
1426         // that no page can take more than 1/5 of the remaining slots in the
1427         // store buffer.
1428         current_page_->set_scan_on_scavenge(true);
1429         store_buffer_->SetTop(start_of_current_page_);
1430       } else {
1431         // In this case the page we scanned took a reasonable number of slots in
1432         // the store buffer.  It has now been rehabilitated and is no longer
1433         // marked scan_on_scavenge.
1434         DCHECK(!current_page_->scan_on_scavenge());
1435       }
1436     }
1437     start_of_current_page_ = store_buffer_->Top();
1438     current_page_ = page;
1439   } else if (event == kStoreBufferFullEvent) {
1440     // The current page overflowed the store buffer again.  Wipe out its entries
1441     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1442     // several times while scanning.
1443     if (current_page_ == NULL) {
1444       // Store Buffer overflowed while scanning promoted objects.  These are not
1445       // in any particular page, though they are likely to be clustered by the
1446       // allocation routines.
1447       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1448     } else {
1449       // Store Buffer overflowed while scanning a particular old space page for
1450       // pointers to new space.
1451       DCHECK(current_page_ == page);
1452       DCHECK(page != NULL);
1453       current_page_->set_scan_on_scavenge(true);
1454       DCHECK(start_of_current_page_ != store_buffer_->Top());
1455       store_buffer_->SetTop(start_of_current_page_);
1456     }
1457   } else {
1458     UNREACHABLE();
1459   }
1460 }
1461
1462
1463 void PromotionQueue::Initialize() {
1464   // The last to-space page may be used for the promotion queue. On
1465   // promotion conflict, we use the emergency stack.
1466   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1467          0);
1468   front_ = rear_ =
1469       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1470   limit_ = reinterpret_cast<intptr_t*>(
1471       Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
1472   emergency_stack_ = NULL;
1473 }
1474
1475
1476 void PromotionQueue::RelocateQueueHead() {
1477   DCHECK(emergency_stack_ == NULL);
1478
1479   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1480   intptr_t* head_start = rear_;
1481   intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1482
1483   int entries_count =
1484       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1485
1486   emergency_stack_ = new List<Entry>(2 * entries_count);
1487
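  // Each queue entry occupies kEntrySizeInWords words; the loop below reads
  // them back as the object size followed by the object pointer.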
1488   while (head_start != head_end) {
1489     int size = static_cast<int>(*(head_start++));
1490     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1491     // New space allocation in SemiSpaceCopyObject marked the region
1492     // overlapping with the promotion queue as uninitialized.
1493     MSAN_MEMORY_IS_INITIALIZED(&size, sizeof(size));
1494     MSAN_MEMORY_IS_INITIALIZED(&obj, sizeof(obj));
1495     emergency_stack_->Add(Entry(obj, size));
1496   }
1497   rear_ = head_end;
1498 }
1499
1500
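// Weak object retainer used at the end of a scavenge: objects outside
// from-space are retained as-is, objects that were copied are replaced by
// their forwarding address, and anything still left in from-space is dropped.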
1501 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1502  public:
1503   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1504
1505   virtual Object* RetainAs(Object* object) {
1506     if (!heap_->InFromSpace(object)) {
1507       return object;
1508     }
1509
1510     MapWord map_word = HeapObject::cast(object)->map_word();
1511     if (map_word.IsForwardingAddress()) {
1512       return map_word.ToForwardingAddress();
1513     }
1514     return NULL;
1515   }
1516
1517  private:
1518   Heap* heap_;
1519 };
1520
1521
1522 void Heap::Scavenge() {
1523   RelocationLock relocation_lock(this);
1524   // There are soft limits in the allocation code, designed to trigger a mark
1525   // sweep collection by failing allocations. There is no sense in trying to
1526   // trigger one during scavenge: scavenge allocations should always succeed.
1527   AlwaysAllocateScope scope(isolate());
1528
1529 #ifdef VERIFY_HEAP
1530   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1531 #endif
1532
1533   gc_state_ = SCAVENGE;
1534
1535   // Implements Cheney's copying algorithm.
1536   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1537
1538   // Clear descriptor cache.
1539   isolate_->descriptor_lookup_cache()->Clear();
1540
1541   // Used for updating survived_since_last_expansion_ at function end.
1542   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1543
1544   SelectScavengingVisitorsTable();
1545
1546   incremental_marking()->PrepareForScavenge();
1547
1548   // Flip the semispaces.  After flipping, to space is empty, from space has
1549   // live objects.
1550   new_space_.Flip();
1551   new_space_.ResetAllocationInfo();
1552
1553   // We need to sweep newly copied objects which can be either in the
1554   // to space or promoted to the old generation.  For to-space
1555   // objects, we treat the bottom of the to space as a queue.  Newly
1556   // copied and unswept objects lie between a 'front' mark and the
1557   // allocation pointer.
1558   //
1559   // Promoted objects can go into various old-generation spaces, and
1560   // can be allocated internally in the spaces (from the free list).
1561   // We treat the top of the to space as a queue of addresses of
1562   // promoted objects.  The addresses of newly promoted and unswept
1563   // objects lie between a 'front' mark and a 'rear' mark that is
1564   // updated as a side effect of promoting an object.
1565   //
1566   // There is guaranteed to be enough room at the top of the to space
1567   // for the addresses of promoted objects: every object promoted
1568   // frees up its size in bytes from the top of the new space, and
1569   // objects are at least one pointer in size.
1570   Address new_space_front = new_space_.ToSpaceStart();
1571   promotion_queue_.Initialize();
1572
1573   ScavengeVisitor scavenge_visitor(this);
1574   // Copy roots.
1575   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1576
1577   // Copy objects reachable from the old generation.
1578   {
1579     StoreBufferRebuildScope scope(this, store_buffer(),
1580                                   &ScavengeStoreBufferCallback);
1581     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1582   }
1583
1584   // Copy objects reachable from simple cells by scavenging cell values
1585   // directly.
1586   HeapObjectIterator cell_iterator(cell_space_);
1587   for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1588        heap_object = cell_iterator.Next()) {
1589     if (heap_object->IsCell()) {
1590       Cell* cell = Cell::cast(heap_object);
1591       Address value_address = cell->ValueAddress();
1592       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1593     }
1594   }
1595
1596   // Copy objects reachable from the encountered weak collections list.
1597   scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1598   // Copy objects reachable from the encountered weak cells.
1599   scavenge_visitor.VisitPointer(&encountered_weak_cells_);
1600
1601   // Copy objects reachable from the code flushing candidates list.
1602   MarkCompactCollector* collector = mark_compact_collector();
1603   if (collector->is_code_flushing_enabled()) {
1604     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1605   }
1606
1607   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1608
1609   while (isolate()->global_handles()->IterateObjectGroups(
1610       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1611     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1612   }
1613   isolate()->global_handles()->RemoveObjectGroups();
1614   isolate()->global_handles()->RemoveImplicitRefGroups();
1615
1616   isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1617       &IsUnscavengedHeapObject);
1618
1619   isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
1620       &scavenge_visitor);
1621   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1622
1623   UpdateNewSpaceReferencesInExternalStringTable(
1624       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1625
1626   promotion_queue_.Destroy();
1627
1628   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1629
1630   ScavengeWeakObjectRetainer weak_object_retainer(this);
1631   ProcessYoungWeakReferences(&weak_object_retainer);
1632
1633   // Collects callback info for handles referenced by the young generation that
1634   // are pending (about to be collected) and are either phantom or have internal
1635   // fields.  Releases the global handles.  See also PostGarbageCollectionProcessing.
1636   isolate()->global_handles()->CollectYoungPhantomCallbackData();
1637
1638   DCHECK(new_space_front == new_space_.top());
1639
1640   // Set age mark.
1641   new_space_.set_age_mark(new_space_.top());
1642
1643   new_space_.LowerInlineAllocationLimit(
1644       new_space_.inline_allocation_limit_step());
1645
1646   // Update how much has survived scavenge.
1647   IncrementYoungSurvivorsCounter(static_cast<int>(
1648       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1649
1650   LOG(isolate_, ResourceEvent("scavenge", "end"));
1651
1652   gc_state_ = NOT_IN_GC;
1653
1654   gc_idle_time_handler_.NotifyScavenge();
1655 }
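
// A self-contained toy sketch (not V8 code) of the Cheney-style copying that
// Scavenge() and DoScavenge() implement: copied-but-unscanned objects form a
// queue between a scan pointer and the allocation top, and scanning an object
// may append more objects to the queue.  All names below are hypothetical, and
// the "copy" step is reduced to enqueueing plus a self-forwarding mark.
#if 0  // Illustrative only.
struct ToyNode {
  ToyNode* forwarded;  // NULL until the node has been "copied".
  ToyNode* child[2];   // Outgoing references.
};

// Enqueue |node| once and return its forwarding target (itself in this toy).
static ToyNode* ToyCopy(ToyNode* node, ToyNode* to_space[], int* alloc_top) {
  if (node == NULL) return NULL;
  if (node->forwarded == NULL) {
    to_space[(*alloc_top)++] = node;  // Assumes |to_space| is large enough.
    node->forwarded = node;
  }
  return node->forwarded;
}

static void ToyScavenge(ToyNode* roots[], int root_count,
                        ToyNode* to_space[], int* alloc_top) {
  int scan = 0;
  for (int i = 0; i < root_count; i++) {
    roots[i] = ToyCopy(roots[i], to_space, alloc_top);
  }
  // The queue of unprocessed copies lies between |scan| and |*alloc_top|,
  // just as new_space_front chases new_space_.top() in DoScavenge().
  while (scan < *alloc_top) {
    ToyNode* node = to_space[scan++];
    node->child[0] = ToyCopy(node->child[0], to_space, alloc_top);
    node->child[1] = ToyCopy(node->child[1], to_space, alloc_top);
  }
}
#endif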
1656
1657
1658 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1659                                                                 Object** p) {
1660   MapWord first_word = HeapObject::cast(*p)->map_word();
1661
1662   if (!first_word.IsForwardingAddress()) {
1663     // Unreachable external string can be finalized.
1664     heap->FinalizeExternalString(String::cast(*p));
1665     return NULL;
1666   }
1667
1668   // String is still reachable.
1669   return String::cast(first_word.ToForwardingAddress());
1670 }
1671
1672
1673 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1674     ExternalStringTableUpdaterCallback updater_func) {
1675 #ifdef VERIFY_HEAP
1676   if (FLAG_verify_heap) {
1677     external_string_table_.Verify();
1678   }
1679 #endif
1680
1681   if (external_string_table_.new_space_strings_.is_empty()) return;
1682
1683   Object** start = &external_string_table_.new_space_strings_[0];
1684   Object** end = start + external_string_table_.new_space_strings_.length();
1685   Object** last = start;
1686
1687   for (Object** p = start; p < end; ++p) {
1688     DCHECK(InFromSpace(*p));
1689     String* target = updater_func(this, p);
1690
1691     if (target == NULL) continue;
1692
1693     DCHECK(target->IsExternalString());
1694
1695     if (InNewSpace(target)) {
1696       // String is still in new space.  Update the table entry.
1697       *last = target;
1698       ++last;
1699     } else {
1700       // String got promoted.  Move it to the old string list.
1701       external_string_table_.AddOldString(target);
1702     }
1703   }
1704
1705   DCHECK(last <= end);
1706   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1707 }
1708
1709
1710 void Heap::UpdateReferencesInExternalStringTable(
1711     ExternalStringTableUpdaterCallback updater_func) {
1712   // Update old space string references.
1713   if (external_string_table_.old_space_strings_.length() > 0) {
1714     Object** start = &external_string_table_.old_space_strings_[0];
1715     Object** end = start + external_string_table_.old_space_strings_.length();
1716     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1717   }
1718
1719   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1720 }
1721
1722
1723 void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
1724   ProcessArrayBuffers(retainer, false);
1725   ProcessNewArrayBufferViews(retainer);
1726   ProcessNativeContexts(retainer);
1727   ProcessAllocationSites(retainer);
1728 }
1729
1730
1731 void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
1732   ProcessArrayBuffers(retainer, true);
1733   ProcessNewArrayBufferViews(retainer);
1734   ProcessNativeContexts(retainer);
1735 }
1736
1737
1738 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1739   Object* head =
1740       VisitWeakList<Context>(this, native_contexts_list(), retainer, false);
1741   // Update the head of the list of contexts.
1742   set_native_contexts_list(head);
1743 }
1744
1745
1746 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1747                                bool stop_after_young) {
1748   Object* array_buffer_obj = VisitWeakList<JSArrayBuffer>(
1749       this, array_buffers_list(), retainer, stop_after_young);
1750   set_array_buffers_list(array_buffer_obj);
1751
1752 #ifdef DEBUG
1753   // Verify the invariant that young array buffers come before old array buffers
1754   // in the array buffers list if there was no promotion failure.
1755   Object* undefined = undefined_value();
1756   Object* next = array_buffers_list();
1757   bool old_objects_recorded = false;
1758   if (migration_failure()) return;
1759   while (next != undefined) {
1760     if (!old_objects_recorded) {
1761       old_objects_recorded = !InNewSpace(next);
1762     }
1763     DCHECK((InNewSpace(next) && !old_objects_recorded) || !InNewSpace(next));
1764     next = JSArrayBuffer::cast(next)->weak_next();
1765   }
1766 #endif
1767 }
1768
1769
1770 void Heap::ProcessNewArrayBufferViews(WeakObjectRetainer* retainer) {
1771   // Retain the list of new space views.
1772   Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
1773       this, new_array_buffer_views_list_, retainer, false);
1774   set_new_array_buffer_views_list(typed_array_obj);
1775
1776   // Some objects in the list may be in old space now. Find them
1777   // and move them to the corresponding array buffer.
1778   Object* view = VisitNewArrayBufferViewsWeakList(
1779       this, new_array_buffer_views_list_, retainer);
1780   set_new_array_buffer_views_list(view);
1781 }
1782
1783
1784 void Heap::TearDownArrayBuffers() {
1785   Object* undefined = undefined_value();
1786   for (Object* o = array_buffers_list(); o != undefined;) {
1787     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1788     Runtime::FreeArrayBuffer(isolate(), buffer);
1789     o = buffer->weak_next();
1790   }
1791   set_array_buffers_list(undefined);
1792 }
1793
1794
1795 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1796   Object* allocation_site_obj = VisitWeakList<AllocationSite>(
1797       this, allocation_sites_list(), retainer, false);
1798   set_allocation_sites_list(allocation_site_obj);
1799 }
1800
1801
1802 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1803   DisallowHeapAllocation no_allocation_scope;
1804   Object* cur = allocation_sites_list();
1805   bool marked = false;
1806   while (cur->IsAllocationSite()) {
1807     AllocationSite* casted = AllocationSite::cast(cur);
1808     if (casted->GetPretenureMode() == flag) {
1809       casted->ResetPretenureDecision();
1810       casted->set_deopt_dependent_code(true);
1811       marked = true;
1812     }
1813     cur = casted->weak_next();
1814   }
1815   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1816 }
1817
1818
1819 void Heap::EvaluateOldSpaceLocalPretenuring(
1820     uint64_t size_of_objects_before_gc) {
1821   uint64_t size_of_objects_after_gc = SizeOfObjects();
1822   double old_generation_survival_rate =
1823       (static_cast<double>(size_of_objects_after_gc) * 100) /
1824       static_cast<double>(size_of_objects_before_gc);
1825
1826   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1827     // Too many objects died in the old generation; pretenuring of the wrong
1828     // allocation sites may be the cause. We have to deopt all dependent code
1829     // registered in the allocation sites to re-evaluate our pretenuring
1830     // decisions.
1831     ResetAllAllocationSitesDependentCode(TENURED);
1832     if (FLAG_trace_pretenuring) {
1833       PrintF(
1834           "Deopt all allocation sites dependent code due to low survival "
1835           "rate in the old generation %f\n",
1836           old_generation_survival_rate);
1837     }
1838   }
1839 }
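
// A minimal sketch (not V8 API) of the survival-rate check above, with the
// threshold passed in because kOldSurvivalRateLowThreshold is defined
// elsewhere.  The function name is hypothetical.
#if 0  // Illustrative only.
static bool OldGenerationSurvivalTooLow(uint64_t size_before_gc,
                                        uint64_t size_after_gc,
                                        double low_threshold_percent) {
  // Example: 100 MB before GC and 20 MB after GC gives a 20% survival rate.
  double survival_rate = static_cast<double>(size_after_gc) * 100 /
                         static_cast<double>(size_before_gc);
  return survival_rate < low_threshold_percent;
}
#endif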
1840
1841
1842 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1843   DisallowHeapAllocation no_allocation;
1844   // All external strings are listed in the external string table.
1845
1846   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1847    public:
1848     explicit ExternalStringTableVisitorAdapter(
1849         v8::ExternalResourceVisitor* visitor)
1850         : visitor_(visitor) {}
1851     virtual void VisitPointers(Object** start, Object** end) {
1852       for (Object** p = start; p < end; p++) {
1853         DCHECK((*p)->IsExternalString());
1854         visitor_->VisitExternalString(
1855             Utils::ToLocal(Handle<String>(String::cast(*p))));
1856       }
1857     }
1858
1859    private:
1860     v8::ExternalResourceVisitor* visitor_;
1861   } external_string_table_visitor(visitor);
1862
1863   external_string_table_.Iterate(&external_string_table_visitor);
1864 }
1865
1866
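// Static visitor used by DoScavenge() below: for every pointer slot it checks
// whether the referenced object is still in new space and, if so, scavenges it.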
1867 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1868  public:
1869   static inline void VisitPointer(Heap* heap, Object** p) {
1870     Object* object = *p;
1871     if (!heap->InNewSpace(object)) return;
1872     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1873                          reinterpret_cast<HeapObject*>(object));
1874   }
1875 };
1876
1877
1878 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1879                          Address new_space_front) {
1880   do {
1881     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1882     // The addresses new_space_front and new_space_.top() define a
1883     // queue of unprocessed copied objects.  Process them until the
1884     // queue is empty.
1885     while (new_space_front != new_space_.top()) {
1886       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1887         HeapObject* object = HeapObject::FromAddress(new_space_front);
1888         new_space_front +=
1889             NewSpaceScavenger::IterateBody(object->map(), object);
1890       } else {
1891         new_space_front =
1892             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1893       }
1894     }
1895
1896     // Promote and process all the to-be-promoted objects.
1897     {
1898       StoreBufferRebuildScope scope(this, store_buffer(),
1899                                     &ScavengeStoreBufferCallback);
1900       while (!promotion_queue()->is_empty()) {
1901         HeapObject* target;
1902         int size;
1903         promotion_queue()->remove(&target, &size);
1904
1905         // The promoted object might already have been partially visited
1906         // during old space pointer iteration. Thus we search specifically
1907         // for pointers into the from semispace instead of looking for
1908         // pointers to new space.
1909         DCHECK(!target->IsMap());
1910         Address obj_address = target->address();
1911
1912         // We are not collecting slots on new space objects during mutation,
1913         // thus we have to scan for pointers to evacuation candidates when we
1914         // promote objects. But we should not record any slots in non-black
1915         // objects. A grey object's slots would be rescanned anyway.
1916         // A white object might not survive until the end of the collection,
1917         // so it would be a violation of the invariant to record its slots.
1918         bool record_slots = false;
1919         if (incremental_marking()->IsCompacting()) {
1920           MarkBit mark_bit = Marking::MarkBitFrom(target);
1921           record_slots = Marking::IsBlack(mark_bit);
1922         }
1923 #if V8_DOUBLE_FIELDS_UNBOXING
1924         LayoutDescriptorHelper helper(target->map());
1925         bool has_only_tagged_fields = helper.all_fields_tagged();
1926
1927         if (!has_only_tagged_fields) {
1928           for (int offset = 0; offset < size;) {
1929             int end_of_region_offset;
1930             if (helper.IsTagged(offset, size, &end_of_region_offset)) {
1931               IterateAndMarkPointersToFromSpace(
1932                   record_slots, obj_address + offset,
1933                   obj_address + end_of_region_offset, &ScavengeObject);
1934             }
1935             offset = end_of_region_offset;
1936           }
1937         } else {
1938 #endif
1939           IterateAndMarkPointersToFromSpace(
1940               record_slots, obj_address, obj_address + size, &ScavengeObject);
1941 #if V8_DOUBLE_FIELDS_UNBOXING
1942         }
1943 #endif
1944       }
1945     }
1946
1947     // Take another spin if there are now unswept objects in new space
1948     // (there are currently no more unswept promoted objects).
1949   } while (new_space_front != new_space_.top());
1950
1951   return new_space_front;
1952 }
1953
1954
1955 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1956               0);  // NOLINT
1957 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1958               0);  // NOLINT
1959 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1960                kDoubleAlignmentMask) == 0);  // NOLINT
1961
1962
1963 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1964                                               int size));
1965
1966 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1967                                        int size) {
1968   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1969     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1970     return HeapObject::FromAddress(object->address() + kPointerSize);
1971   } else {
1972     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1973                                kPointerSize);
1974     return object;
1975   }
1976 }
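
// A minimal sketch (not V8 API) of the address arithmetic above, assuming
// kDoubleAlignment == 2 * kPointerSize so that callers over-allocate by one
// word.  Either the leading word or the trailing over-allocated word becomes
// a filler.  The names below are hypothetical.
#if 0  // Illustrative only.
static uintptr_t ToyEnsureDoubleAligned(uintptr_t raw_address,
                                        uintptr_t double_alignment_mask,
                                        uintptr_t pointer_size) {
  if ((raw_address & double_alignment_mask) != 0) {
    return raw_address + pointer_size;  // Filler occupies the first word.
  }
  return raw_address;  // Filler occupies the trailing over-allocated word.
}
#endif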
1977
1978
1979 HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
1980   return EnsureDoubleAligned(this, object, size);
1981 }
1982
1983
1984 enum LoggingAndProfiling {
1985   LOGGING_AND_PROFILING_ENABLED,
1986   LOGGING_AND_PROFILING_DISABLED
1987 };
1988
1989
1990 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1991
1992
1993 template <MarksHandling marks_handling,
1994           LoggingAndProfiling logging_and_profiling_mode>
1995 class ScavengingVisitor : public StaticVisitorBase {
1996  public:
1997   static void Initialize() {
1998     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1999     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2000     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2001     table_.Register(kVisitByteArray, &EvacuateByteArray);
2002     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2003     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2004     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
2005     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
2006
2007     table_.Register(
2008         kVisitNativeContext,
2009         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2010             Context::kSize>);
2011
2012     table_.Register(
2013         kVisitConsString,
2014         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2015             ConsString::kSize>);
2016
2017     table_.Register(
2018         kVisitSlicedString,
2019         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2020             SlicedString::kSize>);
2021
2022     table_.Register(
2023         kVisitSymbol,
2024         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2025             Symbol::kSize>);
2026
2027     table_.Register(
2028         kVisitSharedFunctionInfo,
2029         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2030             SharedFunctionInfo::kSize>);
2031
2032     table_.Register(kVisitJSWeakCollection,
2033                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2034
2035     table_.Register(kVisitJSArrayBuffer,
2036                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2037
2038     table_.Register(kVisitJSTypedArray,
2039                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2040
2041     table_.Register(kVisitJSDataView,
2042                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2043
2044     table_.Register(kVisitJSRegExp,
2045                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
2046
2047     if (marks_handling == IGNORE_MARKS) {
2048       table_.Register(
2049           kVisitJSFunction,
2050           &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2051               JSFunction::kSize>);
2052     } else {
2053       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2054     }
2055
2056     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2057                                    kVisitDataObject, kVisitDataObjectGeneric>();
2058
2059     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2060                                    kVisitJSObject, kVisitJSObjectGeneric>();
2061
2062     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2063                                    kVisitStruct, kVisitStructGeneric>();
2064   }
2065
2066   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2067     return &table_;
2068   }
2069
2070  private:
2071   enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2072
2073   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2074     bool should_record = false;
2075 #ifdef DEBUG
2076     should_record = FLAG_heap_stats;
2077 #endif
2078     should_record = should_record || FLAG_log_gc;
2079     if (should_record) {
2080       if (heap->new_space()->Contains(obj)) {
2081         heap->new_space()->RecordAllocation(obj);
2082       } else {
2083         heap->new_space()->RecordPromotion(obj);
2084       }
2085     }
2086   }
2087
2088   // Helper function used by SemiSpaceCopyObject and PromoteObject to copy a
2089   // source object to an allocated target object and update the forwarding
2090   // pointer in the source object.
2091   INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
2092                                    HeapObject* target, int size)) {
2093     // If we migrate into to-space, then the to-space top pointer should be
2094     // right after the target object. Incorporate double alignment
2095     // over-allocation.
2096     DCHECK(!heap->InToSpace(target) ||
2097            target->address() + size == heap->new_space()->top() ||
2098            target->address() + size + kPointerSize == heap->new_space()->top());
2099
2100     // Make sure that we do not overwrite the promotion queue which is at
2101     // the end of to-space.
2102     DCHECK(!heap->InToSpace(target) ||
2103            heap->promotion_queue()->IsBelowPromotionQueue(
2104                heap->new_space()->top()));
2105
2106     // Copy the content of source to target.
2107     heap->CopyBlock(target->address(), source->address(), size);
2108
2109     // Set the forwarding address.
2110     source->set_map_word(MapWord::FromForwardingAddress(target));
2111
2112     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2113       // Update NewSpace stats if necessary.
2114       RecordCopiedObject(heap, target);
2115       heap->OnMoveEvent(target, source, size);
2116     }
2117
2118     if (marks_handling == TRANSFER_MARKS) {
2119       if (Marking::TransferColor(source, target)) {
2120         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2121       }
2122     }
2123   }
2124
2125   template <int alignment>
2126   static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
2127                                          HeapObject* object, int object_size) {
2128     Heap* heap = map->GetHeap();
2129
2130     int allocation_size = object_size;
2131     if (alignment != kObjectAlignment) {
2132       DCHECK(alignment == kDoubleAlignment);
2133       allocation_size += kPointerSize;
2134     }
2135
2136     DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
2137     AllocationResult allocation =
2138         heap->new_space()->AllocateRaw(allocation_size);
2139
2140     HeapObject* target = NULL;  // Initialization to please compiler.
2141     if (allocation.To(&target)) {
2142       // Order is important here: Set the promotion limit before storing a
2143       // filler for double alignment or migrating the object. Otherwise we
2144       // may end up overwriting promotion queue entries when we migrate the
2145       // object.
2146       heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2147
2148       if (alignment != kObjectAlignment) {
2149         target = EnsureDoubleAligned(heap, target, allocation_size);
2150       }
2151       MigrateObject(heap, object, target, object_size);
2152
2153       // Update slot to new target.
2154       *slot = target;
2155
2156       heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2157       return true;
2158     }
2159     return false;
2160   }
2161
2162
2163   template <ObjectContents object_contents, int alignment>
2164   static inline bool PromoteObject(Map* map, HeapObject** slot,
2165                                    HeapObject* object, int object_size) {
2166     Heap* heap = map->GetHeap();
2167
2168     int allocation_size = object_size;
2169     if (alignment != kObjectAlignment) {
2170       DCHECK(alignment == kDoubleAlignment);
2171       allocation_size += kPointerSize;
2172     }
2173
2174     AllocationResult allocation;
2175     if (object_contents == DATA_OBJECT) {
2176       DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2177       allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2178     } else {
2179       DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2180       allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2181     }
2182
2183     HeapObject* target = NULL;  // Initialization to please compiler.
2184     if (allocation.To(&target)) {
2185       if (alignment != kObjectAlignment) {
2186         target = EnsureDoubleAligned(heap, target, allocation_size);
2187       }
2188       MigrateObject(heap, object, target, object_size);
2189
2190       // Update slot to new target.
2191       *slot = target;
2192
2193       if (object_contents == POINTER_OBJECT) {
2194         if (map->instance_type() == JS_FUNCTION_TYPE) {
2195           heap->promotion_queue()->insert(target,
2196                                           JSFunction::kNonWeakFieldsEndOffset);
2197         } else {
2198           heap->promotion_queue()->insert(target, object_size);
2199         }
2200       }
2201       heap->IncrementPromotedObjectsSize(object_size);
2202       return true;
2203     }
2204     return false;
2205   }
2206
2207
2208   template <ObjectContents object_contents, int alignment>
2209   static inline void EvacuateObject(Map* map, HeapObject** slot,
2210                                     HeapObject* object, int object_size) {
2211     SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2212     SLOW_DCHECK(object->Size() == object_size);
2213     Heap* heap = map->GetHeap();
2214
2215     if (!heap->ShouldBePromoted(object->address(), object_size)) {
2216       // A semi-space copy may fail due to fragmentation. In that case, we
2217       // try to promote the object.
2218       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2219         return;
2220       }
2221       heap->set_migration_failure(true);
2222     }
2223
2224     if (PromoteObject<object_contents, alignment>(map, slot, object,
2225                                                   object_size)) {
2226       return;
2227     }
2228
2229     // If promotion failed, we try to copy the object to the other semi-space.
2230     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2231
2232     UNREACHABLE();
2233   }
2234
2235
2236   static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2237                                         HeapObject* object) {
2238     ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2239         JSFunction::kSize>(map, slot, object);
2240
2241     MapWord map_word = object->map_word();
2242     DCHECK(map_word.IsForwardingAddress());
2243     HeapObject* target = map_word.ToForwardingAddress();
2244
2245     MarkBit mark_bit = Marking::MarkBitFrom(target);
2246     if (Marking::IsBlack(mark_bit)) {
2247       // This object is black, and it might not be rescanned by the marker.
2248       // We should explicitly record the code entry slot for compaction because
2249       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2250       // miss it, as it is not HeapObject-tagged.
2251       Address code_entry_slot =
2252           target->address() + JSFunction::kCodeEntryOffset;
2253       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2254       map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2255           code_entry_slot, code);
2256     }
2257   }
2258
2259
2260   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2261                                         HeapObject* object) {
2262     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2263     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2264                                                      object_size);
2265   }
2266
2267
2268   static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2269                                               HeapObject* object) {
2270     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2271     int object_size = FixedDoubleArray::SizeFor(length);
2272     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2273                                                   object_size);
2274   }
2275
2276
2277   static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2278                                              HeapObject* object) {
2279     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2280     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2281                                                   object_size);
2282   }
2283
2284
2285   static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2286                                                HeapObject* object) {
2287     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2288     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2289                                                   object_size);
2290   }
2291
2292
2293   static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2294                                        HeapObject* object) {
2295     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2296     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2297                                                   object_size);
2298   }
2299
2300
2301   static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2302                                               HeapObject* object) {
2303     int object_size = SeqOneByteString::cast(object)
2304                           ->SeqOneByteStringSize(map->instance_type());
2305     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2306                                                   object_size);
2307   }
2308
2309
2310   static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2311                                               HeapObject* object) {
2312     int object_size = SeqTwoByteString::cast(object)
2313                           ->SeqTwoByteStringSize(map->instance_type());
2314     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2315                                                   object_size);
2316   }
2317
2318
2319   static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2320                                                HeapObject* object) {
2321     DCHECK(IsShortcutCandidate(map->instance_type()));
2322
2323     Heap* heap = map->GetHeap();
2324
2325     if (marks_handling == IGNORE_MARKS &&
2326         ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2327       HeapObject* first =
2328           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2329
2330       *slot = first;
2331
2332       if (!heap->InNewSpace(first)) {
2333         object->set_map_word(MapWord::FromForwardingAddress(first));
2334         return;
2335       }
2336
2337       MapWord first_word = first->map_word();
2338       if (first_word.IsForwardingAddress()) {
2339         HeapObject* target = first_word.ToForwardingAddress();
2340
2341         *slot = target;
2342         object->set_map_word(MapWord::FromForwardingAddress(target));
2343         return;
2344       }
2345
2346       heap->DoScavengeObject(first->map(), slot, first);
2347       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2348       return;
2349     }
2350
2351     int object_size = ConsString::kSize;
2352     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2353                                                      object_size);
2354   }
2355
2356   template <ObjectContents object_contents>
2357   class ObjectEvacuationStrategy {
2358    public:
2359     template <int object_size>
2360     static inline void VisitSpecialized(Map* map, HeapObject** slot,
2361                                         HeapObject* object) {
2362       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2363                                                         object_size);
2364     }
2365
2366     static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2367       int object_size = map->instance_size();
2368       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2369                                                         object_size);
2370     }
2371   };
2372
2373   static VisitorDispatchTable<ScavengingCallback> table_;
2374 };
2375
2376
2377 template <MarksHandling marks_handling,
2378           LoggingAndProfiling logging_and_profiling_mode>
2379 VisitorDispatchTable<ScavengingCallback>
2380     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2381
2382
2383 static void InitializeScavengingVisitorsTables() {
2384   ScavengingVisitor<TRANSFER_MARKS,
2385                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2386   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2387   ScavengingVisitor<TRANSFER_MARKS,
2388                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2389   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2390 }
2391
2392
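// Selects one of the four statically initialized visitor tables: marks
// handling (IGNORE_MARKS when incremental marking is off, TRANSFER_MARKS when
// it is on) crossed with whether logging or profiling needs to observe object
// moves.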
2393 void Heap::SelectScavengingVisitorsTable() {
2394   bool logging_and_profiling =
2395       FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2396       isolate()->cpu_profiler()->is_profiling() ||
2397       (isolate()->heap_profiler() != NULL &&
2398        isolate()->heap_profiler()->is_tracking_object_moves());
2399
2400   if (!incremental_marking()->IsMarking()) {
2401     if (!logging_and_profiling) {
2402       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2403           IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2404     } else {
2405       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2406           IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2407     }
2408   } else {
2409     if (!logging_and_profiling) {
2410       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2411           TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2412     } else {
2413       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2414           TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2415     }
2416
2417     if (incremental_marking()->IsCompacting()) {
2418       // When compacting, forbid short-circuiting of cons strings.
2419       // Scavenging code relies on the fact that a new space object
2420       // can't be evacuated into an evacuation candidate, but
2421       // short-circuiting violates this assumption.
2422       scavenging_visitors_table_.Register(
2423           StaticVisitorBase::kVisitShortcutCandidate,
2424           scavenging_visitors_table_.GetVisitorById(
2425               StaticVisitorBase::kVisitConsString));
2426     }
2427   }
2428 }
2429
2430
2431 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2432   SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2433   MapWord first_word = object->map_word();
2434   SLOW_DCHECK(!first_word.IsForwardingAddress());
2435   Map* map = first_word.ToMap();
2436   // TODO(jochen): Remove again after fixing http://crbug.com/452095
2437   CHECK((*p)->IsHeapObject() == object->IsHeapObject());
2438   map->GetHeap()->DoScavengeObject(map, p, object);
2439 }
2440
2441
2442 void Heap::ConfigureInitialOldGenerationSize() {
2443   if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
2444     old_generation_allocation_limit_ =
2445         Max(kMinimumOldGenerationAllocationLimit,
2446             static_cast<intptr_t>(
2447                 static_cast<double>(old_generation_allocation_limit_) *
2448                 (tracer()->AverageSurvivalRatio() / 100)));
2449   }
2450 }
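
// A minimal numeric sketch (not V8 API) of the adjustment above: the old
// generation allocation limit is scaled by the average survival ratio but
// never drops below the minimum, whose value is defined elsewhere.  The
// function name is hypothetical.
#if 0  // Illustrative only.
static intptr_t ToyConfiguredOldGenerationLimit(intptr_t current_limit,
                                                double average_survival_ratio,
                                                intptr_t minimum_limit) {
  // Example: a 64 MB limit and a 25% average survival ratio scale to 16 MB,
  // which is then clamped to |minimum_limit| if that is larger.
  intptr_t scaled = static_cast<intptr_t>(
      static_cast<double>(current_limit) * (average_survival_ratio / 100));
  return scaled > minimum_limit ? scaled : minimum_limit;
}
#endif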
2451
2452
2453 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2454                                           int instance_size) {
2455   Object* result;
2456   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2457   if (!allocation.To(&result)) return allocation;
2458
2459   // Map::cast cannot be used due to uninitialized map field.
2460   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2461   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2462   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2463   // Initialize to only containing tagged fields.
2464   reinterpret_cast<Map*>(result)->set_visitor_id(
2465       StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
2466   if (FLAG_unbox_double_fields) {
2467     reinterpret_cast<Map*>(result)
2468         ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2469   }
2470   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2471   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2472   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2473   reinterpret_cast<Map*>(result)->set_bit_field(0);
2474   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2475   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2476                    Map::OwnsDescriptors::encode(true) |
2477                    Map::Counter::encode(Map::kRetainingCounterStart);
2478   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2479   reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
2480   return result;
2481 }
2482
2483
2484 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2485                                    int instance_size,
2486                                    ElementsKind elements_kind) {
2487   HeapObject* result;
2488   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2489   if (!allocation.To(&result)) return allocation;
2490
2491   result->set_map_no_write_barrier(meta_map());
2492   Map* map = Map::cast(result);
2493   map->set_instance_type(instance_type);
2494   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2495   map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
2496   map->set_instance_size(instance_size);
2497   map->set_inobject_properties(0);
2498   map->set_pre_allocated_property_fields(0);
2499   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2500   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2501                           SKIP_WRITE_BARRIER);
2502   map->set_weak_cell_cache(Smi::FromInt(0));
2503   map->set_raw_transitions(Smi::FromInt(0));
2504   map->set_unused_property_fields(0);
2505   map->set_instance_descriptors(empty_descriptor_array());
2506   if (FLAG_unbox_double_fields) {
2507     map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2508   }
2509   // Must be called only after |instance_type|, |instance_size| and
2510   // |layout_descriptor| are set.
2511   map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
2512   map->set_bit_field(0);
2513   map->set_bit_field2(1 << Map::kIsExtensible);
2514   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2515                    Map::OwnsDescriptors::encode(true) |
2516                    Map::Counter::encode(Map::kRetainingCounterStart);
2517   map->set_bit_field3(bit_field3);
2518   map->set_elements_kind(elements_kind);
2519
2520   return map;
2521 }
2522
2523
2524 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2525                                             AllocationSpace space) {
2526   HeapObject* obj;
2527   {
2528     AllocationResult allocation = AllocateRaw(size, space, space);
2529     if (!allocation.To(&obj)) return allocation;
2530   }
2531 #ifdef DEBUG
2532   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2533   DCHECK(chunk->owner()->identity() == space);
2534 #endif
2535   CreateFillerObjectAt(obj->address(), size);
2536   return obj;
2537 }
2538
2539
2540 const Heap::StringTypeTable Heap::string_type_table[] = {
2541 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2542   { type, size, k##camel_name##MapRootIndex }             \
2543   ,
2544     STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2545 #undef STRING_TYPE_ELEMENT
2546 };
2547
2548
2549 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2550 #define CONSTANT_STRING_ELEMENT(name, contents) \
2551   { contents, k##name##RootIndex }              \
2552   ,
2553     INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2554 #undef CONSTANT_STRING_ELEMENT
2555 };
2556
2557
2558 const Heap::StructTable Heap::struct_table[] = {
2559 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
2560   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2561   ,
2562     STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2563 #undef STRUCT_TABLE_ELEMENT
2564 };
2565
2566
2567 bool Heap::CreateInitialMaps() {
2568   HeapObject* obj;
2569   {
2570     AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2571     if (!allocation.To(&obj)) return false;
2572   }
2573   // Map::cast cannot be used due to uninitialized map field.
2574   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2575   set_meta_map(new_meta_map);
2576   new_meta_map->set_map(new_meta_map);
2577
2578   {  // Partial map allocation
2579 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
2580   {                                                                          \
2581     Map* map;                                                                \
2582     if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2583     set_##field_name##_map(map);                                             \
2584   }
2585
2586     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2587     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2588     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2589     ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2590                          constant_pool_array);
2591
2592 #undef ALLOCATE_PARTIAL_MAP
2593   }
2594
2595   // Allocate the empty array.
2596   {
2597     AllocationResult allocation = AllocateEmptyFixedArray();
2598     if (!allocation.To(&obj)) return false;
2599   }
2600   set_empty_fixed_array(FixedArray::cast(obj));
2601
2602   {
2603     AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2604     if (!allocation.To(&obj)) return false;
2605   }
2606   set_null_value(Oddball::cast(obj));
2607   Oddball::cast(obj)->set_kind(Oddball::kNull);
2608
2609   {
2610     AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2611     if (!allocation.To(&obj)) return false;
2612   }
2613   set_undefined_value(Oddball::cast(obj));
2614   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2615   DCHECK(!InNewSpace(undefined_value()));
2616
2617   // Set preliminary exception sentinel value before actually initializing it.
2618   set_exception(null_value());
2619
2620   // Allocate the empty descriptor array.
2621   {
2622     AllocationResult allocation = AllocateEmptyFixedArray();
2623     if (!allocation.To(&obj)) return false;
2624   }
2625   set_empty_descriptor_array(DescriptorArray::cast(obj));
2626
2627   // Allocate the constant pool array.
2628   {
2629     AllocationResult allocation = AllocateEmptyConstantPoolArray();
2630     if (!allocation.To(&obj)) return false;
2631   }
2632   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2633
2634   // Fix the instance_descriptors for the existing maps.
2635   meta_map()->set_code_cache(empty_fixed_array());
2636   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2637   meta_map()->set_raw_transitions(Smi::FromInt(0));
2638   meta_map()->set_instance_descriptors(empty_descriptor_array());
2639   if (FLAG_unbox_double_fields) {
2640     meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2641   }
2642
2643   fixed_array_map()->set_code_cache(empty_fixed_array());
2644   fixed_array_map()->set_dependent_code(
2645       DependentCode::cast(empty_fixed_array()));
2646   fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
2647   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2648   if (FLAG_unbox_double_fields) {
2649     fixed_array_map()->set_layout_descriptor(
2650         LayoutDescriptor::FastPointerLayout());
2651   }
2652
2653   undefined_map()->set_code_cache(empty_fixed_array());
2654   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2655   undefined_map()->set_raw_transitions(Smi::FromInt(0));
2656   undefined_map()->set_instance_descriptors(empty_descriptor_array());
2657   if (FLAG_unbox_double_fields) {
2658     undefined_map()->set_layout_descriptor(
2659         LayoutDescriptor::FastPointerLayout());
2660   }
2661
2662   null_map()->set_code_cache(empty_fixed_array());
2663   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2664   null_map()->set_raw_transitions(Smi::FromInt(0));
2665   null_map()->set_instance_descriptors(empty_descriptor_array());
2666   if (FLAG_unbox_double_fields) {
2667     null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
2668   }
2669
2670   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2671   constant_pool_array_map()->set_dependent_code(
2672       DependentCode::cast(empty_fixed_array()));
2673   constant_pool_array_map()->set_raw_transitions(Smi::FromInt(0));
2674   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2675   if (FLAG_unbox_double_fields) {
2676     constant_pool_array_map()->set_layout_descriptor(
2677         LayoutDescriptor::FastPointerLayout());
2678   }
2679
2680   // Fix prototype object for existing maps.
2681   meta_map()->set_prototype(null_value());
2682   meta_map()->set_constructor_or_backpointer(null_value());
2683
2684   fixed_array_map()->set_prototype(null_value());
2685   fixed_array_map()->set_constructor_or_backpointer(null_value());
2686
2687   undefined_map()->set_prototype(null_value());
2688   undefined_map()->set_constructor_or_backpointer(null_value());
2689
2690   null_map()->set_prototype(null_value());
2691   null_map()->set_constructor_or_backpointer(null_value());
2692
2693   constant_pool_array_map()->set_prototype(null_value());
2694   constant_pool_array_map()->set_constructor_or_backpointer(null_value());
2695
2696   {  // Map allocation
2697 #define ALLOCATE_MAP(instance_type, size, field_name)               \
2698   {                                                                 \
2699     Map* map;                                                       \
2700     if (!AllocateMap((instance_type), size).To(&map)) return false; \
2701     set_##field_name##_map(map);                                    \
2702   }
2703
2704 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2705   ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2706
2707     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2708     DCHECK(fixed_array_map() != fixed_cow_array_map());
2709
2710     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2711     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2712     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2713                  mutable_heap_number)
2714     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2715     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2716
2717     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2718     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2719     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2720     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2721     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2722     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2723     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2724
2725     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2726       const StringTypeTable& entry = string_type_table[i];
2727       {
2728         AllocationResult allocation = AllocateMap(entry.type, entry.size);
2729         if (!allocation.To(&obj)) return false;
2730       }
2731       // Mark cons string maps as unstable, because their objects can change
2732       // maps during GC.
2733       Map* map = Map::cast(obj);
2734       if (StringShape(entry.type).IsCons()) map->mark_unstable();
2735       roots_[entry.index] = map;
2736     }
2737
2738     {  // Create a separate external one byte string map for native sources.
2739       AllocationResult allocation = AllocateMap(EXTERNAL_ONE_BYTE_STRING_TYPE,
2740                                                 ExternalOneByteString::kSize);
2741       if (!allocation.To(&obj)) return false;
2742       set_native_source_string_map(Map::cast(obj));
2743     }
2744
2745     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2746     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2747     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2748
2749 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
2750   ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2751                external_##type##_array)
2752
2753     TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2754 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2755
2756 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2757   ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2758
2759     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2760 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2761
2762     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2763
2764     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2765
2766     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2767     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2768     ALLOCATE_MAP(WEAK_CELL_TYPE, WeakCell::kSize, weak_cell)
2769     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2770     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2771
2772
2773     for (unsigned i = 0; i < arraysize(struct_table); i++) {
2774       const StructTable& entry = struct_table[i];
2775       Map* map;
2776       if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2777       roots_[entry.index] = map;
2778     }
2779
2780     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2781     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2782
2783     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2784     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2785     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2786     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2787     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2788     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
2789     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
2790
2791     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2792     native_context_map()->set_dictionary_map(true);
2793     native_context_map()->set_visitor_id(
2794         StaticVisitorBase::kVisitNativeContext);
2795
2796     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2797                  shared_function_info)
2798
2799     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2800     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2801     external_map()->set_is_extensible(false);
2802 #undef ALLOCATE_VARSIZE_MAP
2803 #undef ALLOCATE_MAP
2804   }
2805
2806   {  // Empty arrays
2807     {
2808       ByteArray* byte_array;
2809       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2810       set_empty_byte_array(byte_array);
2811     }
2812
2813 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
2814   {                                                                   \
2815     ExternalArray* obj;                                               \
2816     if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2817       return false;                                                   \
2818     set_empty_external_##type##_array(obj);                           \
2819   }
2820
2821     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2822 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2823
2824 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2825   {                                                                     \
2826     FixedTypedArrayBase* obj;                                           \
2827     if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2828       return false;                                                     \
2829     set_empty_fixed_##type##_array(obj);                                \
2830   }
2831
2832     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2833 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2834   }
2835   DCHECK(!InNewSpace(empty_fixed_array()));
2836   return true;
2837 }
2838
2839
2840 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2841                                           PretenureFlag pretenure) {
2842   // Statically ensure that it is safe to allocate heap numbers in paged
2843   // spaces.
2844   int size = HeapNumber::kSize;
2845   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2846
2847   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2848
2849   HeapObject* result;
2850   {
2851     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2852     if (!allocation.To(&result)) return allocation;
2853   }
2854
2855   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2856   HeapObject::cast(result)->set_map_no_write_barrier(map);
2857   HeapNumber::cast(result)->set_value(value);
2858   return result;
2859 }
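// All of the Allocate* helpers in this file follow the same pattern as the
// function above: request raw bytes from a space, bail out with the failed
// AllocationResult if the request cannot be satisfied, then install the map
// and initialize the fields. A rough caller-side sketch (illustrative only;
// the variable names are not taken from real call sites):
//
//   HeapObject* obj;
//   AllocationResult allocation =
//       heap->AllocateHeapNumber(1.5, IMMUTABLE, NOT_TENURED);
//   if (!allocation.To(&obj)) {
//     // Allocation failed; the caller is expected to trigger a GC and retry.
//   }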
2860
2861
2862 AllocationResult Heap::AllocateCell(Object* value) {
2863   int size = Cell::kSize;
2864   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2865
2866   HeapObject* result;
2867   {
2868     AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2869     if (!allocation.To(&result)) return allocation;
2870   }
2871   result->set_map_no_write_barrier(cell_map());
2872   Cell::cast(result)->set_value(value);
2873   return result;
2874 }
2875
2876
2877 AllocationResult Heap::AllocatePropertyCell() {
2878   int size = PropertyCell::kSize;
2879   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2880
2881   HeapObject* result;
2882   AllocationResult allocation =
2883       AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
2884   if (!allocation.To(&result)) return allocation;
2885
2886   result->set_map_no_write_barrier(global_property_cell_map());
2887   PropertyCell* cell = PropertyCell::cast(result);
2888   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2889                            SKIP_WRITE_BARRIER);
2890   cell->set_value(the_hole_value());
2891   return result;
2892 }
2893
2894
2895 AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
2896   int size = WeakCell::kSize;
2897   STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
2898   HeapObject* result = NULL;
2899   {
2900     AllocationResult allocation =
2901         AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
2902     if (!allocation.To(&result)) return allocation;
2903   }
2904   result->set_map_no_write_barrier(weak_cell_map());
2905   WeakCell::cast(result)->initialize(value);
2906   WeakCell::cast(result)->set_next(undefined_value(), SKIP_WRITE_BARRIER);
2907   return result;
2908 }
2909
2910
2911 void Heap::CreateApiObjects() {
2912   HandleScope scope(isolate());
2913   Factory* factory = isolate()->factory();
2914   Handle<Map> new_neander_map =
2915       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2916
2917   // Don't use Smi-only elements optimizations for objects with the neander
2918   // map. There are too many cases where element values are set directly,
2919   // without a single bottleneck that could trap the Smi-only -> fast elements
2920   // transition, and there appears to be no benefit in optimizing this case.
2921   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2922   set_neander_map(*new_neander_map);
2923
2924   Handle<JSObject> listeners = factory->NewNeanderObject();
2925   Handle<FixedArray> elements = factory->NewFixedArray(2);
2926   elements->set(0, Smi::FromInt(0));
2927   listeners->set_elements(*elements);
2928   set_message_listeners(*listeners);
2929 }
2930
2931
2932 void Heap::CreateJSEntryStub() {
2933   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2934   set_js_entry_code(*stub.GetCode());
2935 }
2936
2937
2938 void Heap::CreateJSConstructEntryStub() {
2939   JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2940   set_js_construct_entry_code(*stub.GetCode());
2941 }
2942
2943
2944 void Heap::CreateFixedStubs() {
2945   // Here we create roots for fixed stubs. They are needed at GC
2946   // for cooking and uncooking (check out frames.cc).
2947   // This eliminates the need for doing a dictionary lookup in the
2948   // stub cache for these stubs.
2949   HandleScope scope(isolate());
2950
2951   // Create stubs that should be there, so we don't unexpectedly have to
2952   // create them if we need them during the creation of another stub.
2953   // Stub creation mixes raw pointers and handles in an unsafe manner so
2954   // we cannot create stubs while we are creating stubs.
2955   CodeStub::GenerateStubsAheadOfTime(isolate());
2956
2957   // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2958   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2959   // is created.
2960
2961   // gcc-4.4 has problems generating correct code for the following snippet:
2962   // {  JSEntryStub stub;
2963   //    js_entry_code_ = *stub.GetCode();
2964   // }
2965   // {  JSConstructEntryStub stub;
2966   //    js_construct_entry_code_ = *stub.GetCode();
2967   // }
2968   // To work around the problem, make separate functions without inlining.
2969   Heap::CreateJSEntryStub();
2970   Heap::CreateJSConstructEntryStub();
2971 }
2972
2973
2974 void Heap::CreateInitialObjects() {
2975   HandleScope scope(isolate());
2976   Factory* factory = isolate()->factory();
2977
2978   // The -0 value must be set before NewNumber works.
2979   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2980   DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2981
2982   set_nan_value(*factory->NewHeapNumber(
2983       std::numeric_limits<double>::quiet_NaN(), IMMUTABLE, TENURED));
2984   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2985
2986   // The hole has not been created yet, but we want to put something
2987   // predictable in the gaps in the string table, so let's make that Smi zero.
2988   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2989
2990   // Allocate initial string table.
2991   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2992
2993   // Finish initializing oddballs after creating the string table.
2994   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2995                       factory->nan_value(), Oddball::kUndefined);
2996
2997   // Initialize the null_value.
2998   Oddball::Initialize(isolate(), factory->null_value(), "null",
2999                       handle(Smi::FromInt(0), isolate()), Oddball::kNull);
3000
3001   set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
3002                                       handle(Smi::FromInt(1), isolate()),
3003                                       Oddball::kTrue));
3004
3005   set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
3006                                        handle(Smi::FromInt(0), isolate()),
3007                                        Oddball::kFalse));
3008
3009   set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
3010                                           handle(Smi::FromInt(-1), isolate()),
3011                                           Oddball::kTheHole));
3012
3013   set_uninitialized_value(*factory->NewOddball(
3014       factory->uninitialized_map(), "uninitialized",
3015       handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
3016
3017   set_arguments_marker(*factory->NewOddball(
3018       factory->arguments_marker_map(), "arguments_marker",
3019       handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
3020
3021   set_no_interceptor_result_sentinel(*factory->NewOddball(
3022       factory->no_interceptor_result_sentinel_map(),
3023       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
3024       Oddball::kOther));
3025
3026   set_termination_exception(*factory->NewOddball(
3027       factory->termination_exception_map(), "termination_exception",
3028       handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
3029
3030   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
3031                                      handle(Smi::FromInt(-5), isolate()),
3032                                      Oddball::kException));
3033
3034   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
3035     Handle<String> str =
3036         factory->InternalizeUtf8String(constant_string_table[i].contents);
3037     roots_[constant_string_table[i].index] = *str;
3038   }
3039
3040   // Allocate the hidden string which is used to identify the hidden properties
3041   // in JSObjects. The hash code has a special value so that it will not match
3042   // the empty string when searching for the property. It cannot be part of the
3043   // loop above because it needs to be allocated manually with the special
3044   // hash code in place. The hash code for the hidden_string is zero to ensure
3045   // that it will always be at the first entry in property descriptors.
3046   hidden_string_ = *factory->NewOneByteInternalizedString(
3047       OneByteVector("", 0), String::kEmptyStringHash);
3048
3049   // Create the code_stubs dictionary. The initial size is set to avoid
3050   // expanding the dictionary during bootstrapping.
3051   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
3052
3053   // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
3054   // is set to avoid expanding the dictionary during bootstrapping.
3055   set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
3056
3057   set_polymorphic_code_cache(PolymorphicCodeCache::cast(
3058       *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
3059
3060   set_instanceof_cache_function(Smi::FromInt(0));
3061   set_instanceof_cache_map(Smi::FromInt(0));
3062   set_instanceof_cache_answer(Smi::FromInt(0));
3063
3064   {
3065     HandleScope scope(isolate());
3066 #define SYMBOL_INIT(name)                               \
3067   Handle<Symbol> name = factory->NewPrivateOwnSymbol(); \
3068   roots_[k##name##RootIndex] = *name;
3069     PRIVATE_SYMBOL_LIST(SYMBOL_INIT)
3070 #undef SYMBOL_INIT
3071   }
3072
3073   {
3074     HandleScope scope(isolate());
3075 #define SYMBOL_INIT(name, varname, description)                             \
3076   Handle<Symbol> name = factory->NewSymbol();                               \
3077   Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
3078   name->set_name(*name##d);                                                 \
3079   roots_[k##name##RootIndex] = *name;
3080     PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
3081 #undef SYMBOL_INIT
3082   }
3083
3084   CreateFixedStubs();
3085
3086   // Allocate the dictionary of intrinsic function names.
3087   Handle<NameDictionary> intrinsic_names =
3088       NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
3089   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
3090   set_intrinsic_function_names(*intrinsic_names);
3091
3092   set_number_string_cache(
3093       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
3094
3095   // Allocate cache for single character one byte strings.
3096   set_single_character_string_cache(
3097       *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
3098
3099   // Allocate cache for string split and regexp-multiple.
3100   set_string_split_cache(*factory->NewFixedArray(
3101       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3102   set_regexp_multiple_cache(*factory->NewFixedArray(
3103       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
3104
3105   // Allocate cache for external strings pointing to native source code.
3106   set_natives_source_cache(
3107       *factory->NewFixedArray(Natives::GetBuiltinsCount()));
3108
3109   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
3110
3111   // The symbol registry is initialized lazily.
3112   set_symbol_registry(Smi::FromInt(0));
3113
3114   // Allocate object to hold object observation state.
3115   set_observation_state(*factory->NewJSObjectFromMap(
3116       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
3117
3118   // Microtask queue uses the empty fixed array as a sentinel for "empty".
3119   // Number of queued microtasks stored in Isolate::pending_microtask_count().
3120   set_microtask_queue(empty_fixed_array());
3121
3122   if (FLAG_vector_ics) {
3123     FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
3124     Handle<TypeFeedbackVector> dummy_vector =
3125         factory->NewTypeFeedbackVector(&spec);
3126     dummy_vector->Set(FeedbackVectorICSlot(0),
3127                       *TypeFeedbackVector::MegamorphicSentinel(isolate()),
3128                       SKIP_WRITE_BARRIER);
3129     set_keyed_load_dummy_vector(*dummy_vector);
3130   } else {
3131     set_keyed_load_dummy_vector(empty_fixed_array());
3132   }
3133
3134   set_detached_contexts(empty_fixed_array());
3135   set_retained_maps(ArrayList::cast(empty_fixed_array()));
3136
3137   set_weak_object_to_code_table(
3138       *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
3139                           TENURED));
3140
3141   Handle<SeededNumberDictionary> slow_element_dictionary =
3142       SeededNumberDictionary::New(isolate(), 0, TENURED);
3143   slow_element_dictionary->set_requires_slow_elements();
3144   set_empty_slow_element_dictionary(*slow_element_dictionary);
3145
3146   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
3147
3148   // Handling of script id generation is in Factory::NewScript.
3149   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
3150
3151   set_allocation_sites_scratchpad(
3152       *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
3153   InitializeAllocationSitesScratchpad();
3154
3155   // Initialize keyed lookup cache.
3156   isolate_->keyed_lookup_cache()->Clear();
3157
3158   // Initialize context slot cache.
3159   isolate_->context_slot_cache()->Clear();
3160
3161   // Initialize descriptor cache.
3162   isolate_->descriptor_lookup_cache()->Clear();
3163
3164   // Initialize compilation cache.
3165   isolate_->compilation_cache()->Clear();
3166 }
3167
3168
3169 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3170   switch (root_index) {
3171     case kStoreBufferTopRootIndex:
3172     case kNumberStringCacheRootIndex:
3173     case kInstanceofCacheFunctionRootIndex:
3174     case kInstanceofCacheMapRootIndex:
3175     case kInstanceofCacheAnswerRootIndex:
3176     case kCodeStubsRootIndex:
3177     case kNonMonomorphicCacheRootIndex:
3178     case kPolymorphicCodeCacheRootIndex:
3179     case kEmptyScriptRootIndex:
3180     case kSymbolRegistryRootIndex:
3181     case kMaterializedObjectsRootIndex:
3182     case kAllocationSitesScratchpadRootIndex:
3183     case kMicrotaskQueueRootIndex:
3184     case kDetachedContextsRootIndex:
3185     case kWeakObjectToCodeTableRootIndex:
3186     case kRetainedMapsRootIndex:
3187 // Smi values
3188 #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
3189       SMI_ROOT_LIST(SMI_ENTRY)
3190 #undef SMI_ENTRY
3191     // String table
3192     case kStringTableRootIndex:
3193       return true;
3194
3195     default:
3196       return false;
3197   }
3198 }
3199
3200
3201 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3202   return !RootCanBeWrittenAfterInitialization(root_index) &&
3203          !InNewSpace(roots_array_start()[root_index]);
3204 }
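// A root can be treated as a constant (for example, embedded directly into
// generated code) only if it is never overwritten after initialization and is
// not a new-space object that a scavenge could move; both conditions are
// checked above.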
3205
3206
3207 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
3208                                    Object* key_pattern, ResultsCacheType type) {
3209   FixedArray* cache;
3210   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3211   if (type == STRING_SPLIT_SUBSTRINGS) {
3212     DCHECK(key_pattern->IsString());
3213     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3214     cache = heap->string_split_cache();
3215   } else {
3216     DCHECK(type == REGEXP_MULTIPLE_INDICES);
3217     DCHECK(key_pattern->IsFixedArray());
3218     cache = heap->regexp_multiple_cache();
3219   }
3220
3221   uint32_t hash = key_string->Hash();
3222   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3223                     ~(kArrayEntriesPerCacheEntry - 1));
3224   if (cache->get(index + kStringOffset) == key_string &&
3225       cache->get(index + kPatternOffset) == key_pattern) {
3226     return cache->get(index + kArrayOffset);
3227   }
3228   index =
3229       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3230   if (cache->get(index + kStringOffset) == key_string &&
3231       cache->get(index + kPatternOffset) == key_pattern) {
3232     return cache->get(index + kArrayOffset);
3233   }
3234   return Smi::FromInt(0);
3235 }
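// The lookup above is effectively two-way set-associative: the string hash is
// masked down to the cache size and rounded down to an entry boundary
// (kArrayEntriesPerCacheEntry slots per entry) to find the primary entry; if
// that entry does not match, the entry kArrayEntriesPerCacheEntry slots
// further on, wrapping around the cache, is probed as the secondary entry.
// Enter() below fills the primary entry first, then the secondary one, and on
// a conflict clears the secondary entry and overwrites the primary one.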
3236
3237
3238 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3239                                Handle<Object> key_pattern,
3240                                Handle<FixedArray> value_array,
3241                                ResultsCacheType type) {
3242   Factory* factory = isolate->factory();
3243   Handle<FixedArray> cache;
3244   if (!key_string->IsInternalizedString()) return;
3245   if (type == STRING_SPLIT_SUBSTRINGS) {
3246     DCHECK(key_pattern->IsString());
3247     if (!key_pattern->IsInternalizedString()) return;
3248     cache = factory->string_split_cache();
3249   } else {
3250     DCHECK(type == REGEXP_MULTIPLE_INDICES);
3251     DCHECK(key_pattern->IsFixedArray());
3252     cache = factory->regexp_multiple_cache();
3253   }
3254
3255   uint32_t hash = key_string->Hash();
3256   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3257                     ~(kArrayEntriesPerCacheEntry - 1));
3258   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3259     cache->set(index + kStringOffset, *key_string);
3260     cache->set(index + kPatternOffset, *key_pattern);
3261     cache->set(index + kArrayOffset, *value_array);
3262   } else {
3263     uint32_t index2 =
3264         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3265     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3266       cache->set(index2 + kStringOffset, *key_string);
3267       cache->set(index2 + kPatternOffset, *key_pattern);
3268       cache->set(index2 + kArrayOffset, *value_array);
3269     } else {
3270       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3271       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3272       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3273       cache->set(index + kStringOffset, *key_string);
3274       cache->set(index + kPatternOffset, *key_pattern);
3275       cache->set(index + kArrayOffset, *value_array);
3276     }
3277   }
3278   // If the array is a reasonably short list of substrings, convert it into a
3279   // list of internalized strings.
3280   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3281     for (int i = 0; i < value_array->length(); i++) {
3282       Handle<String> str(String::cast(value_array->get(i)), isolate);
3283       Handle<String> internalized_str = factory->InternalizeString(str);
3284       value_array->set(i, *internalized_str);
3285     }
3286   }
3287   // Convert backing store to a copy-on-write array.
3288   value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3289 }
3290
3291
3292 void RegExpResultsCache::Clear(FixedArray* cache) {
3293   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3294     cache->set(i, Smi::FromInt(0));
3295   }
3296 }
3297
3298
3299 int Heap::FullSizeNumberStringCacheLength() {
3300   // Compute the size of the number string cache based on the max newspace size.
3301   // The number string cache has a minimum size based on twice the initial cache
3302   // size to ensure that it is bigger after being made 'full size'.
3303   int number_string_cache_size = max_semi_space_size_ / 512;
3304   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3305                                  Min(0x4000, number_string_cache_size));
3306   // There is a string and a number per entry so the length is twice the number
3307   // of entries.
3308   return number_string_cache_size * 2;
3309 }
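// Worked example (illustrative numbers only): with a max_semi_space_size_ of
// 8 MB, 8 MB / 512 = 16384 entries; Min(0x4000, 16384) leaves 16384, and
// Max() keeps that value as long as it exceeds twice the initial cache size,
// so the returned length would be 2 * 16384 = 32768 slots.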
3310
3311
3312 void Heap::FlushNumberStringCache() {
3313   // Flush the number to string cache.
3314   int len = number_string_cache()->length();
3315   for (int i = 0; i < len; i++) {
3316     number_string_cache()->set_undefined(i);
3317   }
3318 }
3319
3320
3321 void Heap::FlushAllocationSitesScratchpad() {
3322   for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3323     allocation_sites_scratchpad()->set_undefined(i);
3324   }
3325   allocation_sites_scratchpad_length_ = 0;
3326 }
3327
3328
3329 void Heap::InitializeAllocationSitesScratchpad() {
3330   DCHECK(allocation_sites_scratchpad()->length() ==
3331          kAllocationSiteScratchpadSize);
3332   for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3333     allocation_sites_scratchpad()->set_undefined(i);
3334   }
3335 }
3336
3337
3338 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3339                                          ScratchpadSlotMode mode) {
3340   if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3341     // We cannot use the normal write-barrier because slots need to be
3342     // recorded with non-incremental marking as well. We have to explicitly
3343     // record the slot to take evacuation candidates into account.
3344     allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3345                                        site, SKIP_WRITE_BARRIER);
3346     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3347         allocation_sites_scratchpad_length_);
3348
3349     if (mode == RECORD_SCRATCHPAD_SLOT) {
3350       // We need to allow slots buffer overflow here since the evacuation
3351       // candidates are not part of the global list of old space pages and
3352       // releasing an evacuation candidate due to a slots buffer overflow
3353       // results in lost pages.
3354       mark_compact_collector()->RecordSlot(slot, slot, *slot,
3355                                            SlotsBuffer::IGNORE_OVERFLOW);
3356     }
3357     allocation_sites_scratchpad_length_++;
3358   }
3359 }
3360
3361
3362 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3363   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3364 }
3365
3366
3367 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3368     ExternalArrayType array_type) {
3369   switch (array_type) {
3370 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3371   case kExternal##Type##Array:                                  \
3372     return kExternal##Type##ArrayMapRootIndex;
3373
3374     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3375 #undef ARRAY_TYPE_TO_ROOT_INDEX
3376
3377     default:
3378       UNREACHABLE();
3379       return kUndefinedValueRootIndex;
3380   }
3381 }
3382
3383
3384 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3385   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3386 }
3387
3388
3389 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3390     ExternalArrayType array_type) {
3391   switch (array_type) {
3392 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3393   case kExternal##Type##Array:                                  \
3394     return kFixed##Type##ArrayMapRootIndex;
3395
3396     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3397 #undef ARRAY_TYPE_TO_ROOT_INDEX
3398
3399     default:
3400       UNREACHABLE();
3401       return kUndefinedValueRootIndex;
3402   }
3403 }
3404
3405
3406 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3407     ElementsKind elementsKind) {
3408   switch (elementsKind) {
3409 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3410   case EXTERNAL_##TYPE##_ELEMENTS:                                \
3411     return kEmptyExternal##Type##ArrayRootIndex;
3412
3413     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3414 #undef ELEMENT_KIND_TO_ROOT_INDEX
3415
3416     default:
3417       UNREACHABLE();
3418       return kUndefinedValueRootIndex;
3419   }
3420 }
3421
3422
3423 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3424     ElementsKind elementsKind) {
3425   switch (elementsKind) {
3426 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3427   case TYPE##_ELEMENTS:                                           \
3428     return kEmptyFixed##Type##ArrayRootIndex;
3429
3430     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3431 #undef ELEMENT_KIND_TO_ROOT_INDEX
3432     default:
3433       UNREACHABLE();
3434       return kUndefinedValueRootIndex;
3435   }
3436 }
3437
3438
3439 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3440   return ExternalArray::cast(
3441       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3442 }
3443
3444
3445 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3446   return FixedTypedArrayBase::cast(
3447       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3448 }
3449
3450
3451 AllocationResult Heap::AllocateForeign(Address address,
3452                                        PretenureFlag pretenure) {
3453   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3454   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3455   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3456   Foreign* result;
3457   AllocationResult allocation = Allocate(foreign_map(), space);
3458   if (!allocation.To(&result)) return allocation;
3459   result->set_foreign_address(address);
3460   return result;
3461 }
3462
3463
3464 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3465   if (length < 0 || length > ByteArray::kMaxLength) {
3466     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3467   }
3468   int size = ByteArray::SizeFor(length);
3469   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3470   HeapObject* result;
3471   {
3472     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3473     if (!allocation.To(&result)) return allocation;
3474   }
3475
3476   result->set_map_no_write_barrier(byte_array_map());
3477   ByteArray::cast(result)->set_length(length);
3478   return result;
3479 }
3480
3481
3482 void Heap::CreateFillerObjectAt(Address addr, int size) {
3483   if (size == 0) return;
3484   HeapObject* filler = HeapObject::FromAddress(addr);
3485   // At this point, we may be deserializing the heap from a snapshot, and
3486   // the maps used below may not have been created yet and may still be NULL.
3487   if (size == kPointerSize) {
3488     filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
3489     DCHECK(filler->map() == NULL || filler->map() == one_pointer_filler_map());
3490   } else if (size == 2 * kPointerSize) {
3491     filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
3492     DCHECK(filler->map() == NULL || filler->map() == two_pointer_filler_map());
3493   } else {
3494     filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
3495     DCHECK(filler->map() == NULL || filler->map() == free_space_map());
3496     FreeSpace::cast(filler)->nobarrier_set_size(size);
3497   }
3498 }
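// Fillers keep the heap iterable: any gap left behind by trimming an array or
// by discarding an allocation must look like a valid object to heap
// iterators. LeftTrimFixedArray(), RightTrimFixedArray() and AllocateCode()
// below rely on this when they shrink or abandon an object in place.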
3499
3500
3501 bool Heap::CanMoveObjectStart(HeapObject* object) {
3502   Address address = object->address();
3503   bool is_in_old_pointer_space = InOldPointerSpace(address);
3504   bool is_in_old_data_space = InOldDataSpace(address);
3505
3506   if (lo_space()->Contains(object)) return false;
3507
3508   Page* page = Page::FromAddress(address);
3509   // We can move the object start if at least one of the following holds:
3510   // (1) the object is not in old pointer or old data space,
3511   // (2) the page of the object was already swept,
3512   // (3) the page was already concurrently swept. This case is an optimization
3513   // for concurrent sweeping. The WasSwept predicate for concurrently swept
3514   // pages is set after sweeping all pages.
3515   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3516          page->WasSwept() || page->SweepingCompleted();
3517 }
3518
3519
3520 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3521   if (incremental_marking()->IsMarking() &&
3522       Marking::IsBlack(Marking::MarkBitFrom(address))) {
3523     if (mode == FROM_GC) {
3524       MemoryChunk::IncrementLiveBytesFromGC(address, by);
3525     } else {
3526       MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3527     }
3528   }
3529 }
3530
3531
3532 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3533                                          int elements_to_trim) {
3534   DCHECK(!object->IsFixedTypedArrayBase());
3535   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3536   const int bytes_to_trim = elements_to_trim * element_size;
3537   Map* map = object->map();
3538
3539   // For now this trick is only applied to objects in new and paged space.
3540   // In large object space the object's start must coincide with the chunk
3541   // start, and thus the trick is just not applicable.
3542   DCHECK(!lo_space()->Contains(object));
3543   DCHECK(object->map() != fixed_cow_array_map());
3544
3545   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3546   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3547   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3548
3549   const int len = object->length();
3550   DCHECK(elements_to_trim <= len);
3551
3552   // Calculate location of new array start.
3553   Address new_start = object->address() + bytes_to_trim;
3554
3555   // Technically in new space this write might be omitted (except for
3556   // debug mode, which iterates through the heap), but to play it safe
3557   // we still do it.
3558   CreateFillerObjectAt(object->address(), bytes_to_trim);
3559
3560   // Initialize header of the trimmed array. Since left trimming is only
3561   // performed on pages which are not concurrently swept creating a filler
3562   // performed on pages which are not concurrently swept, creating a filler
3563   DCHECK(CanMoveObjectStart(object));
3564   Object** former_start = HeapObject::RawField(object, 0);
3565   int new_start_index = elements_to_trim * (element_size / kPointerSize);
3566   former_start[new_start_index] = map;
3567   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3568   FixedArrayBase* new_object =
3569       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3570
3571   // Maintain consistency of live bytes during incremental marking
3572   marking()->TransferMark(object->address(), new_start);
3573   AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3574
3575   // Notify the heap profiler of the change in object layout.
3576   OnMoveEvent(new_object, object, new_object->Size());
3577   return new_object;
3578 }
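// A rough before/after picture of the left trim performed above, for a
// FixedArray trimmed by two elements (each cell is one element_size chunk):
//
//   before: |  map  |  len  |  e0  |  e1  |  e2  | ...
//   after:  |     filler    |  map | len' |  e2  | ...
//
// The new map and length words are written into what used to be element
// storage, so no payload is copied; only the object's start address changes.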
3579
3580
3581 // Force instantiation of templatized method.
3582 template
3583 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3584 template
3585 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3586
3587
3588 template<Heap::InvocationMode mode>
3589 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3590   const int len = object->length();
3591   DCHECK(elements_to_trim < len);
3592
3593   int bytes_to_trim;
3594   if (object->IsFixedTypedArrayBase()) {
3595     InstanceType type = object->map()->instance_type();
3596     bytes_to_trim =
3597         FixedTypedArrayBase::TypedArraySize(type, len) -
3598         FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
3599   } else {
3600     const int element_size =
3601         object->IsFixedArray() ? kPointerSize : kDoubleSize;
3602     bytes_to_trim = elements_to_trim * element_size;
3603   }
3604
3605   // For now this trick is only applied to objects in new and paged space.
3606   DCHECK(object->map() != fixed_cow_array_map());
3607
3608   if (bytes_to_trim == 0) {
3609     // No need to create a filler or update live bytes counters; just
3610     // initialize the header of the trimmed array.
3611     object->synchronized_set_length(len - elements_to_trim);
3612     return;
3613   }
3614
3615   // Calculate location of new array end.
3616   Address new_end = object->address() + object->Size() - bytes_to_trim;
3617
3618   // Technically in new space this write might be omitted (except for
3619   // debug mode, which iterates through the heap), but to play it safe
3620   // we still do it.
3621   // We do not create a filler for objects in large object space.
3622   // TODO(hpayer): We should shrink the large object page if the size
3623   // of the object changed significantly.
3624   if (!lo_space()->Contains(object)) {
3625     CreateFillerObjectAt(new_end, bytes_to_trim);
3626   }
3627
3628   // Initialize header of the trimmed array. We are storing the new length
3629   // using release store after creating a filler for the left-over space to
3630   // avoid races with the sweeper thread.
3631   object->synchronized_set_length(len - elements_to_trim);
3632
3633   // Maintain consistency of live bytes during incremental marking
3634   AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3635
3636   // Notify the heap profiler of the change in object layout. The array may
3637   // not be moved during GC, but its size still has to be adjusted.
3638   HeapProfiler* profiler = isolate()->heap_profiler();
3639   if (profiler->is_tracking_allocations()) {
3640     profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3641   }
3642 }
3643
3644
3645 AllocationResult Heap::AllocateExternalArray(int length,
3646                                              ExternalArrayType array_type,
3647                                              void* external_pointer,
3648                                              PretenureFlag pretenure) {
3649   int size = ExternalArray::kAlignedSize;
3650   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3651   HeapObject* result;
3652   {
3653     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3654     if (!allocation.To(&result)) return allocation;
3655   }
3656
3657   result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3658   ExternalArray::cast(result)->set_length(length);
3659   ExternalArray::cast(result)->set_external_pointer(external_pointer);
3660   return result;
3661 }
3662
3663 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3664                                ElementsKind* element_kind) {
3665   switch (array_type) {
3666 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3667   case kExternal##Type##Array:                          \
3668     *element_size = size;                               \
3669     *element_kind = TYPE##_ELEMENTS;                    \
3670     return;
3671
3672     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3673 #undef TYPED_ARRAY_CASE
3674
3675     default:
3676       *element_size = 0;               // Bogus
3677       *element_kind = UINT8_ELEMENTS;  // Bogus
3678       UNREACHABLE();
3679   }
3680 }
3681
3682
3683 AllocationResult Heap::AllocateFixedTypedArray(int length,
3684                                                ExternalArrayType array_type,
3685                                                PretenureFlag pretenure) {
3686   int element_size;
3687   ElementsKind elements_kind;
3688   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3689   int size = OBJECT_POINTER_ALIGN(length * element_size +
3690                                   FixedTypedArrayBase::kDataOffset);
3691 #ifndef V8_HOST_ARCH_64_BIT
3692   if (array_type == kExternalFloat64Array) {
3693     size += kPointerSize;
3694   }
3695 #endif
3696   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3697
3698   HeapObject* object;
3699   AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3700   if (!allocation.To(&object)) return allocation;
3701
3702   if (array_type == kExternalFloat64Array) {
3703     object = EnsureDoubleAligned(this, object, size);
3704   }
3705
3706   object->set_map(MapForFixedTypedArray(array_type));
3707   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3708   elements->set_length(length);
3709   memset(elements->DataPtr(), 0, elements->DataSize());
3710   return elements;
3711 }
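// Size sketch for the allocation above (illustrative numbers): a Float64
// array of length 3 on a hypothetical 32-bit host needs 3 * 8 = 24 bytes of
// payload plus the kDataOffset header, rounded up by OBJECT_POINTER_ALIGN;
// the extra kPointerSize reserved above gives EnsureDoubleAligned() room to
// shift the object onto an 8-byte boundary if the raw allocation is not
// already double aligned.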
3712
3713
3714 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3715   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3716   AllocationResult allocation =
3717       AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3718
3719   HeapObject* result;
3720   if (!allocation.To(&result)) return allocation;
3721
3722   if (immovable) {
3723     Address address = result->address();
3724     // Code objects which should stay at a fixed address are allocated either
3725     // in the first page of code space (objects on the first page of each space
3726     // are never moved) or in large object space.
3727     if (!code_space_->FirstPage()->Contains(address) &&
3728         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3729       // Discard the first code allocation, which was on a page where it could
3730       // be moved.
3731       CreateFillerObjectAt(result->address(), object_size);
3732       allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3733       if (!allocation.To(&result)) return allocation;
3734       OnAllocationEvent(result, object_size);
3735     }
3736   }
3737
3738   result->set_map_no_write_barrier(code_map());
3739   Code* code = Code::cast(result);
3740   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3741          isolate_->code_range()->contains(code->address()));
3742   code->set_gc_metadata(Smi::FromInt(0));
3743   code->set_ic_age(global_ic_age_);
3744   return code;
3745 }
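// Immovable code first tries the regular code space; if the chosen spot is on
// a page whose objects may move, the allocation is abandoned (replaced by a
// filler) and retried in large object space, where objects are never moved.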
3746
3747
3748 AllocationResult Heap::CopyCode(Code* code) {
3749   AllocationResult allocation;
3750   HeapObject* new_constant_pool;
3751   if (FLAG_enable_ool_constant_pool &&
3752       code->constant_pool() != empty_constant_pool_array()) {
3753     // Copy the constant pool, since edits to the copied code may modify
3754     // the constant pool.
3755     allocation = CopyConstantPoolArray(code->constant_pool());
3756     if (!allocation.To(&new_constant_pool)) return allocation;
3757   } else {
3758     new_constant_pool = empty_constant_pool_array();
3759   }
3760
3761   HeapObject* result = NULL;
3762   // Allocate an object the same size as the code object.
3763   int obj_size = code->Size();
3764   allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3765   if (!allocation.To(&result)) return allocation;
3766
3767   // Copy code object.
3768   Address old_addr = code->address();
3769   Address new_addr = result->address();
3770   CopyBlock(new_addr, old_addr, obj_size);
3771   Code* new_code = Code::cast(result);
3772
3773   // Update the constant pool.
3774   new_code->set_constant_pool(new_constant_pool);
3775
3776   // Relocate the copy.
3777   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3778          isolate_->code_range()->contains(code->address()));
3779   new_code->Relocate(new_addr - old_addr);
3780   return new_code;
3781 }
3782
3783
3784 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3785   // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3786   // do not risk leaving an uninitialized Code object (and breaking the heap).
3787   ByteArray* reloc_info_array;
3788   {
3789     AllocationResult allocation =
3790         AllocateByteArray(reloc_info.length(), TENURED);
3791     if (!allocation.To(&reloc_info_array)) return allocation;
3792   }
3793   HeapObject* new_constant_pool;
3794   if (FLAG_enable_ool_constant_pool &&
3795       code->constant_pool() != empty_constant_pool_array()) {
3796     // Copy the constant pool, since edits to the copied code may modify
3797     // the constant pool.
3798     AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3799     if (!allocation.To(&new_constant_pool)) return allocation;
3800   } else {
3801     new_constant_pool = empty_constant_pool_array();
3802   }
3803
3804   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3805
3806   int new_obj_size = Code::SizeFor(new_body_size);
3807
3808   Address old_addr = code->address();
3809
3810   size_t relocation_offset =
3811       static_cast<size_t>(code->instruction_end() - old_addr);
3812
3813   HeapObject* result;
3814   AllocationResult allocation =
3815       AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3816   if (!allocation.To(&result)) return allocation;
3817
3818   // Copy code object.
3819   Address new_addr = result->address();
3820
3821   // Copy header and instructions.
3822   CopyBytes(new_addr, old_addr, relocation_offset);
3823
3824   Code* new_code = Code::cast(result);
3825   new_code->set_relocation_info(reloc_info_array);
3826
3827   // Update constant pool.
3828   new_code->set_constant_pool(new_constant_pool);
3829
3830   // Copy patched rinfo.
3831   CopyBytes(new_code->relocation_start(), reloc_info.start(),
3832             static_cast<size_t>(reloc_info.length()));
3833
3834   // Relocate the copy.
3835   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3836          isolate_->code_range()->contains(code->address()));
3837   new_code->Relocate(new_addr - old_addr);
3838
3839 #ifdef VERIFY_HEAP
3840   if (FLAG_verify_heap) code->ObjectVerify();
3841 #endif
3842   return new_code;
3843 }
3844
3845
3846 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3847                                        AllocationSite* allocation_site) {
3848   memento->set_map_no_write_barrier(allocation_memento_map());
3849   DCHECK(allocation_site->map() == allocation_site_map());
3850   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3851   if (FLAG_allocation_site_pretenuring) {
3852     allocation_site->IncrementMementoCreateCount();
3853   }
3854 }
3855
3856
3857 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3858                                 AllocationSite* allocation_site) {
3859   DCHECK(gc_state_ == NOT_IN_GC);
3860   DCHECK(map->instance_type() != MAP_TYPE);
3861   // If allocation failures are disallowed, we may allocate in a different
3862   // space when new space is full and the object is not a large object.
3863   AllocationSpace retry_space =
3864       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3865   int size = map->instance_size();
3866   if (allocation_site != NULL) {
3867     size += AllocationMemento::kSize;
3868   }
3869   HeapObject* result;
3870   AllocationResult allocation = AllocateRaw(size, space, retry_space);
3871   if (!allocation.To(&result)) return allocation;
3872   // No need for write barrier since object is white and map is in old space.
3873   result->set_map_no_write_barrier(map);
3874   if (allocation_site != NULL) {
3875     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3876         reinterpret_cast<Address>(result) + map->instance_size());
3877     InitializeAllocationMemento(alloc_memento, allocation_site);
3878   }
3879   return result;
3880 }
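// When an allocation site is supplied, the memento is placed immediately
// after the object proper, within the same allocation:
//
//   | object (map->instance_size() bytes) | AllocationMemento |
//
// which is why the requested size is bumped by AllocationMemento::kSize and
// the memento address is computed as result + map->instance_size().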
3881
3882
3883 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3884                                      Map* map) {
3885   obj->set_properties(properties);
3886   obj->initialize_elements();
3887   // TODO(1240798): Initialize the object's body using valid initial values
3888   // according to the object's initial map.  For example, if the map's
3889   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3890   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3891   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
3892   // verification code has to cope with (temporarily) invalid objects.  See
3893   // for example, JSArray::JSArrayVerify().
3894   Object* filler;
3895   // We cannot always fill with one_pointer_filler_map because objects
3896   // created from API functions expect their internal fields to be initialized
3897   // with undefined_value.
3898   // Pre-allocated fields need to be initialized with undefined_value as well
3899   // so that object accesses before the constructor completes (e.g. in the
3900   // debugger) will not cause a crash.
3901   Object* constructor = map->GetConstructor();
3902   if (constructor->IsJSFunction() &&
3903       JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
3904     // We might want to shrink the object later.
3905     DCHECK(obj->GetInternalFieldCount() == 0);
3906     filler = Heap::one_pointer_filler_map();
3907   } else {
3908     filler = Heap::undefined_value();
3909   }
3910   obj->InitializeBody(map, Heap::undefined_value(), filler);
3911 }
3912
3913
3914 AllocationResult Heap::AllocateJSObjectFromMap(
3915     Map* map, PretenureFlag pretenure, bool allocate_properties,
3916     AllocationSite* allocation_site) {
3917   // JSFunctions should be allocated using AllocateFunction to be
3918   // properly initialized.
3919   DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3920
3921   // Both types of global objects should be allocated using
3922   // AllocateGlobalObject to be properly initialized.
3923   DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3924   DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3925
3926   // Allocate the backing storage for the properties.
3927   FixedArray* properties;
3928   if (allocate_properties) {
3929     int prop_size = map->InitialPropertiesLength();
3930     DCHECK(prop_size >= 0);
3931     {
3932       AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3933       if (!allocation.To(&properties)) return allocation;
3934     }
3935   } else {
3936     properties = empty_fixed_array();
3937   }
3938
3939   // Allocate the JSObject.
3940   int size = map->instance_size();
3941   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3942   JSObject* js_obj;
3943   AllocationResult allocation = Allocate(map, space, allocation_site);
3944   if (!allocation.To(&js_obj)) return allocation;
3945
3946   // Initialize the JSObject.
3947   InitializeJSObjectFromMap(js_obj, properties, map);
3948   DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3949          js_obj->HasFixedTypedArrayElements());
3950   return js_obj;
3951 }
3952
3953
3954 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3955                                         PretenureFlag pretenure,
3956                                         AllocationSite* allocation_site) {
3957   DCHECK(constructor->has_initial_map());
3958
3959   // Allocate the object based on the constructor's initial map.
3960   AllocationResult allocation = AllocateJSObjectFromMap(
3961       constructor->initial_map(), pretenure, true, allocation_site);
3962 #ifdef DEBUG
3963   // Make sure result is NOT a global object if valid.
3964   HeapObject* obj;
3965   DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3966 #endif
3967   return allocation;
3968 }
3969
3970
3971 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3972   // Make the clone.
3973   Map* map = source->map();
3974
3975   // We can only clone normal objects or arrays. Copying anything else
3976   // will break invariants.
3977   CHECK(map->instance_type() == JS_OBJECT_TYPE ||
3978         map->instance_type() == JS_ARRAY_TYPE);
3979
3980   int object_size = map->instance_size();
3981   HeapObject* clone;
3982
3983   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3984
3985   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3986
3987   // If we're forced to always allocate, we use the general allocation
3988   // functions which may leave us with an object in old space.
3989   if (always_allocate()) {
3990     {
3991       AllocationResult allocation =
3992           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3993       if (!allocation.To(&clone)) return allocation;
3994     }
3995     Address clone_address = clone->address();
3996     CopyBlock(clone_address, source->address(), object_size);
3997
3998     // Update write barrier for all tagged fields that lie beyond the header.
3999     const int start_offset = JSObject::kHeaderSize;
4000     const int end_offset = object_size;
4001
4002 #if V8_DOUBLE_FIELDS_UNBOXING
4003     LayoutDescriptorHelper helper(map);
4004     bool has_only_tagged_fields = helper.all_fields_tagged();
4005
4006     if (!has_only_tagged_fields) {
4007       for (int offset = start_offset; offset < end_offset;) {
4008         int end_of_region_offset;
4009         if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
4010           RecordWrites(clone_address, offset,
4011                        (end_of_region_offset - offset) / kPointerSize);
4012         }
4013         offset = end_of_region_offset;
4014       }
4015     } else {
4016 #endif
4017       // Object has only tagged fields.
4018       RecordWrites(clone_address, start_offset,
4019                    (end_offset - start_offset) / kPointerSize);
4020 #if V8_DOUBLE_FIELDS_UNBOXING
4021     }
4022 #endif
4023
4024   } else {
4025     wb_mode = SKIP_WRITE_BARRIER;
4026
4027     {
4028       int adjusted_object_size =
4029           site != NULL ? object_size + AllocationMemento::kSize : object_size;
4030       AllocationResult allocation =
4031           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4032       if (!allocation.To(&clone)) return allocation;
4033     }
4034     SLOW_DCHECK(InNewSpace(clone));
4035     // Since we know the clone is allocated in new space, we can copy
4036     // the contents without worrying about updating the write barrier.
4037     CopyBlock(clone->address(), source->address(), object_size);
4038
4039     if (site != NULL) {
4040       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4041           reinterpret_cast<Address>(clone) + object_size);
4042       InitializeAllocationMemento(alloc_memento, site);
4043     }
4044   }
4045
4046   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
4047               source->GetElementsKind());
4048   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4049   FixedArray* properties = FixedArray::cast(source->properties());
4050   // Update elements if necessary.
4051   if (elements->length() > 0) {
4052     FixedArrayBase* elem;
4053     {
4054       AllocationResult allocation;
4055       if (elements->map() == fixed_cow_array_map()) {
4056         allocation = FixedArray::cast(elements);
4057       } else if (source->HasFastDoubleElements()) {
4058         allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4059       } else {
4060         allocation = CopyFixedArray(FixedArray::cast(elements));
4061       }
4062       if (!allocation.To(&elem)) return allocation;
4063     }
4064     JSObject::cast(clone)->set_elements(elem, wb_mode);
4065   }
4066   // Update properties if necessary.
4067   if (properties->length() > 0) {
4068     FixedArray* prop;
4069     {
4070       AllocationResult allocation = CopyFixedArray(properties);
4071       if (!allocation.To(&prop)) return allocation;
4072     }
4073     JSObject::cast(clone)->set_properties(prop, wb_mode);
4074   }
4075   // Return the new clone.
4076   return clone;
4077 }
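// Note on the write barrier mode chosen above: clones that may end up in old
// space (the always_allocate() path) keep UPDATE_WRITE_BARRIER and record
// their tagged fields explicitly, whereas clones known to live in new space
// can use SKIP_WRITE_BARRIER for the subsequent elements/properties stores.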
4078
4079
4080 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
4081                                     int len) {
4082   // Only works for one byte strings.
4083   DCHECK(vector.length() == len);
4084   MemCopy(chars, vector.start(), len);
4085 }
4086
4087 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
4088                                     int len) {
4089   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4090   size_t stream_length = vector.length();
4091   while (stream_length != 0) {
4092     size_t consumed = 0;
4093     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4094     DCHECK(c != unibrow::Utf8::kBadChar);
4095     DCHECK(consumed <= stream_length);
4096     stream_length -= consumed;
4097     stream += consumed;
4098     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4099       len -= 2;
4100       if (len < 0) break;
4101       *chars++ = unibrow::Utf16::LeadSurrogate(c);
4102       *chars++ = unibrow::Utf16::TrailSurrogate(c);
4103     } else {
4104       len -= 1;
4105       if (len < 0) break;
4106       *chars++ = c;
4107     }
4108   }
4109   DCHECK(stream_length == 0);
4110   DCHECK(len == 0);
4111 }
4112
4113
4114 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4115   DCHECK(s->length() == len);
4116   String::WriteToFlat(s, chars, 0, len);
4117 }
4118
4119
4120 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4121   DCHECK(s->length() == len);
4122   String::WriteToFlat(s, chars, 0, len);
4123 }
4124
4125
4126 template <bool is_one_byte, typename T>
4127 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
4128                                                       uint32_t hash_field) {
4129   DCHECK(chars >= 0);
4130   // Compute map and object size.
4131   int size;
4132   Map* map;
4133
4134   DCHECK_LE(0, chars);
4135   DCHECK_GE(String::kMaxLength, chars);
4136   if (is_one_byte) {
4137     map = one_byte_internalized_string_map();
4138     size = SeqOneByteString::SizeFor(chars);
4139   } else {
4140     map = internalized_string_map();
4141     size = SeqTwoByteString::SizeFor(chars);
4142   }
4143   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4144
4145   // Allocate string.
4146   HeapObject* result;
4147   {
4148     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4149     if (!allocation.To(&result)) return allocation;
4150   }
4151
4152   result->set_map_no_write_barrier(map);
4153   // Set length and hash fields of the allocated string.
4154   String* answer = String::cast(result);
4155   answer->set_length(chars);
4156   answer->set_hash_field(hash_field);
4157
4158   DCHECK_EQ(size, answer->Size());
4159
4160   if (is_one_byte) {
4161     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
4162   } else {
4163     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
4164   }
4165   return answer;
4166 }
4167
4168
4169 // Need explicit instantiations.
4170 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
4171                                                                      int,
4172                                                                      uint32_t);
4173 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
4174                                                                       int,
4175                                                                       uint32_t);
4176 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
4177     Vector<const char>, int, uint32_t);
4178
4179
4180 AllocationResult Heap::AllocateRawOneByteString(int length,
4181                                                 PretenureFlag pretenure) {
4182   DCHECK_LE(0, length);
4183   DCHECK_GE(String::kMaxLength, length);
4184   int size = SeqOneByteString::SizeFor(length);
4185   DCHECK(size <= SeqOneByteString::kMaxSize);
4186   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4187
4188   HeapObject* result;
4189   {
4190     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4191     if (!allocation.To(&result)) return allocation;
4192   }
4193
4194   // Partially initialize the object.
4195   result->set_map_no_write_barrier(one_byte_string_map());
4196   String::cast(result)->set_length(length);
4197   String::cast(result)->set_hash_field(String::kEmptyHashField);
4198   DCHECK_EQ(size, HeapObject::cast(result)->Size());
4199
4200   return result;
4201 }
4202
4203
4204 AllocationResult Heap::AllocateRawTwoByteString(int length,
4205                                                 PretenureFlag pretenure) {
4206   DCHECK_LE(0, length);
4207   DCHECK_GE(String::kMaxLength, length);
4208   int size = SeqTwoByteString::SizeFor(length);
4209   DCHECK(size <= SeqTwoByteString::kMaxSize);
4210   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4211
4212   HeapObject* result;
4213   {
4214     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4215     if (!allocation.To(&result)) return allocation;
4216   }
4217
4218   // Partially initialize the object.
4219   result->set_map_no_write_barrier(string_map());
4220   String::cast(result)->set_length(length);
4221   String::cast(result)->set_hash_field(String::kEmptyHashField);
4222   DCHECK_EQ(size, HeapObject::cast(result)->Size());
4223   return result;
4224 }
4225
4226
4227 AllocationResult Heap::AllocateEmptyFixedArray() {
4228   int size = FixedArray::SizeFor(0);
4229   HeapObject* result;
4230   {
4231     AllocationResult allocation =
4232         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4233     if (!allocation.To(&result)) return allocation;
4234   }
4235   // Initialize the object.
4236   result->set_map_no_write_barrier(fixed_array_map());
4237   FixedArray::cast(result)->set_length(0);
4238   return result;
4239 }
4240
4241
4242 AllocationResult Heap::AllocateEmptyExternalArray(
4243     ExternalArrayType array_type) {
4244   return AllocateExternalArray(0, array_type, NULL, TENURED);
4245 }
4246
4247
4248 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
4249   if (!InNewSpace(src)) {
4250     return src;
4251   }
4252
4253   int len = src->length();
4254   HeapObject* obj;
4255   {
4256     AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
4257     if (!allocation.To(&obj)) return allocation;
4258   }
4259   obj->set_map_no_write_barrier(fixed_array_map());
4260   FixedArray* result = FixedArray::cast(obj);
4261   result->set_length(len);
4262
4263   // Copy the content
4264   DisallowHeapAllocation no_gc;
4265   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4266   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4267
4268   // TODO(mvstanton): The map is set twice because of protection against
4269   // calling set() on a COW FixedArray. Issue v8:3221 created to track this,
4270   // and we might then be able to remove this whole method.
4271   HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4272   return result;
4273 }
4274
4275
4276 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4277     ExternalArrayType array_type) {
4278   return AllocateFixedTypedArray(0, array_type, TENURED);
4279 }
4280
4281
4282 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4283   int len = src->length();
4284   HeapObject* obj;
4285   {
4286     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4287     if (!allocation.To(&obj)) return allocation;
4288   }
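  // A copy that landed in new space needs no write barriers, so the payload
  // (everything after the map word) can be block-copied. Otherwise the
  // elements are copied one by one with the destination's write-barrier mode.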
4289   if (InNewSpace(obj)) {
4290     obj->set_map_no_write_barrier(map);
4291     CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4292               FixedArray::SizeFor(len) - kPointerSize);
4293     return obj;
4294   }
4295   obj->set_map_no_write_barrier(map);
4296   FixedArray* result = FixedArray::cast(obj);
4297   result->set_length(len);
4298
4299   // Copy the content
4300   DisallowHeapAllocation no_gc;
4301   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4302   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4303   return result;
4304 }
4305
4306
4307 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4308                                                    Map* map) {
4309   int len = src->length();
4310   HeapObject* obj;
4311   {
4312     AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4313     if (!allocation.To(&obj)) return allocation;
4314   }
4315   obj->set_map_no_write_barrier(map);
4316   CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4317             src->address() + FixedDoubleArray::kLengthOffset,
4318             FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4319   return obj;
4320 }
4321
4322
4323 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4324                                                     Map* map) {
4325   HeapObject* obj;
4326   if (src->is_extended_layout()) {
4327     ConstantPoolArray::NumberOfEntries small(src,
4328                                              ConstantPoolArray::SMALL_SECTION);
4329     ConstantPoolArray::NumberOfEntries extended(
4330         src, ConstantPoolArray::EXTENDED_SECTION);
4331     AllocationResult allocation =
4332         AllocateExtendedConstantPoolArray(small, extended);
4333     if (!allocation.To(&obj)) return allocation;
4334   } else {
4335     ConstantPoolArray::NumberOfEntries small(src,
4336                                              ConstantPoolArray::SMALL_SECTION);
4337     AllocationResult allocation = AllocateConstantPoolArray(small);
4338     if (!allocation.To(&obj)) return allocation;
4339   }
4340   obj->set_map_no_write_barrier(map);
4341   CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4342             src->address() + ConstantPoolArray::kFirstEntryOffset,
4343             src->size() - ConstantPoolArray::kFirstEntryOffset);
4344   return obj;
4345 }
4346
4347
4348 AllocationResult Heap::AllocateRawFixedArray(int length,
4349                                              PretenureFlag pretenure) {
4350   if (length < 0 || length > FixedArray::kMaxLength) {
4351     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4352   }
4353   int size = FixedArray::SizeFor(length);
4354   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4355
4356   return AllocateRaw(size, space, OLD_POINTER_SPACE);
4357 }
4358
4359
4360 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4361                                                     PretenureFlag pretenure,
4362                                                     Object* filler) {
4363   DCHECK(length >= 0);
4364   DCHECK(empty_fixed_array()->IsFixedArray());
4365   if (length == 0) return empty_fixed_array();
4366
4367   DCHECK(!InNewSpace(filler));
4368   HeapObject* result;
4369   {
4370     AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4371     if (!allocation.To(&result)) return allocation;
4372   }
4373
4374   result->set_map_no_write_barrier(fixed_array_map());
4375   FixedArray* array = FixedArray::cast(result);
4376   array->set_length(length);
4377   MemsetPointer(array->data_start(), filler, length);
4378   return array;
4379 }
4380
4381
4382 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4383   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4384 }
4385
4386
4387 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4388   if (length == 0) return empty_fixed_array();
4389
4390   HeapObject* obj;
4391   {
4392     AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4393     if (!allocation.To(&obj)) return allocation;
4394   }
4395
4396   obj->set_map_no_write_barrier(fixed_array_map());
4397   FixedArray::cast(obj)->set_length(length);
4398   return obj;
4399 }
4400
4401
4402 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4403     int length, PretenureFlag pretenure) {
4404   if (length == 0) return empty_fixed_array();
4405
4406   HeapObject* elements;
4407   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4408   if (!allocation.To(&elements)) return allocation;
4409
4410   elements->set_map_no_write_barrier(fixed_double_array_map());
4411   FixedDoubleArray::cast(elements)->set_length(length);
4412   return elements;
4413 }
4414
4415
4416 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4417                                                    PretenureFlag pretenure) {
4418   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4419     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4420   }
4421   int size = FixedDoubleArray::SizeFor(length);
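  // On 32-bit hosts reserve one extra pointer-sized slot so that
  // EnsureDoubleAligned below can move the payload to an 8-byte boundary.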
4422 #ifndef V8_HOST_ARCH_64_BIT
4423   size += kPointerSize;
4424 #endif
4425   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4426
4427   HeapObject* object;
4428   {
4429     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4430     if (!allocation.To(&object)) return allocation;
4431   }
4432
4433   return EnsureDoubleAligned(this, object, size);
4434 }
4435
4436
4437 AllocationResult Heap::AllocateConstantPoolArray(
4438     const ConstantPoolArray::NumberOfEntries& small) {
4439   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4440   int size = ConstantPoolArray::SizeFor(small);
4441 #ifndef V8_HOST_ARCH_64_BIT
4442   size += kPointerSize;
4443 #endif
4444   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4445
4446   HeapObject* object;
4447   {
4448     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4449     if (!allocation.To(&object)) return allocation;
4450   }
4451   object = EnsureDoubleAligned(this, object, size);
4452   object->set_map_no_write_barrier(constant_pool_array_map());
4453
4454   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4455   constant_pool->Init(small);
4456   constant_pool->ClearPtrEntries(isolate());
4457   return constant_pool;
4458 }
4459
4460
4461 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4462     const ConstantPoolArray::NumberOfEntries& small,
4463     const ConstantPoolArray::NumberOfEntries& extended) {
4464   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4465   CHECK(extended.are_in_range(0, kMaxInt));
4466   int size = ConstantPoolArray::SizeForExtended(small, extended);
4467 #ifndef V8_HOST_ARCH_64_BIT
4468   size += kPointerSize;
4469 #endif
4470   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4471
4472   HeapObject* object;
4473   {
4474     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4475     if (!allocation.To(&object)) return allocation;
4476   }
4477   object = EnsureDoubleAligned(this, object, size);
4478   object->set_map_no_write_barrier(constant_pool_array_map());
4479
4480   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4481   constant_pool->InitExtended(small, extended);
4482   constant_pool->ClearPtrEntries(isolate());
4483   return constant_pool;
4484 }
4485
4486
4487 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4488   ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4489   int size = ConstantPoolArray::SizeFor(small);
4490   HeapObject* result = NULL;
4491   {
4492     AllocationResult allocation =
4493         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4494     if (!allocation.To(&result)) return allocation;
4495   }
4496   result->set_map_no_write_barrier(constant_pool_array_map());
4497   ConstantPoolArray::cast(result)->Init(small);
4498   return result;
4499 }
4500
4501
4502 AllocationResult Heap::AllocateSymbol() {
4503   // Statically ensure that it is safe to allocate symbols in paged spaces.
4504   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4505
4506   HeapObject* result = NULL;
4507   AllocationResult allocation =
4508       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4509   if (!allocation.To(&result)) return allocation;
4510
4511   result->set_map_no_write_barrier(symbol_map());
4512
4513   // Generate a random hash value.
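  // Zero is reserved (see the fallback below), so retry a bounded number of
  // times before settling for 1.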
4514   int hash;
4515   int attempts = 0;
4516   do {
4517     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4518     attempts++;
4519   } while (hash == 0 && attempts < 30);
4520   if (hash == 0) hash = 1;  // never return 0
4521
4522   Symbol::cast(result)
4523       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4524   Symbol::cast(result)->set_name(undefined_value());
4525   Symbol::cast(result)->set_flags(Smi::FromInt(0));
4526
4527   DCHECK(!Symbol::cast(result)->is_private());
4528   return result;
4529 }
4530
4531
4532 AllocationResult Heap::AllocateStruct(InstanceType type) {
4533   Map* map;
4534   switch (type) {
4535 #define MAKE_CASE(NAME, Name, name) \
4536   case NAME##_TYPE:                 \
4537     map = name##_map();             \
4538     break;
4539     STRUCT_LIST(MAKE_CASE)
4540 #undef MAKE_CASE
4541     default:
4542       UNREACHABLE();
4543       return exception();
4544   }
4545   int size = map->instance_size();
4546   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4547   Struct* result;
4548   {
4549     AllocationResult allocation = Allocate(map, space);
4550     if (!allocation.To(&result)) return allocation;
4551   }
4552   result->InitializeBody(size);
4553   return result;
4554 }
4555
4556
4557 bool Heap::IsHeapIterable() {
4558   // TODO(hpayer): This function is not correct. Allocation folding in old
4559   // space breaks the iterability.
4560   return new_space_top_after_last_gc_ == new_space()->top();
4561 }
4562
4563
4564 void Heap::MakeHeapIterable() {
4565   DCHECK(AllowHeapAllocation::IsAllowed());
4566   if (!IsHeapIterable()) {
4567     CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4568   }
4569   if (mark_compact_collector()->sweeping_in_progress()) {
4570     mark_compact_collector()->EnsureSweepingCompleted();
4571   }
4572   DCHECK(IsHeapIterable());
4573 }
4574
4575
4576 void Heap::IdleMarkCompact(const char* message) {
4577   bool uncommit = false;
4578   if (gc_count_at_last_idle_gc_ == gc_count_) {
4579     // No GC since the last full GC, the mutator is probably not active.
4580     isolate_->compilation_cache()->Clear();
4581     uncommit = true;
4582   }
4583   CollectAllGarbage(kReduceMemoryFootprintMask, message);
4584   gc_idle_time_handler_.NotifyIdleMarkCompact();
4585   gc_count_at_last_idle_gc_ = gc_count_;
4586   if (uncommit) {
4587     new_space_.Shrink();
4588     UncommitFromSpace();
4589   }
4590 }
4591
4592
4593 bool Heap::TryFinalizeIdleIncrementalMarking(
4594     double idle_time_in_ms, size_t size_of_objects,
4595     size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
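  // Two ways to finish marking work during idle time: over-approximate the
  // weak closure when that is enabled and deemed worthwhile, or finalize
  // incremental marking with a full GC once marking is complete (or the
  // remaining work fits into the idle time budget).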
4596   if (FLAG_overapproximate_weak_closure &&
4597       (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
4598        (!incremental_marking()->weak_closure_was_overapproximated() &&
4599         mark_compact_collector_.marking_deque()->IsEmpty() &&
4600         gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
4601             static_cast<size_t>(idle_time_in_ms))))) {
4602     OverApproximateWeakClosure(
4603         "Idle notification: overapproximate weak closure");
4604     return true;
4605   } else if (incremental_marking()->IsComplete() ||
4606              (mark_compact_collector_.marking_deque()->IsEmpty() &&
4607               gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
4608                   static_cast<size_t>(idle_time_in_ms), size_of_objects,
4609                   final_incremental_mark_compact_speed_in_bytes_per_ms))) {
4610     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4611     return true;
4612   }
4613   return false;
4614 }
4615
4616
4617 bool Heap::WorthActivatingIncrementalMarking() {
4618   return incremental_marking()->IsStopped() &&
4619          incremental_marking()->ShouldActivate();
4620 }
4621
4622
4623 static double MonotonicallyIncreasingTimeInMs() {
4624   return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
4625          static_cast<double>(base::Time::kMillisecondsPerSecond);
4626 }
4627
4628
4629 bool Heap::IdleNotification(int idle_time_in_ms) {
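  // Legacy entry point: turn the given idle duration into an absolute
  // deadline in seconds and forward to the deadline-based overload.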
4630   return IdleNotification(
4631       V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
4632       (static_cast<double>(idle_time_in_ms) /
4633        static_cast<double>(base::Time::kMillisecondsPerSecond)));
4634 }
4635
4636
4637 bool Heap::IdleNotification(double deadline_in_seconds) {
4638   CHECK(HasBeenSetUp());  // http://crbug.com/425035
4639   double deadline_in_ms =
4640       deadline_in_seconds *
4641       static_cast<double>(base::Time::kMillisecondsPerSecond);
4642   HistogramTimerScope idle_notification_scope(
4643       isolate_->counters()->gc_idle_notification());
4644   double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4645
4646   GCIdleTimeHandler::HeapState heap_state;
4647   heap_state.contexts_disposed = contexts_disposed_;
4648   heap_state.contexts_disposal_rate =
4649       tracer()->ContextDisposalRateInMilliseconds();
4650   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4651   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4652   // TODO(ulan): Start incremental marking only for large heaps.
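  // For idle periods longer than a frame-rendering budget, use the more
  // aggressive idle allocation limit when judging whether the next GC is
  // likely to be a full one.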
4653   intptr_t limit = old_generation_allocation_limit_;
4654   if (static_cast<size_t>(idle_time_in_ms) >
4655       GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
4656     limit = idle_old_generation_allocation_limit_;
4657   }
4658
4659   heap_state.can_start_incremental_marking =
4660       incremental_marking()->WorthActivating() &&
4661       NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking &&
4662       !mark_compact_collector()->sweeping_in_progress();
4663   heap_state.sweeping_in_progress =
4664       mark_compact_collector()->sweeping_in_progress();
4665   heap_state.mark_compact_speed_in_bytes_per_ms =
4666       static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4667   heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4668       tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4669   heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
4670       static_cast<size_t>(
4671           tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
4672   heap_state.scavenge_speed_in_bytes_per_ms =
4673       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4674   heap_state.used_new_space_size = new_space_.Size();
4675   heap_state.new_space_capacity = new_space_.Capacity();
4676   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4677       static_cast<size_t>(
4678           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4679
4680   GCIdleTimeAction action =
4681       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4682   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4683       static_cast<int>(idle_time_in_ms));
4684
4685   bool result = false;
4686   switch (action.type) {
4687     case DONE:
4688       result = true;
4689       break;
4690     case DO_INCREMENTAL_MARKING: {
4691       if (incremental_marking()->IsStopped()) {
4692         incremental_marking()->Start();
4693       }
4694       double remaining_idle_time_in_ms = 0.0;
4695       do {
4696         incremental_marking()->Step(
4697             action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
4698             IncrementalMarking::FORCE_MARKING,
4699             IncrementalMarking::DO_NOT_FORCE_COMPLETION);
4700         remaining_idle_time_in_ms =
4701             deadline_in_ms - MonotonicallyIncreasingTimeInMs();
4702       } while (remaining_idle_time_in_ms >=
4703                    2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
4704                !incremental_marking()->IsComplete() &&
4705                !mark_compact_collector_.marking_deque()->IsEmpty());
4706       if (remaining_idle_time_in_ms > 0.0) {
4707         action.additional_work = TryFinalizeIdleIncrementalMarking(
4708             remaining_idle_time_in_ms, heap_state.size_of_objects,
4709             heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
4710       }
4711       break;
4712     }
4713     case DO_FULL_GC: {
4714       if (contexts_disposed_) {
4715         HistogramTimerScope scope(isolate_->counters()->gc_context());
4716         CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
4717         gc_idle_time_handler_.NotifyIdleMarkCompact();
4718         gc_count_at_last_idle_gc_ = gc_count_;
4719       } else {
4720         IdleMarkCompact("idle notification: finalize idle round");
4721       }
4722       break;
4723     }
4724     case DO_SCAVENGE:
4725       CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4726       break;
4727     case DO_FINALIZE_SWEEPING:
4728       mark_compact_collector()->EnsureSweepingCompleted();
4729       break;
4730     case DO_NOTHING:
4731       break;
4732   }
4733
4734   double current_time = MonotonicallyIncreasingTimeInMs();
4735   last_idle_notification_time_ = current_time;
4736   double deadline_difference = deadline_in_ms - current_time;
4737
4738   if (deadline_difference >= 0) {
4739     if (action.type != DONE && action.type != DO_NOTHING) {
4740       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4741           static_cast<int>(deadline_difference));
4742     }
4743   } else {
4744     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4745         static_cast<int>(-deadline_difference));
4746   }
4747
4748   if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
4749       FLAG_trace_idle_notification_verbose) {
4750     PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
4751     PrintF(
4752         "Idle notification: requested idle time %.2f ms, used idle time %.2f "
4753         "ms, deadline usage %.2f ms [",
4754         idle_time_in_ms, idle_time_in_ms - deadline_difference,
4755         deadline_difference);
4756     action.Print();
4757     PrintF("]");
4758     if (FLAG_trace_idle_notification_verbose) {
4759       PrintF("[");
4760       heap_state.Print();
4761       PrintF("]");
4762     }
4763     PrintF("\n");
4764   }
4765
4766   contexts_disposed_ = 0;
4767   return result;
4768 }
4769
4770
4771 bool Heap::RecentIdleNotificationHappened() {
4772   return (last_idle_notification_time_ +
4773           GCIdleTimeHandler::kMaxScheduledIdleTime) >
4774          MonotonicallyIncreasingTimeInMs();
4775 }
4776
4777
4778 #ifdef DEBUG
4779
4780 void Heap::Print() {
4781   if (!HasBeenSetUp()) return;
4782   isolate()->PrintStack(stdout);
4783   AllSpaces spaces(this);
4784   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4785     space->Print();
4786   }
4787 }
4788
4789
4790 void Heap::ReportCodeStatistics(const char* title) {
4791   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4792   PagedSpace::ResetCodeStatistics(isolate());
4793   // We do not look for code in new space, map space, or old space.  If code
4794   // somehow ends up in those spaces, we would miss it here.
4795   code_space_->CollectCodeStatistics();
4796   lo_space_->CollectCodeStatistics();
4797   PagedSpace::ReportCodeStatistics(isolate());
4798 }
4799
4800
4801 // This function expects that NewSpace's allocated objects histogram is
4802 // populated (via a call to CollectStatistics or else as a side effect of a
4803 // just-completed scavenge collection).
4804 void Heap::ReportHeapStatistics(const char* title) {
4805   USE(title);
4806   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4807          gc_count_);
4808   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4809          old_generation_allocation_limit_);
4810
4811   PrintF("\n");
4812   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4813   isolate_->global_handles()->PrintStats();
4814   PrintF("\n");
4815
4816   PrintF("Heap statistics : ");
4817   isolate_->memory_allocator()->ReportStatistics();
4818   PrintF("To space : ");
4819   new_space_.ReportStatistics();
4820   PrintF("Old pointer space : ");
4821   old_pointer_space_->ReportStatistics();
4822   PrintF("Old data space : ");
4823   old_data_space_->ReportStatistics();
4824   PrintF("Code space : ");
4825   code_space_->ReportStatistics();
4826   PrintF("Map space : ");
4827   map_space_->ReportStatistics();
4828   PrintF("Cell space : ");
4829   cell_space_->ReportStatistics();
4830   PrintF("Large object space : ");
4831   lo_space_->ReportStatistics();
4832   PrintF(">>>>>> ========================================= >>>>>>\n");
4833 }
4834
4835 #endif  // DEBUG
4836
4837 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4838
4839
4840 bool Heap::Contains(Address addr) {
4841   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4842   return HasBeenSetUp() &&
4843          (new_space_.ToSpaceContains(addr) ||
4844           old_pointer_space_->Contains(addr) ||
4845           old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4846           map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4847           lo_space_->SlowContains(addr));
4848 }
4849
4850
4851 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4852   return InSpace(value->address(), space);
4853 }
4854
4855
4856 bool Heap::InSpace(Address addr, AllocationSpace space) {
4857   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4858   if (!HasBeenSetUp()) return false;
4859
4860   switch (space) {
4861     case NEW_SPACE:
4862       return new_space_.ToSpaceContains(addr);
4863     case OLD_POINTER_SPACE:
4864       return old_pointer_space_->Contains(addr);
4865     case OLD_DATA_SPACE:
4866       return old_data_space_->Contains(addr);
4867     case CODE_SPACE:
4868       return code_space_->Contains(addr);
4869     case MAP_SPACE:
4870       return map_space_->Contains(addr);
4871     case CELL_SPACE:
4872       return cell_space_->Contains(addr);
4873     case LO_SPACE:
4874       return lo_space_->SlowContains(addr);
4875   }
4876   UNREACHABLE();
4877   return false;
4878 }
4879
4880
4881 bool Heap::RootIsImmortalImmovable(int root_index) {
4882   switch (root_index) {
4883 #define CASE(name)               \
4884   case Heap::k##name##RootIndex: \
4885     return true;
4886     IMMORTAL_IMMOVABLE_ROOT_LIST(CASE);
4887 #undef CASE
4888     default:
4889       return false;
4890   }
4891 }
4892
4893
4894 #ifdef VERIFY_HEAP
4895 void Heap::Verify() {
4896   CHECK(HasBeenSetUp());
4897   HandleScope scope(isolate());
4898
4899   store_buffer()->Verify();
4900
4901   if (mark_compact_collector()->sweeping_in_progress()) {
4902     // We have to wait here for the sweeper threads to have an iterable heap.
4903     mark_compact_collector()->EnsureSweepingCompleted();
4904   }
4905
4906   VerifyPointersVisitor visitor;
4907   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4908
4909   VerifySmisVisitor smis_visitor;
4910   IterateSmiRoots(&smis_visitor);
4911
4912   new_space_.Verify();
4913
4914   old_pointer_space_->Verify(&visitor);
4915   map_space_->Verify(&visitor);
4916
4917   VerifyPointersVisitor no_dirty_regions_visitor;
4918   old_data_space_->Verify(&no_dirty_regions_visitor);
4919   code_space_->Verify(&no_dirty_regions_visitor);
4920   cell_space_->Verify(&no_dirty_regions_visitor);
4921
4922   lo_space_->Verify();
4923 }
4924 #endif
4925
4926
4927 void Heap::ZapFromSpace() {
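  // Overwrite every word of the from-space pages with kFromSpaceZapValue so
  // that stale pointers into from space are easy to spot in a debugger.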
4928   NewSpacePageIterator it(new_space_.FromSpaceStart(),
4929                           new_space_.FromSpaceEnd());
4930   while (it.has_next()) {
4931     NewSpacePage* page = it.next();
4932     for (Address cursor = page->area_start(), limit = page->area_end();
4933          cursor < limit; cursor += kPointerSize) {
4934       Memory::Address_at(cursor) = kFromSpaceZapValue;
4935     }
4936   }
4937 }
4938
4939
4940 void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
4941                                              Address end,
4942                                              ObjectSlotCallback callback) {
4943   Address slot_address = start;
4944
4945   while (slot_address < end) {
4946     Object** slot = reinterpret_cast<Object**>(slot_address);
4947     Object* object = *slot;
4948     // If the store buffer becomes overfull we mark pages as being exempt from
4949     // the store buffer.  These pages are scanned to find pointers that point
4950     // to the new space.  In that case we may hit newly promoted objects and
4951     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
4952     if (object->IsHeapObject()) {
4953       if (Heap::InFromSpace(object)) {
4954         callback(reinterpret_cast<HeapObject**>(slot),
4955                  HeapObject::cast(object));
4956         Object* new_object = *slot;
4957         if (InNewSpace(new_object)) {
4958           SLOW_DCHECK(Heap::InToSpace(new_object));
4959           SLOW_DCHECK(new_object->IsHeapObject());
4960           store_buffer_.EnterDirectlyIntoStoreBuffer(
4961               reinterpret_cast<Address>(slot));
4962         }
4963         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4964       } else if (record_slots &&
4965                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4966         mark_compact_collector()->RecordSlot(slot, slot, object);
4967       }
4968     }
4969     slot_address += kPointerSize;
4970   }
4971 }
4972
4973
4974 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4975   IterateStrongRoots(v, mode);
4976   IterateWeakRoots(v, mode);
4977 }
4978
4979
4980 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4981   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4982   v->Synchronize(VisitorSynchronization::kStringTable);
4983   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4984     // Scavenge collections have special processing for this.
4985     external_string_table_.Iterate(v);
4986   }
4987   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4988 }
4989
4990
4991 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4992   // Acquire execution access since we are going to read stack limit values.
4993   ExecutionAccess access(isolate());
4994   v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4995   v->Synchronize(VisitorSynchronization::kSmiRootList);
4996 }
4997
4998
4999 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
5000   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
5001   v->Synchronize(VisitorSynchronization::kStrongRootList);
5002
5003   v->VisitPointer(bit_cast<Object**>(&hidden_string_));
5004   v->Synchronize(VisitorSynchronization::kInternalizedString);
5005
5006   isolate_->bootstrapper()->Iterate(v);
5007   v->Synchronize(VisitorSynchronization::kBootstrapper);
5008   isolate_->Iterate(v);
5009   v->Synchronize(VisitorSynchronization::kTop);
5010   Relocatable::Iterate(isolate_, v);
5011   v->Synchronize(VisitorSynchronization::kRelocatable);
5012
5013   if (isolate_->deoptimizer_data() != NULL) {
5014     isolate_->deoptimizer_data()->Iterate(v);
5015   }
5016   v->Synchronize(VisitorSynchronization::kDebug);
5017   isolate_->compilation_cache()->Iterate(v);
5018   v->Synchronize(VisitorSynchronization::kCompilationCache);
5019
5020   // Iterate over local handles in handle scopes.
5021   isolate_->handle_scope_implementer()->Iterate(v);
5022   isolate_->IterateDeferredHandles(v);
5023   v->Synchronize(VisitorSynchronization::kHandleScope);
5024
5025   // Iterate over the builtin code objects and code stubs in the
5026   // heap. Note that it is not necessary to iterate over code objects
5027   // on scavenge collections.
5028   if (mode != VISIT_ALL_IN_SCAVENGE) {
5029     isolate_->builtins()->IterateBuiltins(v);
5030   }
5031   v->Synchronize(VisitorSynchronization::kBuiltins);
5032
5033   // Iterate over global handles.
5034   switch (mode) {
5035     case VISIT_ONLY_STRONG:
5036       isolate_->global_handles()->IterateStrongRoots(v);
5037       break;
5038     case VISIT_ALL_IN_SCAVENGE:
5039       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
5040       break;
5041     case VISIT_ALL_IN_SWEEP_NEWSPACE:
5042     case VISIT_ALL:
5043       isolate_->global_handles()->IterateAllRoots(v);
5044       break;
5045   }
5046   v->Synchronize(VisitorSynchronization::kGlobalHandles);
5047
5048   // Iterate over eternal handles.
5049   if (mode == VISIT_ALL_IN_SCAVENGE) {
5050     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
5051   } else {
5052     isolate_->eternal_handles()->IterateAllRoots(v);
5053   }
5054   v->Synchronize(VisitorSynchronization::kEternalHandles);
5055
5056   // Iterate over pointers being held by inactive threads.
5057   isolate_->thread_manager()->Iterate(v);
5058   v->Synchronize(VisitorSynchronization::kThreadManager);
5059
5060   // Iterate over the pointers the Serialization/Deserialization code is
5061   // holding.
5062   // During garbage collection this keeps the partial snapshot cache alive.
5063   // During deserialization of the startup snapshot this creates the partial
5064   // snapshot cache and deserializes the objects it refers to.  During
5065   // serialization this does nothing, since the partial snapshot cache is
5066   // empty.  However the next thing we do is create the partial snapshot,
5067   // filling up the partial snapshot cache with objects it needs as we go.
5068   SerializerDeserializer::Iterate(isolate_, v);
5069   // We don't do a v->Synchronize call here, because in debug mode that will
5070   // output a flag to the snapshot.  However at this point the serializer and
5071   // deserializer are deliberately a little unsynchronized (see above) so the
5072   // checking of the sync flag in the snapshot would fail.
5073 }
5074
5075
5076 // TODO(1236194): Since the heap size is configurable on the command line
5077 // and through the API, we should gracefully handle the case that the heap
5078 // size is not big enough to fit all the initial objects.
5079 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
5080                          int max_executable_size, size_t code_range_size) {
5081   if (HasBeenSetUp()) return false;
5082
5083   // Overwrite default configuration.
5084   if (max_semi_space_size > 0) {
5085     max_semi_space_size_ = max_semi_space_size * MB;
5086   }
5087   if (max_old_space_size > 0) {
5088     max_old_generation_size_ = static_cast<intptr_t>(max_old_space_size) * MB;
5089   }
5090   if (max_executable_size > 0) {
5091     max_executable_size_ = static_cast<intptr_t>(max_executable_size) * MB;
5092   }
5093
5094   // If max space size flags are specified overwrite the configuration.
5095   if (FLAG_max_semi_space_size > 0) {
5096     max_semi_space_size_ = FLAG_max_semi_space_size * MB;
5097   }
5098   if (FLAG_max_old_space_size > 0) {
5099     max_old_generation_size_ =
5100         static_cast<intptr_t>(FLAG_max_old_space_size) * MB;
5101   }
5102   if (FLAG_max_executable_size > 0) {
5103     max_executable_size_ = static_cast<intptr_t>(FLAG_max_executable_size) * MB;
5104   }
5105
5106   if (FLAG_stress_compaction) {
5107     // This will cause more frequent GCs when stressing.
5108     max_semi_space_size_ = Page::kPageSize;
5109   }
5110
5111   if (isolate()->snapshot_available()) {
5112     // If we are using a snapshot we always reserve the default amount
5113     // of memory for each semispace because code in the snapshot has
5114     // write-barrier code that relies on the size and alignment of new
5115     // space.  We therefore cannot use a larger max semispace size
5116     // than the default reserved semispace size.
5117     if (max_semi_space_size_ > reserved_semispace_size_) {
5118       max_semi_space_size_ = reserved_semispace_size_;
5119       if (FLAG_trace_gc) {
5120         PrintPID("Max semi-space size cannot be more than %d kbytes\n",
5121                  reserved_semispace_size_ >> 10);
5122       }
5123     }
5124   } else {
5125     // If we are not using snapshots we reserve space for the actual
5126     // max semispace size.
5127     reserved_semispace_size_ = max_semi_space_size_;
5128   }
5129
5130   // The max executable size must be less than or equal to the max old
5131   // generation size.
5132   if (max_executable_size_ > max_old_generation_size_) {
5133     max_executable_size_ = max_old_generation_size_;
5134   }
5135
5136   // The new space size must be a power of two to support single-bit testing
5137   // for containment.
5138   max_semi_space_size_ =
5139       base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
5140   reserved_semispace_size_ =
5141       base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
5142
5143   if (FLAG_min_semi_space_size > 0) {
5144     int initial_semispace_size = FLAG_min_semi_space_size * MB;
5145     if (initial_semispace_size > max_semi_space_size_) {
5146       initial_semispace_size_ = max_semi_space_size_;
5147       if (FLAG_trace_gc) {
5148         PrintPID(
5149             "Min semi-space size cannot be more than the maximum "
5150             "semi-space size of %d MB\n",
5151             max_semi_space_size_ / MB);
5152       }
5153     } else {
5154       initial_semispace_size_ = initial_semispace_size;
5155     }
5156   }
5157
5158   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5159
5160   if (FLAG_target_semi_space_size > 0) {
5161     int target_semispace_size = FLAG_target_semi_space_size * MB;
5162     if (target_semispace_size < initial_semispace_size_) {
5163       target_semispace_size_ = initial_semispace_size_;
5164       if (FLAG_trace_gc) {
5165         PrintPID(
5166             "Target semi-space size cannot be less than the minimum "
5167             "semi-space size of %d MB\n",
5168             initial_semispace_size_ / MB);
5169       }
5170     } else if (target_semispace_size > max_semi_space_size_) {
5171       target_semispace_size_ = max_semi_space_size_;
5172       if (FLAG_trace_gc) {
5173         PrintPID(
5174             "Target semi-space size cannot be less than the maximum "
5175             "semi-space size of %d MB\n",
5176             max_semi_space_size_ / MB);
5177       }
5178     } else {
5179       target_semispace_size_ = target_semispace_size;
5180     }
5181   }
5182
5183   target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
5184
5185   if (FLAG_semi_space_growth_factor < 2) {
5186     FLAG_semi_space_growth_factor = 2;
5187   }
5188
5189   // The old generation is paged and needs at least one page for each space.
5190   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5191   max_old_generation_size_ =
5192       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
5193           max_old_generation_size_);
5194
5195   if (FLAG_initial_old_space_size > 0) {
5196     initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
5197   } else {
5198     initial_old_generation_size_ =
5199         max_old_generation_size_ / kInitalOldGenerationLimitFactor;
5200   }
5201   old_generation_allocation_limit_ = initial_old_generation_size_;
5202
5203   // We rely on being able to allocate new arrays in paged spaces.
5204   DCHECK(Page::kMaxRegularHeapObjectSize >=
5205          (JSArray::kSize +
5206           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
5207           AllocationMemento::kSize));
5208
5209   code_range_size_ = code_range_size * MB;
5210
5211   configured_ = true;
5212   return true;
5213 }
5214
5215
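// A minimal usage sketch (hypothetical values): an embedder going through this
// interface could call, before SetUp(),
//
//   heap->ConfigureHeap(16 /* semi-space MB */, 512 /* old space MB */,
//                       256 /* executable MB */, 0 /* default code range */);
//
// Zero for any argument keeps the corresponding default, which is exactly what
// ConfigureHeapDefault() below relies on.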
5216 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
5217
5218
5219 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5220   *stats->start_marker = HeapStats::kStartMarker;
5221   *stats->end_marker = HeapStats::kEndMarker;
5222   *stats->new_space_size = new_space_.SizeAsInt();
5223   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5224   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5225   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5226   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5227   *stats->old_data_space_capacity = old_data_space_->Capacity();
5228   *stats->code_space_size = code_space_->SizeOfObjects();
5229   *stats->code_space_capacity = code_space_->Capacity();
5230   *stats->map_space_size = map_space_->SizeOfObjects();
5231   *stats->map_space_capacity = map_space_->Capacity();
5232   *stats->cell_space_size = cell_space_->SizeOfObjects();
5233   *stats->cell_space_capacity = cell_space_->Capacity();
5234   *stats->lo_space_size = lo_space_->Size();
5235   isolate_->global_handles()->RecordStats(stats);
5236   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5237   *stats->memory_allocator_capacity =
5238       isolate()->memory_allocator()->Size() +
5239       isolate()->memory_allocator()->Available();
5240   *stats->os_error = base::OS::GetLastError();
5241   isolate()->memory_allocator()->Available();
5242   if (take_snapshot) {
5243     HeapIterator iterator(this);
5244     for (HeapObject* obj = iterator.next(); obj != NULL;
5245          obj = iterator.next()) {
5246       InstanceType type = obj->map()->instance_type();
5247       DCHECK(0 <= type && type <= LAST_TYPE);
5248       stats->objects_per_type[type]++;
5249       stats->size_per_type[type] += obj->Size();
5250     }
5251   }
5252 }
5253
5254
5255 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5256   return old_pointer_space_->SizeOfObjects() +
5257          old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5258          map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5259          lo_space_->SizeOfObjects();
5260 }
5261
5262
5263 int64_t Heap::PromotedExternalMemorySize() {
5264   if (amount_of_external_allocated_memory_ <=
5265       amount_of_external_allocated_memory_at_last_global_gc_)
5266     return 0;
5267   return amount_of_external_allocated_memory_ -
5268          amount_of_external_allocated_memory_at_last_global_gc_;
5269 }
5270
5271
5272 intptr_t Heap::CalculateOldGenerationAllocationLimit(double factor,
5273                                                      intptr_t old_gen_size) {
5274   CHECK(factor > 1.0);
5275   CHECK(old_gen_size > 0);
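  // Scale the current old generation size by 'factor', bound the result from
  // below by the minimum allocation limit, add the new-space capacity, and cap
  // the sum at the halfway point between the current size and the configured
  // maximum old generation size.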
5276   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5277   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5278   limit += new_space_.Capacity();
5279   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5280   return Min(limit, halfway_to_the_max);
5281 }
5282
5283
5284 void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
5285                                            int freed_global_handles) {
5286   const int kMaxHandles = 1000;
5287   const int kMinHandles = 100;
5288   const double min_factor = 1.1;
5289   double max_factor = 4;
5290   const double idle_max_factor = 1.5;
5291   // We set the old generation growing factor to 2 to grow the heap slower on
5292   // memory-constrained devices.
5293   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5294     max_factor = 2;
5295   }
5296
5297   // If there are many freed global handles, then the next full GC will
5298   // likely collect a lot of garbage. Choose the heap growing factor
5299   // depending on freed global handles.
5300   // TODO(ulan, hpayer): Take into account mutator utilization.
5301   // TODO(hpayer): The idle factor could make the handles heuristic obsolete.
5302   // Look into that.
5303   double factor;
5304   if (freed_global_handles <= kMinHandles) {
5305     factor = max_factor;
5306   } else if (freed_global_handles >= kMaxHandles) {
5307     factor = min_factor;
5308   } else {
5309     // Compute factor using linear interpolation between points
5310     // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5311     factor = max_factor -
5312              (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5313                  (kMaxHandles - kMinHandles);
5314   }
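  // For example, with the default max_factor of 4, freed_global_handles == 550
  // yields factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 2.55.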
5315
5316   if (FLAG_stress_compaction ||
5317       mark_compact_collector()->reduce_memory_footprint_) {
5318     factor = min_factor;
5319   }
5320
5321   old_generation_allocation_limit_ =
5322       CalculateOldGenerationAllocationLimit(factor, old_gen_size);
5323   idle_old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
5324       Min(factor, idle_max_factor), old_gen_size);
5325 }
5326
5327
5328 void Heap::EnableInlineAllocation() {
5329   if (!inline_allocation_disabled_) return;
5330   inline_allocation_disabled_ = false;
5331
5332   // Update inline allocation limit for new space.
5333   new_space()->UpdateInlineAllocationLimit(0);
5334 }
5335
5336
5337 void Heap::DisableInlineAllocation() {
5338   if (inline_allocation_disabled_) return;
5339   inline_allocation_disabled_ = true;
5340
5341   // Update inline allocation limit for new space.
5342   new_space()->UpdateInlineAllocationLimit(0);
5343
5344   // Update inline allocation limit for old spaces.
5345   PagedSpaces spaces(this);
5346   for (PagedSpace* space = spaces.next(); space != NULL;
5347        space = spaces.next()) {
5348     space->EmptyAllocationInfo();
5349   }
5350 }
5351
5352
5353 V8_DECLARE_ONCE(initialize_gc_once);
5354
5355 static void InitializeGCOnce() {
5356   InitializeScavengingVisitorsTables();
5357   NewSpaceScavenger::Initialize();
5358   MarkCompactCollector::Initialize();
5359 }
5360
5361
5362 bool Heap::SetUp() {
5363 #ifdef DEBUG
5364   allocation_timeout_ = FLAG_gc_interval;
5365 #endif
5366
5367   // Initialize heap spaces and initial maps and objects. Whenever something
5368   // goes wrong, just return false. The caller should check the results and
5369   // call Heap::TearDown() to release allocated memory.
5370   //
5371   // If the heap is not yet configured (e.g. through the API), configure it.
5372   // Configuration is based on the flags new-space-size (really the semispace
5373   // size) and old-space-size if set or the initial values of semispace_size_
5374   // and old_generation_size_ otherwise.
5375   if (!configured_) {
5376     if (!ConfigureHeapDefault()) return false;
5377   }
5378
5379   concurrent_sweeping_enabled_ =
5380       FLAG_concurrent_sweeping && isolate_->max_available_threads() > 1;
5381
5382   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5383
5384   MarkMapPointersAsEncoded(false);
5385
5386   // Set up memory allocator.
5387   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5388     return false;
5389
5390   // Set up new space.
5391   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5392     return false;
5393   }
5394   new_space_top_after_last_gc_ = new_space()->top();
5395
5396   // Initialize old pointer space.
5397   old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5398                                     OLD_POINTER_SPACE, NOT_EXECUTABLE);
5399   if (old_pointer_space_ == NULL) return false;
5400   if (!old_pointer_space_->SetUp()) return false;
5401
5402   // Initialize old data space.
5403   old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5404                                  NOT_EXECUTABLE);
5405   if (old_data_space_ == NULL) return false;
5406   if (!old_data_space_->SetUp()) return false;
5407
5408   if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5409
5410   // Initialize the code space, set its maximum capacity to the old
5411   // generation size. It needs executable memory.
5412   code_space_ =
5413       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5414   if (code_space_ == NULL) return false;
5415   if (!code_space_->SetUp()) return false;
5416
5417   // Initialize map space.
5418   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5419   if (map_space_ == NULL) return false;
5420   if (!map_space_->SetUp()) return false;
5421
5422   // Initialize simple cell space.
5423   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5424   if (cell_space_ == NULL) return false;
5425   if (!cell_space_->SetUp()) return false;
5426
5427   // The large object space may contain code or data.  We set the memory
5428   // to be non-executable here for safety, but this means we need to enable it
5429   // explicitly when allocating large code objects.
5430   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5431   if (lo_space_ == NULL) return false;
5432   if (!lo_space_->SetUp()) return false;
5433
5434   // Set up the seed that is used to randomize the string hash function.
5435   DCHECK(hash_seed() == 0);
5436   if (FLAG_randomize_hashes) {
5437     if (FLAG_hash_seed == 0) {
5438       int rnd = isolate()->random_number_generator()->NextInt();
5439       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5440     } else {
5441       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5442     }
5443   }
5444
5445   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5446   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5447
5448   store_buffer()->SetUp();
5449
5450   mark_compact_collector()->SetUp();
5451
5452   return true;
5453 }
5454
5455
5456 bool Heap::CreateHeapObjects() {
5457   // Create initial maps.
5458   if (!CreateInitialMaps()) return false;
5459   CreateApiObjects();
5460
5461   // Create initial objects
5462   CreateInitialObjects();
5463   CHECK_EQ(0u, gc_count_);
5464
5465   set_native_contexts_list(undefined_value());
5466   set_array_buffers_list(undefined_value());
5467   set_new_array_buffer_views_list(undefined_value());
5468   set_allocation_sites_list(undefined_value());
5469   return true;
5470 }
5471
5472
5473 void Heap::SetStackLimits() {
5474   DCHECK(isolate_ != NULL);
5475   DCHECK(isolate_ == isolate());
5476   // On 64 bit machines, pointers are generally out of range of Smis.  We write
5477   // something that looks like an out of range Smi to the GC.
5478
5479   // Set up the special root array entries containing the stack limits.
5480   // These are actually addresses, but the tag makes the GC ignore it.
5481   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5482       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5483   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5484       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5485 }
5486
5487
5488 void Heap::NotifyDeserializationComplete() {
5489   deserialization_complete_ = true;
5490 #ifdef DEBUG
5491   // All pages right after bootstrapping must be marked as never-evacuate.
5492   PagedSpaces spaces(this);
5493   for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
5494     PageIterator it(s);
5495     while (it.has_next()) CHECK(it.next()->NeverEvacuate());
5496   }
5497 #endif  // DEBUG
5498 }
5499
5500
5501 void Heap::TearDown() {
5502 #ifdef VERIFY_HEAP
5503   if (FLAG_verify_heap) {
5504     Verify();
5505   }
5506 #endif
5507
5508   UpdateMaximumCommitted();
5509
5510   if (FLAG_print_cumulative_gc_stat) {
5511     PrintF("\n");
5512     PrintF("gc_count=%d ", gc_count_);
5513     PrintF("mark_sweep_count=%d ", ms_count_);
5514     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5515     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5516     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5517     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5518     PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
5519     PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5520     PrintF("\n\n");
5521   }
5522
5523   if (FLAG_print_max_heap_committed) {
5524     PrintF("\n");
5525     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5526            MaximumCommittedMemory());
5527     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5528            new_space_.MaximumCommittedMemory());
5529     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5530            old_data_space_->MaximumCommittedMemory());
5531     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5532            old_pointer_space_->MaximumCommittedMemory());
5533     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5534            old_pointer_space_->MaximumCommittedMemory());
5535     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5536            code_space_->MaximumCommittedMemory());
5537     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5538            map_space_->MaximumCommittedMemory());
5539     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5540            cell_space_->MaximumCommittedMemory());
5541     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5542            lo_space_->MaximumCommittedMemory());
5543     PrintF("\n\n");
5544   }
5545
5546   if (FLAG_verify_predictable) {
5547     PrintAlloctionsHash();
5548   }
5549
5550   TearDownArrayBuffers();
5551
5552   isolate_->global_handles()->TearDown();
5553
5554   external_string_table_.TearDown();
5555
5556   mark_compact_collector()->TearDown();
5557
5558   new_space_.TearDown();
5559
5560   if (old_pointer_space_ != NULL) {
5561     old_pointer_space_->TearDown();
5562     delete old_pointer_space_;
5563     old_pointer_space_ = NULL;
5564   }
5565
5566   if (old_data_space_ != NULL) {
5567     old_data_space_->TearDown();
5568     delete old_data_space_;
5569     old_data_space_ = NULL;
5570   }
5571
5572   if (code_space_ != NULL) {
5573     code_space_->TearDown();
5574     delete code_space_;
5575     code_space_ = NULL;
5576   }
5577
5578   if (map_space_ != NULL) {
5579     map_space_->TearDown();
5580     delete map_space_;
5581     map_space_ = NULL;
5582   }
5583
5584   if (cell_space_ != NULL) {
5585     cell_space_->TearDown();
5586     delete cell_space_;
5587     cell_space_ = NULL;
5588   }
5589
5590   if (lo_space_ != NULL) {
5591     lo_space_->TearDown();
5592     delete lo_space_;
5593     lo_space_ = NULL;
5594   }
5595
5596   store_buffer()->TearDown();
5597
5598   isolate_->memory_allocator()->TearDown();
5599 }
5600
5601
5602 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5603                                  GCType gc_type, bool pass_isolate) {
5604   DCHECK(callback != NULL);
5605   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5606   DCHECK(!gc_prologue_callbacks_.Contains(pair));
5607   return gc_prologue_callbacks_.Add(pair);
5608 }
5609
5610
5611 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5612   DCHECK(callback != NULL);
5613   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5614     if (gc_prologue_callbacks_[i].callback == callback) {
5615       gc_prologue_callbacks_.Remove(i);
5616       return;
5617     }
5618   }
5619   UNREACHABLE();
5620 }
5621
5622
5623 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5624                                  GCType gc_type, bool pass_isolate) {
5625   DCHECK(callback != NULL);
5626   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5627   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5628   return gc_epilogue_callbacks_.Add(pair);
5629 }
5630
5631
5632 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5633   DCHECK(callback != NULL);
5634   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5635     if (gc_epilogue_callbacks_[i].callback == callback) {
5636       gc_epilogue_callbacks_.Remove(i);
5637       return;
5638     }
5639   }
5640   UNREACHABLE();
5641 }
5642
5643
5644 // TODO(ishell): Find a better place for this.
5645 void Heap::AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
5646                                          Handle<DependentCode> dep) {
5647   DCHECK(!InNewSpace(*obj));
5648   DCHECK(!InNewSpace(*dep));
5649   Handle<WeakHashTable> table(weak_object_to_code_table(), isolate());
5650   table = WeakHashTable::Put(table, obj, dep);
5651   if (*table != weak_object_to_code_table())
5652     set_weak_object_to_code_table(*table);
5653   DCHECK_EQ(*dep, LookupWeakObjectToCodeDependency(obj));
5654 }
5655
5656
5657 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
5658   Object* dep = weak_object_to_code_table()->Lookup(obj);
5659   if (dep->IsDependentCode()) return DependentCode::cast(dep);
5660   return DependentCode::cast(empty_fixed_array());
5661 }
5662
5663
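// Keeps |map| alive across the next FLAG_retain_maps_for_n_gc garbage
// collections by storing a weak cell for it, together with a countdown,
// in the retained_maps array.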
5664 void Heap::AddRetainedMap(Handle<Map> map) {
5665   if (FLAG_retain_maps_for_n_gc == 0) return;
5666   Handle<WeakCell> cell = Map::WeakCellForMap(map);
5667   Handle<ArrayList> array(retained_maps(), isolate());
5668   array = ArrayList::Add(
5669       array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
5670       ArrayList::kReloadLengthAfterAllocation);
5671   if (*array != retained_maps()) {
5672     set_retained_maps(*array);
5673   }
5674 }
5675
5676
5677 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5678   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5679 }
5680
5681 #ifdef DEBUG
5682
5683 class PrintHandleVisitor : public ObjectVisitor {
5684  public:
5685   void VisitPointers(Object** start, Object** end) {
5686     for (Object** p = start; p < end; p++)
5687       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5688              reinterpret_cast<void*>(*p));
5689   }
5690 };
5691
5692
5693 void Heap::PrintHandles() {
5694   PrintF("Handles:\n");
5695   PrintHandleVisitor v;
5696   isolate_->handle_scope_implementer()->Iterate(&v);
5697 }
5698
5699 #endif
5700
5701
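// The iterators below walk the spaces in AllocationSpace order; next()
// returns NULL once every space in the respective subset has been visited.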
5702 Space* AllSpaces::next() {
5703   switch (counter_++) {
5704     case NEW_SPACE:
5705       return heap_->new_space();
5706     case OLD_POINTER_SPACE:
5707       return heap_->old_pointer_space();
5708     case OLD_DATA_SPACE:
5709       return heap_->old_data_space();
5710     case CODE_SPACE:
5711       return heap_->code_space();
5712     case MAP_SPACE:
5713       return heap_->map_space();
5714     case CELL_SPACE:
5715       return heap_->cell_space();
5716     case LO_SPACE:
5717       return heap_->lo_space();
5718     default:
5719       return NULL;
5720   }
5721 }
5722
5723
5724 PagedSpace* PagedSpaces::next() {
5725   switch (counter_++) {
5726     case OLD_POINTER_SPACE:
5727       return heap_->old_pointer_space();
5728     case OLD_DATA_SPACE:
5729       return heap_->old_data_space();
5730     case CODE_SPACE:
5731       return heap_->code_space();
5732     case MAP_SPACE:
5733       return heap_->map_space();
5734     case CELL_SPACE:
5735       return heap_->cell_space();
5736     default:
5737       return NULL;
5738   }
5739 }
5740
5741
5742 OldSpace* OldSpaces::next() {
5743   switch (counter_++) {
5744     case OLD_POINTER_SPACE:
5745       return heap_->old_pointer_space();
5746     case OLD_DATA_SPACE:
5747       return heap_->old_data_space();
5748     case CODE_SPACE:
5749       return heap_->code_space();
5750     default:
5751       return NULL;
5752   }
5753 }
5754
5755
5756 SpaceIterator::SpaceIterator(Heap* heap)
5757     : heap_(heap),
5758       current_space_(FIRST_SPACE),
5759       iterator_(NULL),
5760       size_func_(NULL) {}
5761
5762
5763 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5764     : heap_(heap),
5765       current_space_(FIRST_SPACE),
5766       iterator_(NULL),
5767       size_func_(size_func) {}
5768
5769
5770 SpaceIterator::~SpaceIterator() {
5771   // Delete active iterator if any.
5772   delete iterator_;
5773 }
5774
5775
5776 bool SpaceIterator::has_next() {
5777   // Iterate until no more spaces.
5778   return current_space_ != LAST_SPACE;
5779 }
5780
5781
5782 ObjectIterator* SpaceIterator::next() {
5783   if (iterator_ != NULL) {
5784     delete iterator_;
5785     iterator_ = NULL;
5786     // Move to the next space
5787     current_space_++;
5788     if (current_space_ > LAST_SPACE) {
5789       return NULL;
5790     }
5791   }
5792
5793   // Return iterator for the new current space.
5794   return CreateIterator();
5795 }
5796
5797
5798 // Create an iterator for the current space.
5799 ObjectIterator* SpaceIterator::CreateIterator() {
5800   DCHECK(iterator_ == NULL);
5801
5802   switch (current_space_) {
5803     case NEW_SPACE:
5804       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5805       break;
5806     case OLD_POINTER_SPACE:
5807       iterator_ =
5808           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5809       break;
5810     case OLD_DATA_SPACE:
5811       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5812       break;
5813     case CODE_SPACE:
5814       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5815       break;
5816     case MAP_SPACE:
5817       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5818       break;
5819     case CELL_SPACE:
5820       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5821       break;
5822     case LO_SPACE:
5823       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5824       break;
5825   }
5826
5827   // Return the newly allocated iterator.
5828   DCHECK(iterator_ != NULL);
5829   return iterator_;
5830 }
5831
5832
5833 class HeapObjectsFilter {
5834  public:
5835   virtual ~HeapObjectsFilter() {}
5836   virtual bool SkipObject(HeapObject* object) = 0;
5837 };
5838
5839
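// Skips objects that are unreachable from the roots. The constructor marks
// the transitive closure of the roots using the mark-compact mark bits, and
// the destructor clears those mark bits again.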
5840 class UnreachableObjectsFilter : public HeapObjectsFilter {
5841  public:
5842   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5843     MarkReachableObjects();
5844   }
5845
5846   ~UnreachableObjectsFilter() {
5847     heap_->mark_compact_collector()->ClearMarkbits();
5848   }
5849
5850   bool SkipObject(HeapObject* object) {
5851     MarkBit mark_bit = Marking::MarkBitFrom(object);
5852     return !mark_bit.Get();
5853   }
5854
5855  private:
5856   class MarkingVisitor : public ObjectVisitor {
5857    public:
5858     MarkingVisitor() : marking_stack_(10) {}
5859
5860     void VisitPointers(Object** start, Object** end) {
5861       for (Object** p = start; p < end; p++) {
5862         if (!(*p)->IsHeapObject()) continue;
5863         HeapObject* obj = HeapObject::cast(*p);
5864         MarkBit mark_bit = Marking::MarkBitFrom(obj);
5865         if (!mark_bit.Get()) {
5866           mark_bit.Set();
5867           marking_stack_.Add(obj);
5868         }
5869       }
5870     }
5871
5872     void TransitiveClosure() {
5873       while (!marking_stack_.is_empty()) {
5874         HeapObject* obj = marking_stack_.RemoveLast();
5875         obj->Iterate(this);
5876       }
5877     }
5878
5879    private:
5880     List<HeapObject*> marking_stack_;
5881   };
5882
5883   void MarkReachableObjects() {
5884     MarkingVisitor visitor;
5885     heap_->IterateRoots(&visitor, VISIT_ALL);
5886     visitor.TransitiveClosure();
5887   }
5888
5889   Heap* heap_;
5890   DisallowHeapAllocation no_allocation_;
5891 };
5892
5893
5894 HeapIterator::HeapIterator(Heap* heap)
5895     : make_heap_iterable_helper_(heap),
5896       no_heap_allocation_(),
5897       heap_(heap),
5898       filtering_(HeapIterator::kNoFiltering),
5899       filter_(NULL) {
5900   Init();
5901 }
5902
5903
5904 HeapIterator::HeapIterator(Heap* heap,
5905                            HeapIterator::HeapObjectsFiltering filtering)
5906     : make_heap_iterable_helper_(heap),
5907       no_heap_allocation_(),
5908       heap_(heap),
5909       filtering_(filtering),
5910       filter_(NULL) {
5911   Init();
5912 }
5913
5914
5915 HeapIterator::~HeapIterator() { Shutdown(); }
5916
5917
5918 void HeapIterator::Init() {
5919   // Start the iteration.
5920   space_iterator_ = new SpaceIterator(heap_);
5921   switch (filtering_) {
5922     case kFilterUnreachable:
5923       filter_ = new UnreachableObjectsFilter(heap_);
5924       break;
5925     default:
5926       break;
5927   }
5928   object_iterator_ = space_iterator_->next();
5929 }
5930
5931
5932 void HeapIterator::Shutdown() {
5933 #ifdef DEBUG
5934   // Assert that in filtering mode we have iterated through all
5935   // objects. Otherwise, the heap will be left in an inconsistent state.
5936   if (filtering_ != kNoFiltering) {
5937     DCHECK(object_iterator_ == NULL);
5938   }
5939 #endif
5940   // Make sure the last iterator is deallocated.
5941   delete space_iterator_;
5942   space_iterator_ = NULL;
5943   object_iterator_ = NULL;
5944   delete filter_;
5945   filter_ = NULL;
5946 }
5947
5948
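// Returns the next object in the heap, skipping objects rejected by the
// active filter (if any), or NULL once the whole heap has been visited.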
5949 HeapObject* HeapIterator::next() {
5950   if (filter_ == NULL) return NextObject();
5951
5952   HeapObject* obj = NextObject();
5953   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5954   return obj;
5955 }
5956
5957
5958 HeapObject* HeapIterator::NextObject() {
5959   // No iterator means we are done.
5960   if (object_iterator_ == NULL) return NULL;
5961
5962   if (HeapObject* obj = object_iterator_->next_object()) {
5963     // If the current iterator has more objects we are fine.
5964     return obj;
5965   } else {
5966     // Go through the spaces looking for one that has objects.
5967     while (space_iterator_->has_next()) {
5968       object_iterator_ = space_iterator_->next();
5969       if (HeapObject* obj = object_iterator_->next_object()) {
5970         return obj;
5971       }
5972     }
5973   }
5974   // Done with the last space.
5975   object_iterator_ = NULL;
5976   return NULL;
5977 }
5978
5979
5980 void HeapIterator::reset() {
5981   // Restart the iterator.
5982   Shutdown();
5983   Init();
5984 }
5985
5986
5987 #ifdef DEBUG
5988
5989 Object* const PathTracer::kAnyGlobalObject = NULL;
5990
5991 class PathTracer::MarkVisitor : public ObjectVisitor {
5992  public:
5993   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5994   void VisitPointers(Object** start, Object** end) {
5995     // Scan all HeapObject pointers in [start, end)
5996     for (Object** p = start; !tracer_->found() && (p < end); p++) {
5997       if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5998     }
5999   }
6000
6001  private:
6002   PathTracer* tracer_;
6003 };
6004
6005
6006 class PathTracer::UnmarkVisitor : public ObjectVisitor {
6007  public:
6008   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
6009   void VisitPointers(Object** start, Object** end) {
6010     // Scan all HeapObject pointers in [start, end)
6011     for (Object** p = start; p < end; p++) {
6012       if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
6013     }
6014   }
6015
6016  private:
6017   PathTracer* tracer_;
6018 };
6019
6020
6021 void PathTracer::VisitPointers(Object** start, Object** end) {
6022   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
6023   // Visit all HeapObject pointers in [start, end)
6024   for (Object** p = start; !done && (p < end); p++) {
6025     if ((*p)->IsHeapObject()) {
6026       TracePathFrom(p);
6027       done = ((what_to_find_ == FIND_FIRST) && found_target_);
6028     }
6029   }
6030 }
6031
6032
6033 void PathTracer::Reset() {
6034   found_target_ = false;
6035   object_stack_.Clear();
6036 }
6037
6038
6039 void PathTracer::TracePathFrom(Object** root) {
6040   DCHECK((search_target_ == kAnyGlobalObject) ||
6041          search_target_->IsHeapObject());
6042   found_target_in_trace_ = false;
6043   Reset();
6044
6045   MarkVisitor mark_visitor(this);
6046   MarkRecursively(root, &mark_visitor);
6047
6048   UnmarkVisitor unmark_visitor(this);
6049   UnmarkRecursively(root, &unmark_visitor);
6050
6051   ProcessResults();
6052 }
6053
6054
6055 static bool SafeIsNativeContext(HeapObject* obj) {
6056   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
6057 }
6058
6059
6060 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
6061   if (!(*p)->IsHeapObject()) return;
6062
6063   HeapObject* obj = HeapObject::cast(*p);
6064
6065   MapWord map_word = obj->map_word();
6066   if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
6067
6068   if (found_target_in_trace_) return;  // stop if target found
6069   object_stack_.Add(obj);
6070   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
6071       (obj == search_target_)) {
6072     found_target_in_trace_ = true;
6073     found_target_ = true;
6074     return;
6075   }
6076
6077   bool is_native_context = SafeIsNativeContext(obj);
6078
6079   // not visited yet
6080   Map* map = Map::cast(map_word.ToMap());
6081
6082   MapWord marked_map_word =
6083       MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
6084   obj->set_map_word(marked_map_word);
6085
6086   // Scan the object body.
6087   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
6088     // This is specialized to scan Contexts properly.
6089     Object** start =
6090         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
6091     Object** end =
6092         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
6093                                    Context::FIRST_WEAK_SLOT * kPointerSize);
6094     mark_visitor->VisitPointers(start, end);
6095   } else {
6096     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
6097   }
6098
6099   // Scan the map after the body because the body is a lot more interesting
6100   // when doing leak detection.
6101   MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
6102
6103   if (!found_target_in_trace_) {  // don't pop if found the target
6104     object_stack_.RemoveLast();
6105   }
6106 }
6107
6108
6109 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
6110   if (!(*p)->IsHeapObject()) return;
6111
6112   HeapObject* obj = HeapObject::cast(*p);
6113
6114   MapWord map_word = obj->map_word();
6115   if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
6116
6117   MapWord unmarked_map_word =
6118       MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
6119   obj->set_map_word(unmarked_map_word);
6120
6121   Map* map = Map::cast(unmarked_map_word.ToMap());
6122
6123   UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
6124
6125   obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
6126 }
6127
6128
6129 void PathTracer::ProcessResults() {
6130   if (found_target_) {
6131     OFStream os(stdout);
6132     os << "=====================================\n"
6133        << "====        Path to object       ====\n"
6134        << "=====================================\n\n";
6135
6136     DCHECK(!object_stack_.is_empty());
6137     for (int i = 0; i < object_stack_.length(); i++) {
6138       if (i > 0) os << "\n     |\n     |\n     V\n\n";
6139       object_stack_[i]->Print(os);
6140     }
6141     os << "=====================================\n";
6142   }
6143 }
6144
6145
6146 // Triggers a depth-first traversal of reachable objects from one
6147 // given root object and finds a path to a specific heap object and
6148 // prints it.
6149 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
6150   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6151   tracer.VisitPointer(&root);
6152 }
6153
6154
6155 // Triggers a depth-first traversal of reachable objects from roots
6156 // and finds a path to a specific heap object and prints it.
6157 void Heap::TracePathToObject(Object* target) {
6158   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
6159   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6160 }
6161
6162
6163 // Triggers a depth-first traversal of reachable objects from roots
6164 // and finds a path to any global object and prints it. Useful for
6165 // determining the source for leaks of global objects.
6166 void Heap::TracePathToGlobal() {
6167   PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
6168                     VISIT_ALL);
6169   IterateRoots(&tracer, VISIT_ONLY_STRONG);
6170 }
6171 #endif
6172
6173
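// Accumulates cumulative GC statistics: total pause time, longest pause,
// largest live size after GC and shortest mutator interval (depending on
// flags), plus the total time spent marking.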
6174 void Heap::UpdateCumulativeGCStatistics(double duration,
6175                                         double spent_in_mutator,
6176                                         double marking_time) {
6177   if (FLAG_print_cumulative_gc_stat) {
6178     total_gc_time_ms_ += duration;
6179     max_gc_pause_ = Max(max_gc_pause_, duration);
6180     max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
6181     min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
6182   } else if (FLAG_trace_gc_verbose) {
6183     total_gc_time_ms_ += duration;
6184   }
6185
6186   marking_time_ += marking_time;
6187 }
6188
6189
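// Computes the bucket index for a (map, name) pair by mixing the map's
// address with the name's hash and masking the result to the cache capacity.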
6190 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6191   DisallowHeapAllocation no_gc;
6192   // Uses only lower 32 bits if pointers are larger.
6193   uintptr_t addr_hash =
6194       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6195   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6196 }
6197
6198
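// Probes the kEntriesPerBucket slots of the bucket for an exact (map, name)
// match and returns the cached field offset, or kNotFound.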
6199 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6200   DisallowHeapAllocation no_gc;
6201   int index = (Hash(map, name) & kHashMask);
6202   for (int i = 0; i < kEntriesPerBucket; i++) {
6203     Key& key = keys_[index + i];
6204     if ((key.map == *map) && key.name->Equals(*name)) {
6205       return field_offsets_[index + i];
6206     }
6207   }
6208   return kNotFound;
6209 }
6210
6211
6212 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
6213                               int field_offset) {
6214   DisallowHeapAllocation no_gc;
6215   if (!name->IsUniqueName()) {
6216     if (!StringTable::InternalizeStringIfExists(
6217              name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
6218       return;
6219     }
6220   }
6221   // This cache is cleared only between mark compact passes, so we expect the
6222   // cache to only contain old space names.
6223   DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
6224
6225   int index = (Hash(map, name) & kHashMask);
6226   // After a GC there will be free slots, so we use them in order (this may
6227   // help to get the most frequently used one in position 0).
6228   for (int i = 0; i < kEntriesPerBucket; i++) {
6229     Key& key = keys_[index + i];
6230     Object* free_entry_indicator = NULL;
6231     if (key.map == free_entry_indicator) {
6232       key.map = *map;
6233       key.name = *name;
6234       field_offsets_[index + i] = field_offset;
6235       return;
6236     }
6237   }
6238   // No free entry found in this bucket, so we move them all down one and
6239   // put the new entry at position zero.
6240   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6241     Key& key = keys_[index + i];
6242     Key& key2 = keys_[index + i - 1];
6243     key = key2;
6244     field_offsets_[index + i] = field_offsets_[index + i - 1];
6245   }
6246
6247   // Write the new first entry.
6248   Key& key = keys_[index];
6249   key.map = *map;
6250   key.name = *name;
6251   field_offsets_[index] = field_offset;
6252 }
6253
6254
6255 void KeyedLookupCache::Clear() {
6256   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6257 }
6258
6259
6260 void DescriptorLookupCache::Clear() {
6261   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6262 }
6263
6264
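// Compacts both string lists: entries overwritten with the hole are dropped,
// and strings that have left new space are moved from the new-space list to
// the old-space list.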
6265 void ExternalStringTable::CleanUp() {
6266   int last = 0;
6267   for (int i = 0; i < new_space_strings_.length(); ++i) {
6268     if (new_space_strings_[i] == heap_->the_hole_value()) {
6269       continue;
6270     }
6271     DCHECK(new_space_strings_[i]->IsExternalString());
6272     if (heap_->InNewSpace(new_space_strings_[i])) {
6273       new_space_strings_[last++] = new_space_strings_[i];
6274     } else {
6275       old_space_strings_.Add(new_space_strings_[i]);
6276     }
6277   }
6278   new_space_strings_.Rewind(last);
6279   new_space_strings_.Trim();
6280
6281   last = 0;
6282   for (int i = 0; i < old_space_strings_.length(); ++i) {
6283     if (old_space_strings_[i] == heap_->the_hole_value()) {
6284       continue;
6285     }
6286     DCHECK(old_space_strings_[i]->IsExternalString());
6287     DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6288     old_space_strings_[last++] = old_space_strings_[i];
6289   }
6290   old_space_strings_.Rewind(last);
6291   old_space_strings_.Trim();
6292 #ifdef VERIFY_HEAP
6293   if (FLAG_verify_heap) {
6294     Verify();
6295   }
6296 #endif
6297 }
6298
6299
6300 void ExternalStringTable::TearDown() {
6301   for (int i = 0; i < new_space_strings_.length(); ++i) {
6302     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6303   }
6304   new_space_strings_.Free();
6305   for (int i = 0; i < old_space_strings_.length(); ++i) {
6306     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6307   }
6308   old_space_strings_.Free();
6309 }
6310
6311
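// Pushes |chunk| onto a singly linked list of chunks whose memory is released
// later by FreeQueuedChunks().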
6312 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6313   chunk->set_next_chunk(chunks_queued_for_free_);
6314   chunks_queued_for_free_ = chunk;
6315 }
6316
6317
6318 void Heap::FreeQueuedChunks() {
6319   if (chunks_queued_for_free_ == NULL) return;
6320   MemoryChunk* next;
6321   MemoryChunk* chunk;
6322   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6323     next = chunk->next_chunk();
6324     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6325
6326     if (chunk->owner()->identity() == LO_SPACE) {
6327       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6328       // If FromAnyPointerAddress encounters a slot that belongs to a large
6329       // chunk queued for deletion it will fail to find the chunk because
6330       // it tries to perform a search in the list of pages owned by the large
6331       // object space, and the queued chunks were detached from that list.
6332       // To work around this we split the large chunk into normal kPageSize-
6333       // aligned pieces and initialize the size, owner and flags of every piece.
6334       // If FromAnyPointerAddress encounters a slot that belongs to one of
6335       // these smaller pieces it will treat it as a slot on a normal Page.
6336       Address chunk_end = chunk->address() + chunk->size();
6337       MemoryChunk* inner =
6338           MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6339       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6340       while (inner <= inner_last) {
6341         // Size of a large chunk is always a multiple of
6342         // OS::AllocateAlignment() so there is always
6343         // enough space for a fake MemoryChunk header.
6344         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6345         // Guard against overflow.
6346         if (area_end < inner->address()) area_end = chunk_end;
6347         inner->SetArea(inner->address(), area_end);
6348         inner->set_size(Page::kPageSize);
6349         inner->set_owner(lo_space());
6350         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6351         inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6352       }
6353     }
6354   }
6355   isolate_->heap()->store_buffer()->Compact();
6356   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6357   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6358     next = chunk->next_chunk();
6359     isolate_->memory_allocator()->Free(chunk);
6360   }
6361   chunks_queued_for_free_ = NULL;
6362 }
6363
6364
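// Records the (tagged) address of a recently unmapped page in a small ring
// buffer so that it can be recognized in crash dumps.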
6365 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6366   uintptr_t p = reinterpret_cast<uintptr_t>(page);
6367   // Tag the page pointer to make it findable in the dump file.
6368   if (compacted) {
6369     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
6370   } else {
6371     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
6372   }
6373   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6374       reinterpret_cast<Address>(p);
6375   remembered_unmapped_pages_index_++;
6376   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6377 }
6378
6379
6380 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6381   memset(object_counts_, 0, sizeof(object_counts_));
6382   memset(object_sizes_, 0, sizeof(object_sizes_));
6383   if (clear_last_time_stats) {
6384     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6385     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6386   }
6387 }
6388
6389
6390 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6391
6392
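// Publishes the object counts and sizes gathered since the last checkpoint to
// the isolate's counters as deltas against the previous snapshot, then
// snapshots the current values and clears the working stats.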
6393 void Heap::CheckpointObjectStats() {
6394   base::LockGuard<base::Mutex> lock_guard(
6395       checkpoint_object_stats_mutex.Pointer());
6396   Counters* counters = isolate()->counters();
6397 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
6398   counters->count_of_##name()->Increment(                \
6399       static_cast<int>(object_counts_[name]));           \
6400   counters->count_of_##name()->Decrement(                \
6401       static_cast<int>(object_counts_last_time_[name])); \
6402   counters->size_of_##name()->Increment(                 \
6403       static_cast<int>(object_sizes_[name]));            \
6404   counters->size_of_##name()->Decrement(                 \
6405       static_cast<int>(object_sizes_last_time_[name]));
6406   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6407 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6408   int index;
6409 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6410   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
6411   counters->count_of_CODE_TYPE_##name()->Increment(       \
6412       static_cast<int>(object_counts_[index]));           \
6413   counters->count_of_CODE_TYPE_##name()->Decrement(       \
6414       static_cast<int>(object_counts_last_time_[index])); \
6415   counters->size_of_CODE_TYPE_##name()->Increment(        \
6416       static_cast<int>(object_sizes_[index]));            \
6417   counters->size_of_CODE_TYPE_##name()->Decrement(        \
6418       static_cast<int>(object_sizes_last_time_[index]));
6419   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6420 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6421 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6422   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
6423   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
6424       static_cast<int>(object_counts_[index]));           \
6425   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
6426       static_cast<int>(object_counts_last_time_[index])); \
6427   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
6428       static_cast<int>(object_sizes_[index]));            \
6429   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
6430       static_cast<int>(object_sizes_last_time_[index]));
6431   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6432 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6433 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
6434   index =                                                                     \
6435       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6436   counters->count_of_CODE_AGE_##name()->Increment(                            \
6437       static_cast<int>(object_counts_[index]));                               \
6438   counters->count_of_CODE_AGE_##name()->Decrement(                            \
6439       static_cast<int>(object_counts_last_time_[index]));                     \
6440   counters->size_of_CODE_AGE_##name()->Increment(                             \
6441       static_cast<int>(object_sizes_[index]));                                \
6442   counters->size_of_CODE_AGE_##name()->Decrement(                             \
6443       static_cast<int>(object_sizes_last_time_[index]));
6444   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6445 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6446
6447   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6448   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6449   ClearObjectStats();
6450 }
6451 }  // namespace internal
6452 }  // namespace v8