1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/once.h"
10 #include "src/bootstrapper.h"
11 #include "src/codegen.h"
12 #include "src/compilation-cache.h"
13 #include "src/conversions.h"
14 #include "src/cpu-profiler.h"
15 #include "src/debug.h"
16 #include "src/deoptimizer.h"
17 #include "src/global-handles.h"
18 #include "src/heap-profiler.h"
19 #include "src/incremental-marking.h"
20 #include "src/isolate-inl.h"
21 #include "src/mark-compact.h"
22 #include "src/natives.h"
23 #include "src/objects-visiting.h"
24 #include "src/objects-visiting-inl.h"
25 #include "src/runtime-profiler.h"
26 #include "src/scopeinfo.h"
27 #include "src/snapshot.h"
28 #include "src/store-buffer.h"
29 #include "src/utils/random-number-generator.h"
30 #include "src/utils.h"
31 #include "src/v8threads.h"
32 #include "src/vm-state-inl.h"
33 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
34 #include "src/regexp-macro-assembler.h"
35 #include "src/arm/regexp-macro-assembler-arm.h"
36 #endif
37 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
38 #include "src/regexp-macro-assembler.h"
39 #include "src/mips/regexp-macro-assembler-mips.h"
40 #endif
41
42 namespace v8 {
43 namespace internal {
44
45
46 Heap::Heap()
47     : amount_of_external_allocated_memory_(0),
48       amount_of_external_allocated_memory_at_last_global_gc_(0),
49       isolate_(NULL),
50       code_range_size_(0),
51 // semispace_size_ should be a power of 2 and old_generation_size_ should be
52 // a multiple of Page::kPageSize.
53       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
54       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
55       initial_semispace_size_(Page::kPageSize),
56       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
57       max_executable_size_(256ul * (kPointerSize / 4) * MB),
58 // Variables set based on semispace_size_ and old_generation_size_ in
59 // ConfigureHeap.
60 // Will be 4 * reserved_semispace_size_ to ensure that the young
61 // generation can be aligned to its size.
62       maximum_committed_(0),
63       survived_since_last_expansion_(0),
64       sweep_generation_(0),
65       always_allocate_scope_depth_(0),
66       linear_allocation_scope_depth_(0),
67       contexts_disposed_(0),
68       global_ic_age_(0),
69       flush_monomorphic_ics_(false),
70       scan_on_scavenge_pages_(0),
71       new_space_(this),
72       old_pointer_space_(NULL),
73       old_data_space_(NULL),
74       code_space_(NULL),
75       map_space_(NULL),
76       cell_space_(NULL),
77       property_cell_space_(NULL),
78       lo_space_(NULL),
79       gc_state_(NOT_IN_GC),
80       gc_post_processing_depth_(0),
81       ms_count_(0),
82       gc_count_(0),
83       remembered_unmapped_pages_index_(0),
84       unflattened_strings_length_(0),
85 #ifdef DEBUG
86       allocation_timeout_(0),
87 #endif  // DEBUG
88       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
89       old_gen_exhausted_(false),
90       inline_allocation_disabled_(false),
91       store_buffer_rebuilder_(store_buffer()),
92       hidden_string_(NULL),
93       gc_safe_size_of_old_object_(NULL),
94       total_regexp_code_generated_(0),
95       tracer_(NULL),
96       high_survival_rate_period_length_(0),
97       promoted_objects_size_(0),
98       promotion_rate_(0),
99       semi_space_copied_object_size_(0),
100       semi_space_copied_rate_(0),
101       maximum_size_scavenges_(0),
102       max_gc_pause_(0.0),
103       total_gc_time_ms_(0.0),
104       max_alive_after_gc_(0),
105       min_in_mutator_(kMaxInt),
106       alive_after_last_gc_(0),
107       last_gc_end_timestamp_(0.0),
108       marking_time_(0.0),
109       sweeping_time_(0.0),
110       mark_compact_collector_(this),
111       store_buffer_(this),
112       marking_(this),
113       incremental_marking_(this),
114       number_idle_notifications_(0),
115       last_idle_notification_gc_count_(0),
116       last_idle_notification_gc_count_init_(false),
117       mark_sweeps_since_idle_round_started_(0),
118       gc_count_at_last_idle_gc_(0),
119       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
120       full_codegen_bytes_generated_(0),
121       crankshaft_codegen_bytes_generated_(0),
122       gcs_since_last_deopt_(0),
123 #ifdef VERIFY_HEAP
124       no_weak_object_verification_scope_depth_(0),
125 #endif
126       allocation_sites_scratchpad_length_(0),
127       promotion_queue_(this),
128       configured_(false),
129       external_string_table_(this),
130       chunks_queued_for_free_(NULL),
131       gc_callbacks_depth_(0) {
132   // Allow build-time customization of the max semispace size. Building
133   // V8 with snapshots and a non-default max semispace size is much
134   // easier if you can define it as part of the build environment.
135 #if defined(V8_MAX_SEMISPACE_SIZE)
136   max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
137 #endif
138
139   // Ensure old_generation_size_ is a multiple of kPageSize.
140   ASSERT(MB >= Page::kPageSize);
141
142   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
143   set_native_contexts_list(NULL);
144   set_array_buffers_list(Smi::FromInt(0));
145   set_allocation_sites_list(Smi::FromInt(0));
146   set_encountered_weak_collections(Smi::FromInt(0));
147   // Put a dummy entry in the remembered pages so we can find the list in
148   // the minidump even if there are no real unmapped pages.
149   RememberUnmappedPage(NULL, false);
150
151   ClearObjectStats(true);
152 }
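// Note: the (kPointerSize / 4) factor in the defaults above scales the limits
// with the pointer width, so a 32-bit build (kPointerSize == 4) gets 8 MB
// semispaces, a 700 MB old generation and a 256 MB executable limit, while a
// 64-bit build (kPointerSize == 8) doubles these to 16 MB, 1400 MB and 512 MB.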
153
154
155 intptr_t Heap::Capacity() {
156   if (!HasBeenSetUp()) return 0;
157
158   return new_space_.Capacity() +
159       old_pointer_space_->Capacity() +
160       old_data_space_->Capacity() +
161       code_space_->Capacity() +
162       map_space_->Capacity() +
163       cell_space_->Capacity() +
164       property_cell_space_->Capacity();
165 }
166
167
168 intptr_t Heap::CommittedMemory() {
169   if (!HasBeenSetUp()) return 0;
170
171   return new_space_.CommittedMemory() +
172       old_pointer_space_->CommittedMemory() +
173       old_data_space_->CommittedMemory() +
174       code_space_->CommittedMemory() +
175       map_space_->CommittedMemory() +
176       cell_space_->CommittedMemory() +
177       property_cell_space_->CommittedMemory() +
178       lo_space_->Size();
179 }
180
181
182 size_t Heap::CommittedPhysicalMemory() {
183   if (!HasBeenSetUp()) return 0;
184
185   return new_space_.CommittedPhysicalMemory() +
186       old_pointer_space_->CommittedPhysicalMemory() +
187       old_data_space_->CommittedPhysicalMemory() +
188       code_space_->CommittedPhysicalMemory() +
189       map_space_->CommittedPhysicalMemory() +
190       cell_space_->CommittedPhysicalMemory() +
191       property_cell_space_->CommittedPhysicalMemory() +
192       lo_space_->CommittedPhysicalMemory();
193 }
194
195
196 intptr_t Heap::CommittedMemoryExecutable() {
197   if (!HasBeenSetUp()) return 0;
198
199   return isolate()->memory_allocator()->SizeExecutable();
200 }
201
202
203 void Heap::UpdateMaximumCommitted() {
204   if (!HasBeenSetUp()) return;
205
206   intptr_t current_committed_memory = CommittedMemory();
207   if (current_committed_memory > maximum_committed_) {
208     maximum_committed_ = current_committed_memory;
209   }
210 }
211
212
213 intptr_t Heap::Available() {
214   if (!HasBeenSetUp()) return 0;
215
216   return new_space_.Available() +
217       old_pointer_space_->Available() +
218       old_data_space_->Available() +
219       code_space_->Available() +
220       map_space_->Available() +
221       cell_space_->Available() +
222       property_cell_space_->Available();
223 }
224
225
226 bool Heap::HasBeenSetUp() {
227   return old_pointer_space_ != NULL &&
228          old_data_space_ != NULL &&
229          code_space_ != NULL &&
230          map_space_ != NULL &&
231          cell_space_ != NULL &&
232          property_cell_space_ != NULL &&
233          lo_space_ != NULL;
234 }
235
236
237 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
238   if (IntrusiveMarking::IsMarked(object)) {
239     return IntrusiveMarking::SizeOfMarkedObject(object);
240   }
241   return object->SizeFromMap(object->map());
242 }
243
244
245 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
246                                               const char** reason) {
247   // Is global GC requested?
248   if (space != NEW_SPACE) {
249     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
250     *reason = "GC in old space requested";
251     return MARK_COMPACTOR;
252   }
253
254   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
255     *reason = "GC in old space forced by flags";
256     return MARK_COMPACTOR;
257   }
258
259   // Is enough data promoted to justify a global GC?
260   if (OldGenerationAllocationLimitReached()) {
261     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
262     *reason = "promotion limit reached";
263     return MARK_COMPACTOR;
264   }
265
266   // Have allocation in OLD and LO failed?
267   if (old_gen_exhausted_) {
268     isolate_->counters()->
269         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
270     *reason = "old generations exhausted";
271     return MARK_COMPACTOR;
272   }
273
274   // Is there enough space left in OLD to guarantee that a scavenge can
275   // succeed?
276   //
277   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
278   // for object promotion. It counts only the bytes that the memory
279   // allocator has not yet allocated from the OS and assigned to any space,
280   // and does not count available bytes already in the old space or code
281   // space.  Undercounting is safe---we may get an unrequested full GC when
282   // a scavenge would have succeeded.
283   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
284     isolate_->counters()->
285         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
286     *reason = "scavenge might not succeed";
287     return MARK_COMPACTOR;
288   }
289
290   // Default
291   *reason = NULL;
292   return SCAVENGER;
293 }
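// To summarize the selection above: a full mark-compact collection is chosen
// when the caller asked for an old-space GC, when forced by flags, when the
// promotion limit has been reached, when old-generation allocation has already
// failed, or when the memory allocator cannot guarantee enough room to promote
// the entire new space; otherwise a scavenge of the young generation suffices.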
294
295
296 // TODO(1238405): Combine the infrastructure for --heap-stats and
297 // --log-gc to avoid the complicated preprocessor and flag testing.
298 void Heap::ReportStatisticsBeforeGC() {
299   // Heap::ReportHeapStatistics will also log NewSpace statistics when
300   // --log-gc is set.  The following logic is used to avoid double
301   // logging.
302 #ifdef DEBUG
303   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
304   if (FLAG_heap_stats) {
305     ReportHeapStatistics("Before GC");
306   } else if (FLAG_log_gc) {
307     new_space_.ReportStatistics();
308   }
309   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
310 #else
311   if (FLAG_log_gc) {
312     new_space_.CollectStatistics();
313     new_space_.ReportStatistics();
314     new_space_.ClearHistograms();
315   }
316 #endif  // DEBUG
317 }
318
319
320 void Heap::PrintShortHeapStatistics() {
321   if (!FLAG_trace_gc_verbose) return;
322   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
323                ", available: %6" V8_PTR_PREFIX "d KB\n",
324            isolate_->memory_allocator()->Size() / KB,
325            isolate_->memory_allocator()->Available() / KB);
326   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
327                ", available: %6" V8_PTR_PREFIX "d KB"
328                ", committed: %6" V8_PTR_PREFIX "d KB\n",
329            new_space_.Size() / KB,
330            new_space_.Available() / KB,
331            new_space_.CommittedMemory() / KB);
332   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
333                ", available: %6" V8_PTR_PREFIX "d KB"
334                ", committed: %6" V8_PTR_PREFIX "d KB\n",
335            old_pointer_space_->SizeOfObjects() / KB,
336            old_pointer_space_->Available() / KB,
337            old_pointer_space_->CommittedMemory() / KB);
338   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
339                ", available: %6" V8_PTR_PREFIX "d KB"
340                ", committed: %6" V8_PTR_PREFIX "d KB\n",
341            old_data_space_->SizeOfObjects() / KB,
342            old_data_space_->Available() / KB,
343            old_data_space_->CommittedMemory() / KB);
344   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
345                ", available: %6" V8_PTR_PREFIX "d KB"
346                ", committed: %6" V8_PTR_PREFIX "d KB\n",
347            code_space_->SizeOfObjects() / KB,
348            code_space_->Available() / KB,
349            code_space_->CommittedMemory() / KB);
350   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
351                ", available: %6" V8_PTR_PREFIX "d KB"
352                ", committed: %6" V8_PTR_PREFIX "d KB\n",
353            map_space_->SizeOfObjects() / KB,
354            map_space_->Available() / KB,
355            map_space_->CommittedMemory() / KB);
356   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
357                ", available: %6" V8_PTR_PREFIX "d KB"
358                ", committed: %6" V8_PTR_PREFIX "d KB\n",
359            cell_space_->SizeOfObjects() / KB,
360            cell_space_->Available() / KB,
361            cell_space_->CommittedMemory() / KB);
362   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
363                ", available: %6" V8_PTR_PREFIX "d KB"
364                ", committed: %6" V8_PTR_PREFIX "d KB\n",
365            property_cell_space_->SizeOfObjects() / KB,
366            property_cell_space_->Available() / KB,
367            property_cell_space_->CommittedMemory() / KB);
368   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
369                ", available: %6" V8_PTR_PREFIX "d KB"
370                ", committed: %6" V8_PTR_PREFIX "d KB\n",
371            lo_space_->SizeOfObjects() / KB,
372            lo_space_->Available() / KB,
373            lo_space_->CommittedMemory() / KB);
374   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
375                ", available: %6" V8_PTR_PREFIX "d KB"
376                ", committed: %6" V8_PTR_PREFIX "d KB\n",
377            this->SizeOfObjects() / KB,
378            this->Available() / KB,
379            this->CommittedMemory() / KB);
380   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
381            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
382   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
383 }
384
385
386 // TODO(1238405): Combine the infrastructure for --heap-stats and
387 // --log-gc to avoid the complicated preprocessor and flag testing.
388 void Heap::ReportStatisticsAfterGC() {
389   // As with the statistics before GC, some logic is needed to ensure that
390   // NewSpace statistics are logged exactly once when --log-gc is turned on.
391 #if defined(DEBUG)
392   if (FLAG_heap_stats) {
393     new_space_.CollectStatistics();
394     ReportHeapStatistics("After GC");
395   } else if (FLAG_log_gc) {
396     new_space_.ReportStatistics();
397   }
398 #else
399   if (FLAG_log_gc) new_space_.ReportStatistics();
400 #endif  // DEBUG
401 }
402
403
404 void Heap::GarbageCollectionPrologue() {
405   {  AllowHeapAllocation for_the_first_part_of_prologue;
406     ClearJSFunctionResultCaches();
407     gc_count_++;
408     unflattened_strings_length_ = 0;
409
410     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
411       mark_compact_collector()->EnableCodeFlushing(true);
412     }
413
414 #ifdef VERIFY_HEAP
415     if (FLAG_verify_heap) {
416       Verify();
417     }
418 #endif
419   }
420
421   // Reset GC statistics.
422   promoted_objects_size_ = 0;
423   semi_space_copied_object_size_ = 0;
424
425   UpdateMaximumCommitted();
426
427 #ifdef DEBUG
428   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
429
430   if (FLAG_gc_verbose) Print();
431
432   ReportStatisticsBeforeGC();
433 #endif  // DEBUG
434
435   store_buffer()->GCPrologue();
436
437   if (isolate()->concurrent_osr_enabled()) {
438     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
439   }
440
441   if (new_space_.IsAtMaximumCapacity()) {
442     maximum_size_scavenges_++;
443   } else {
444     maximum_size_scavenges_ = 0;
445   }
446   CheckNewSpaceExpansionCriteria();
447 }
448
449
450 intptr_t Heap::SizeOfObjects() {
451   intptr_t total = 0;
452   AllSpaces spaces(this);
453   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
454     total += space->SizeOfObjects();
455   }
456   return total;
457 }
458
459
460 void Heap::ClearAllICsByKind(Code::Kind kind) {
461   HeapObjectIterator it(code_space());
462
463   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
464     Code* code = Code::cast(object);
465     Code::Kind current_kind = code->kind();
466     if (current_kind == Code::FUNCTION ||
467         current_kind == Code::OPTIMIZED_FUNCTION) {
468       code->ClearInlineCaches(kind);
469     }
470   }
471 }
472
473
474 void Heap::RepairFreeListsAfterBoot() {
475   PagedSpaces spaces(this);
476   for (PagedSpace* space = spaces.next();
477        space != NULL;
478        space = spaces.next()) {
479     space->RepairFreeListsAfterBoot();
480   }
481 }
482
483
484 void Heap::ProcessPretenuringFeedback() {
485   if (FLAG_allocation_site_pretenuring) {
486     int tenure_decisions = 0;
487     int dont_tenure_decisions = 0;
488     int allocation_mementos_found = 0;
489     int allocation_sites = 0;
490     int active_allocation_sites = 0;
491
492     // If the scratchpad overflowed, we have to iterate over the allocation
493     // sites list.
494     // TODO(hpayer): We iterate over the whole list of allocation sites
495     // once we have grown to the maximum semi-space size, in order to deopt
496     // maybe-tenured allocation sites. We could hold the maybe-tenured sites
497     // in a separate data structure if this is a performance problem.
498     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
499     bool use_scratchpad =
500          allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
501          !deopt_maybe_tenured;
502
503     int i = 0;
504     Object* list_element = allocation_sites_list();
505     bool trigger_deoptimization = false;
506     bool maximum_size_scavenge = MaximumSizeScavenge();
507     while (use_scratchpad ?
508               i < allocation_sites_scratchpad_length_ :
509               list_element->IsAllocationSite()) {
510       AllocationSite* site = use_scratchpad ?
511           AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
512           AllocationSite::cast(list_element);
513       allocation_mementos_found += site->memento_found_count();
514       if (site->memento_found_count() > 0) {
515         active_allocation_sites++;
516         if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
517           trigger_deoptimization = true;
518         }
519         if (site->GetPretenureMode() == TENURED) {
520           tenure_decisions++;
521         } else {
522           dont_tenure_decisions++;
523         }
524         allocation_sites++;
525       }
526
527       if (deopt_maybe_tenured && site->IsMaybeTenure()) {
528         site->set_deopt_dependent_code(true);
529         trigger_deoptimization = true;
530       }
531
532       if (use_scratchpad) {
533         i++;
534       } else {
535         list_element = site->weak_next();
536       }
537     }
538
539     if (trigger_deoptimization) {
540       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
541     }
542
543     FlushAllocationSitesScratchpad();
544
545     if (FLAG_trace_pretenuring_statistics &&
546         (allocation_mementos_found > 0 ||
547          tenure_decisions > 0 ||
548          dont_tenure_decisions > 0)) {
549       PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
550              "#mementos, #tenure decisions, #donttenure decisions) "
551              "(%s, %d, %d, %d, %d, %d)\n",
552              use_scratchpad ? "use scratchpad" : "use list",
553              allocation_sites,
554              active_allocation_sites,
555              allocation_mementos_found,
556              tenure_decisions,
557              dont_tenure_decisions);
558     }
559   }
560 }
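// A rough sketch of the feedback loop above (the details live in
// AllocationSite::DigestPretenuringFeedback): each site compares the
// AllocationMementos found behind surviving new-space objects during this GC
// with the allocations it recorded; a sufficiently high survival ratio flips
// the site's decision to TENURED, and any decision change marks the site so
// that its dependent code is deoptimized via
// RequestDeoptMarkedAllocationSites() and DeoptMarkedAllocationSites() below.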
561
562
563 void Heap::DeoptMarkedAllocationSites() {
564   // TODO(hpayer): If iterating over the allocation sites list becomes a
565   // performance issue, use a cached data structure in the heap instead
566   // (similar to the allocation sites scratchpad).
567   Object* list_element = allocation_sites_list();
568   while (list_element->IsAllocationSite()) {
569     AllocationSite* site = AllocationSite::cast(list_element);
570     if (site->deopt_dependent_code()) {
571       site->dependent_code()->MarkCodeForDeoptimization(
572           isolate_,
573           DependentCode::kAllocationSiteTenuringChangedGroup);
574       site->set_deopt_dependent_code(false);
575     }
576     list_element = site->weak_next();
577   }
578   Deoptimizer::DeoptimizeMarkedCode(isolate_);
579 }
580
581
582 void Heap::GarbageCollectionEpilogue() {
583   store_buffer()->GCEpilogue();
584
585   // In release mode, we only zap the from space under heap verification.
586   if (Heap::ShouldZapGarbage()) {
587     ZapFromSpace();
588   }
589
590   // Process pretenuring feedback and update allocation sites.
591   ProcessPretenuringFeedback();
592
593 #ifdef VERIFY_HEAP
594   if (FLAG_verify_heap) {
595     Verify();
596   }
597 #endif
598
599   AllowHeapAllocation for_the_rest_of_the_epilogue;
600
601 #ifdef DEBUG
602   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
603   if (FLAG_print_handles) PrintHandles();
604   if (FLAG_gc_verbose) Print();
605   if (FLAG_code_stats) ReportCodeStatistics("After GC");
606 #endif
607   if (FLAG_deopt_every_n_garbage_collections > 0) {
608     // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
609     // the topmost optimized frame can be deoptimized safely, because it
610     // might not have a lazy bailout point right after its current PC.
611     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
612       Deoptimizer::DeoptimizeAll(isolate());
613       gcs_since_last_deopt_ = 0;
614     }
615   }
616
617   UpdateMaximumCommitted();
618
619   isolate_->counters()->alive_after_last_gc()->Set(
620       static_cast<int>(SizeOfObjects()));
621
622   isolate_->counters()->string_table_capacity()->Set(
623       string_table()->Capacity());
624   isolate_->counters()->number_of_symbols()->Set(
625       string_table()->NumberOfElements());
626
627   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
628     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
629         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
630             (crankshaft_codegen_bytes_generated_
631             + full_codegen_bytes_generated_)));
632   }
633
634   if (CommittedMemory() > 0) {
635     isolate_->counters()->external_fragmentation_total()->AddSample(
636         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
637
638     isolate_->counters()->heap_fraction_new_space()->
639         AddSample(static_cast<int>(
640             (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
641     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
642         static_cast<int>(
643             (old_pointer_space()->CommittedMemory() * 100.0) /
644             CommittedMemory()));
645     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
646         static_cast<int>(
647             (old_data_space()->CommittedMemory() * 100.0) /
648             CommittedMemory()));
649     isolate_->counters()->heap_fraction_code_space()->
650         AddSample(static_cast<int>(
651             (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
652     isolate_->counters()->heap_fraction_map_space()->AddSample(
653         static_cast<int>(
654             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
655     isolate_->counters()->heap_fraction_cell_space()->AddSample(
656         static_cast<int>(
657             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
658     isolate_->counters()->heap_fraction_property_cell_space()->
659         AddSample(static_cast<int>(
660             (property_cell_space()->CommittedMemory() * 100.0) /
661             CommittedMemory()));
662     isolate_->counters()->heap_fraction_lo_space()->
663         AddSample(static_cast<int>(
664             (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
665
666     isolate_->counters()->heap_sample_total_committed()->AddSample(
667         static_cast<int>(CommittedMemory() / KB));
668     isolate_->counters()->heap_sample_total_used()->AddSample(
669         static_cast<int>(SizeOfObjects() / KB));
670     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
671         static_cast<int>(map_space()->CommittedMemory() / KB));
672     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
673         static_cast<int>(cell_space()->CommittedMemory() / KB));
674     isolate_->counters()->
675         heap_sample_property_cell_space_committed()->
676             AddSample(static_cast<int>(
677                 property_cell_space()->CommittedMemory() / KB));
678     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
679         static_cast<int>(code_space()->CommittedMemory() / KB));
680
681     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
682         static_cast<int>(MaximumCommittedMemory() / KB));
683   }
684
685 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
686   isolate_->counters()->space##_bytes_available()->Set(                        \
687       static_cast<int>(space()->Available()));                                 \
688   isolate_->counters()->space##_bytes_committed()->Set(                        \
689       static_cast<int>(space()->CommittedMemory()));                           \
690   isolate_->counters()->space##_bytes_used()->Set(                             \
691       static_cast<int>(space()->SizeOfObjects()));
692 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
693   if (space()->CommittedMemory() > 0) {                                        \
694     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
695         static_cast<int>(100 -                                                 \
696             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
697   }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
699   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
700   UPDATE_FRAGMENTATION_FOR_SPACE(space)
701
702   UPDATE_COUNTERS_FOR_SPACE(new_space)
703   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
713
714 #ifdef DEBUG
715   ReportStatisticsAfterGC();
716 #endif  // DEBUG
717   isolate_->debug()->AfterGarbageCollection();
718
719   // Remember the last top pointer so that we can later find out
720   // whether we allocated in new space since the last GC.
721   new_space_top_after_last_gc_ = new_space()->top();
722 }
723
724
725 void Heap::CollectAllGarbage(int flags,
726                              const char* gc_reason,
727                              const v8::GCCallbackFlags gc_callback_flags) {
728   // Since we are ignoring the return value, the exact choice of space does
729   // not matter, so long as we do not specify NEW_SPACE, which would not
730   // cause a full GC.
731   mark_compact_collector_.SetFlags(flags);
732   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
733   mark_compact_collector_.SetFlags(kNoGCFlags);
734 }
735
736
737 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
738   // Since we are ignoring the return value, the exact choice of space does
739   // not matter, so long as we do not specify NEW_SPACE, which would not
740   // cause a full GC.
741   // Major GC would invoke weak handle callbacks on weakly reachable
742   // handles, but won't collect weakly reachable objects until next
743   // major GC.  Therefore if we collect aggressively and weak handle callback
744   // has been invoked, we rerun major GC to release objects which become
745   // garbage.
746   // Note: as weak callbacks can execute arbitrary code, we cannot
747   // hope that eventually there will be no weak callbacks invocations.
748   // Therefore stop recollecting after several attempts.
749   if (isolate()->concurrent_recompilation_enabled()) {
750     // The optimizing compiler may be unnecessarily holding on to memory.
751     DisallowHeapAllocation no_recursive_gc;
752     isolate()->optimizing_compiler_thread()->Flush();
753   }
754   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
755                                      kReduceMemoryFootprintMask);
756   isolate_->compilation_cache()->Clear();
757   const int kMaxNumberOfAttempts = 7;
758   const int kMinNumberOfAttempts = 2;
759   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
760     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
761         attempt + 1 >= kMinNumberOfAttempts) {
762       break;
763     }
764   }
765   mark_compact_collector()->SetFlags(kNoGCFlags);
766   new_space_.Shrink();
767   UncommitFromSpace();
768   incremental_marking()->UncommitMarkingDeque();
769 }
770
771
772 void Heap::EnsureFillerObjectAtTop() {
773   // There may be an allocation memento behind every object in new space.
774   // If we evacuate a new space that is not full, or if we are on the last
775   // page of the new space, there may be uninitialized memory behind the top
776   // pointer of the new space page. We store a filler object there to
777   // identify the unused space.
778   Address from_top = new_space_.top();
779   Address from_limit = new_space_.limit();
780   if (from_top < from_limit) {
781     int remaining_in_page = static_cast<int>(from_limit - from_top);
782     CreateFillerObjectAt(from_top, remaining_in_page);
783   }
784 }
785
786
787 bool Heap::CollectGarbage(GarbageCollector collector,
788                           const char* gc_reason,
789                           const char* collector_reason,
790                           const v8::GCCallbackFlags gc_callback_flags) {
791   // The VM is in the GC state until exiting this function.
792   VMState<GC> state(isolate_);
793
794 #ifdef DEBUG
795   // Reset the allocation timeout to the GC interval, but make sure to
796   // allow at least a few allocations after a collection. The reason
797   // for this is that we have a lot of allocation sequences and we
798   // assume that a garbage collection will allow the subsequent
799   // allocation attempts to go through.
800   allocation_timeout_ = Max(6, FLAG_gc_interval);
801 #endif
802
803   EnsureFillerObjectAtTop();
804
805   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
806     if (FLAG_trace_incremental_marking) {
807       PrintF("[IncrementalMarking] Scavenge during marking.\n");
808     }
809   }
810
811   if (collector == MARK_COMPACTOR &&
812       !mark_compact_collector()->abort_incremental_marking() &&
813       !incremental_marking()->IsStopped() &&
814       !incremental_marking()->should_hurry() &&
815       FLAG_incremental_marking_steps) {
816     // Make progress in incremental marking.
817     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
818     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
819                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
820     if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
821       if (FLAG_trace_incremental_marking) {
822         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
823       }
824       collector = SCAVENGER;
825       collector_reason = "incremental marking delaying mark-sweep";
826     }
827   }
828
829   bool next_gc_likely_to_collect_more = false;
830
831   { GCTracer tracer(this, gc_reason, collector_reason);
832     ASSERT(AllowHeapAllocation::IsAllowed());
833     DisallowHeapAllocation no_allocation_during_gc;
834     GarbageCollectionPrologue();
835     // The GC count was incremented in the prologue.  Tell the tracer about
836     // it.
837     tracer.set_gc_count(gc_count_);
838
839     // Tell the tracer which collector we've selected.
840     tracer.set_collector(collector);
841
842     {
843       HistogramTimerScope histogram_timer_scope(
844           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
845                                    : isolate_->counters()->gc_compactor());
846       next_gc_likely_to_collect_more =
847           PerformGarbageCollection(collector, &tracer, gc_callback_flags);
848     }
849
850     GarbageCollectionEpilogue();
851   }
852
853   // Start incremental marking for the next cycle. The heap snapshot
854   // generator needs incremental marking to stay off after it aborted.
855   if (!mark_compact_collector()->abort_incremental_marking() &&
856       incremental_marking()->IsStopped() &&
857       incremental_marking()->WorthActivating() &&
858       NextGCIsLikelyToBeFull()) {
859     incremental_marking()->Start();
860   }
861
862   return next_gc_likely_to_collect_more;
863 }
864
865
866 int Heap::NotifyContextDisposed() {
867   if (isolate()->concurrent_recompilation_enabled()) {
868     // Flush the queued recompilation tasks.
869     isolate()->optimizing_compiler_thread()->Flush();
870   }
871   flush_monomorphic_ics_ = true;
872   AgeInlineCaches();
873   return ++contexts_disposed_;
874 }
875
876
877 void Heap::MoveElements(FixedArray* array,
878                         int dst_index,
879                         int src_index,
880                         int len) {
881   if (len == 0) return;
882
883   ASSERT(array->map() != fixed_cow_array_map());
884   Object** dst_objects = array->data_start() + dst_index;
885   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
886   if (!InNewSpace(array)) {
887     for (int i = 0; i < len; i++) {
888       // TODO(hpayer): check store buffer for entries
889       if (InNewSpace(dst_objects[i])) {
890         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
891       }
892     }
893   }
894   incremental_marking()->RecordWrites(array);
895 }
896
897
898 #ifdef VERIFY_HEAP
899 // Helper class for verifying the string table.
900 class StringTableVerifier : public ObjectVisitor {
901  public:
902   void VisitPointers(Object** start, Object** end) {
903     // Visit all HeapObject pointers in [start, end).
904     for (Object** p = start; p < end; p++) {
905       if ((*p)->IsHeapObject()) {
906         // Check that the string is actually internalized.
907         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
908               (*p)->IsInternalizedString());
909       }
910     }
911   }
912 };
913
914
915 static void VerifyStringTable(Heap* heap) {
916   StringTableVerifier verifier;
917   heap->string_table()->IterateElements(&verifier);
918 }
919 #endif  // VERIFY_HEAP
920
921
922 static bool AbortIncrementalMarkingAndCollectGarbage(
923     Heap* heap,
924     AllocationSpace space,
925     const char* gc_reason = NULL) {
926   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
927   bool result = heap->CollectGarbage(space, gc_reason);
928   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
929   return result;
930 }
931
932
933 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
934   bool gc_performed = true;
935   int counter = 0;
936   static const int kThreshold = 20;
937   while (gc_performed && counter++ < kThreshold) {
938     gc_performed = false;
939     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
940     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
941       if (sizes[space] != 0) {
942         AllocationResult allocation;
943         if (space == NEW_SPACE) {
944           allocation = new_space()->AllocateRaw(sizes[space]);
945         } else {
946           allocation = paged_space(space)->AllocateRaw(sizes[space]);
947         }
948         FreeListNode* node;
949         if (!allocation.To(&node)) {
950           if (space == NEW_SPACE) {
951             Heap::CollectGarbage(NEW_SPACE,
952                                  "failed to reserve space in the new space");
953           } else {
954             AbortIncrementalMarkingAndCollectGarbage(
955                 this,
956                 static_cast<AllocationSpace>(space),
957                 "failed to reserve space in paged space");
958           }
959           gc_performed = true;
960           break;
961         } else {
962           // Mark with a free list node, in case we have a GC before
963           // deserializing.
964           node->set_size(this, sizes[space]);
965           locations_out[space] = node->address();
966         }
967       }
968     }
969   }
970
971   if (gc_performed) {
972     // Failed to reserve the space after several attempts.
973     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
974   }
975 }
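// Note on ReserveSpace: it is used when deserializing a snapshot to carve out
// one contiguous chunk per space up front, retrying with garbage collections
// (up to kThreshold attempts) until every requested size can be satisfied.
// Each reserved chunk is formatted as a free-list filler node so that a GC
// occurring before deserialization still sees a fully iterable heap.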
976
977
978 void Heap::EnsureFromSpaceIsCommitted() {
979   if (new_space_.CommitFromSpaceIfNeeded()) return;
980
981   // Committing memory to from space failed.
982   // Memory is exhausted and we will die.
983   V8::FatalProcessOutOfMemory("Committing semi space failed.");
984 }
985
986
987 void Heap::ClearJSFunctionResultCaches() {
988   if (isolate_->bootstrapper()->IsActive()) return;
989
990   Object* context = native_contexts_list();
991   while (!context->IsUndefined()) {
992     // Get the caches for this context. GC can happen when the context
993     // is not fully initialized, so the caches can be undefined.
994     Object* caches_or_undefined =
995         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
996     if (!caches_or_undefined->IsUndefined()) {
997       FixedArray* caches = FixedArray::cast(caches_or_undefined);
998       // Clear the caches:
999       int length = caches->length();
1000       for (int i = 0; i < length; i++) {
1001         JSFunctionResultCache::cast(caches->get(i))->Clear();
1002       }
1003     }
1004     // Get the next context:
1005     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1006   }
1007 }
1008
1009
1010 void Heap::ClearNormalizedMapCaches() {
1011   if (isolate_->bootstrapper()->IsActive() &&
1012       !incremental_marking()->IsMarking()) {
1013     return;
1014   }
1015
1016   Object* context = native_contexts_list();
1017   while (!context->IsUndefined()) {
1018     // GC can happen when the context is not fully initialized,
1019     // so the cache can be undefined.
1020     Object* cache =
1021         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1022     if (!cache->IsUndefined()) {
1023       NormalizedMapCache::cast(cache)->Clear();
1024     }
1025     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1026   }
1027 }
1028
1029
1030 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1031   if (start_new_space_size == 0) return;
1032
1033   promotion_rate_ =
1034         (static_cast<double>(promoted_objects_size_) /
1035             static_cast<double>(start_new_space_size) * 100);
1036
1037   semi_space_copied_rate_ =
1038         (static_cast<double>(semi_space_copied_object_size_) /
1039             static_cast<double>(start_new_space_size) * 100);
1040
1041   double survival_rate = promotion_rate_ + semi_space_copied_rate_;
1042
1043   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1044     high_survival_rate_period_length_++;
1045   } else {
1046     high_survival_rate_period_length_ = 0;
1047   }
1048 }
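// Worked example for the rates above: if the new space held 4 MB of objects at
// the start of the GC, 1 MB was promoted and 0.5 MB was copied within the
// semispaces, then promotion_rate_ = 25, semi_space_copied_rate_ = 12.5, and
// the combined survival rate of 37.5 is what is compared against
// kYoungSurvivalRateHighThreshold.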
1049
1050 bool Heap::PerformGarbageCollection(
1051     GarbageCollector collector,
1052     GCTracer* tracer,
1053     const v8::GCCallbackFlags gc_callback_flags) {
1054   int freed_global_handles = 0;
1055
1056   if (collector != SCAVENGER) {
1057     PROFILE(isolate_, CodeMovingGCEvent());
1058   }
1059
1060 #ifdef VERIFY_HEAP
1061   if (FLAG_verify_heap) {
1062     VerifyStringTable(this);
1063   }
1064 #endif
1065
1066   GCType gc_type =
1067       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1068
1069   { GCCallbacksScope scope(this);
1070     if (scope.CheckReenter()) {
1071       AllowHeapAllocation allow_allocation;
1072       GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1073       VMState<EXTERNAL> state(isolate_);
1074       HandleScope handle_scope(isolate_);
1075       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1076     }
1077   }
1078
1079   EnsureFromSpaceIsCommitted();
1080
1081   int start_new_space_size = Heap::new_space()->SizeAsInt();
1082
1083   if (IsHighSurvivalRate()) {
1084     // We speed up the incremental marker if it is running so that it
1085     // does not fall behind the rate of promotion, which would cause a
1086     // constantly growing old space.
1087     incremental_marking()->NotifyOfHighPromotionRate();
1088   }
1089
1090   if (collector == MARK_COMPACTOR) {
1091     // Perform mark-sweep with optional compaction.
1092     MarkCompact(tracer);
1093     sweep_generation_++;
1094     // Temporarily set the limit for the case when
1095     // PostGarbageCollectionProcessing allocates and triggers a GC. The real
1096     // limit is set after PostGarbageCollectionProcessing.
1097     old_generation_allocation_limit_ =
1098         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1099     old_gen_exhausted_ = false;
1100   } else {
1101     tracer_ = tracer;
1102     Scavenge();
1103     tracer_ = NULL;
1104   }
1105
1106   UpdateSurvivalStatistics(start_new_space_size);
1107
1108   isolate_->counters()->objs_since_last_young()->Set(0);
1109
1110   // Callbacks that fire after this point might trigger nested GCs and
1111   // restart incremental marking, so the assertion can't be moved down.
1112   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1113
1114   gc_post_processing_depth_++;
1115   { AllowHeapAllocation allow_allocation;
1116     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1117     freed_global_handles =
1118         isolate_->global_handles()->PostGarbageCollectionProcessing(
1119             collector, tracer);
1120   }
1121   gc_post_processing_depth_--;
1122
1123   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1124
1125   // Update relocatables.
1126   Relocatable::PostGarbageCollectionProcessing(isolate_);
1127
1128   if (collector == MARK_COMPACTOR) {
1129     // Register the amount of external allocated memory.
1130     amount_of_external_allocated_memory_at_last_global_gc_ =
1131         amount_of_external_allocated_memory_;
1132     old_generation_allocation_limit_ =
1133         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
1134                                      freed_global_handles);
1135   }
1136
1137   { GCCallbacksScope scope(this);
1138     if (scope.CheckReenter()) {
1139       AllowHeapAllocation allow_allocation;
1140       GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1141       VMState<EXTERNAL> state(isolate_);
1142       HandleScope handle_scope(isolate_);
1143       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1144     }
1145   }
1146
1147 #ifdef VERIFY_HEAP
1148   if (FLAG_verify_heap) {
1149     VerifyStringTable(this);
1150   }
1151 #endif
1152
1153   return freed_global_handles > 0;
1154 }
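// The boolean returned above becomes CollectGarbage()'s
// next_gc_likely_to_collect_more: weak global-handle callbacks that ran during
// PostGarbageCollectionProcessing may have released objects that only the next
// major GC can actually reclaim, which is why CollectAllAvailableGarbage()
// keeps collecting as long as this stays true.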
1155
1156
1157 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1158   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1159     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1160       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1161         v8::GCPrologueCallback callback =
1162             reinterpret_cast<v8::GCPrologueCallback>(
1163                 gc_prologue_callbacks_[i].callback);
1164         callback(gc_type, flags);
1165       } else {
1166         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1168       }
1169     }
1170   }
1171 }
1172
1173
1174 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1175                                    GCCallbackFlags gc_callback_flags) {
1176   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1177     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1178       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1179         v8::GCPrologueCallback callback =
1180             reinterpret_cast<v8::GCPrologueCallback>(
1181                 gc_epilogue_callbacks_[i].callback);
1182         callback(gc_type, gc_callback_flags);
1183       } else {
1184         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1185         gc_epilogue_callbacks_[i].callback(
1186             isolate, gc_type, gc_callback_flags);
1187       }
1188     }
1189   }
1190 }
1191
1192
1193 void Heap::MarkCompact(GCTracer* tracer) {
1194   gc_state_ = MARK_COMPACT;
1195   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1196
1197   uint64_t size_of_objects_before_gc = SizeOfObjects();
1198
1199   mark_compact_collector_.Prepare(tracer);
1200
1201   ms_count_++;
1202   tracer->set_full_gc_count(ms_count_);
1203
1204   MarkCompactPrologue();
1205
1206   mark_compact_collector_.CollectGarbage();
1207
1208   LOG(isolate_, ResourceEvent("markcompact", "end"));
1209
1210   gc_state_ = NOT_IN_GC;
1211
1212   isolate_->counters()->objs_since_last_full()->Set(0);
1213
1214   flush_monomorphic_ics_ = false;
1215
1216   if (FLAG_allocation_site_pretenuring) {
1217     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1218   }
1219 }
1220
1221
1222 void Heap::MarkCompactPrologue() {
1223   // At any old GC clear the keyed lookup cache to enable collection of unused
1224   // maps.
1225   isolate_->keyed_lookup_cache()->Clear();
1226   isolate_->context_slot_cache()->Clear();
1227   isolate_->descriptor_lookup_cache()->Clear();
1228   RegExpResultsCache::Clear(string_split_cache());
1229   RegExpResultsCache::Clear(regexp_multiple_cache());
1230
1231   isolate_->compilation_cache()->MarkCompactPrologue();
1232
1233   CompletelyClearInstanceofCache();
1234
1235   FlushNumberStringCache();
1236   if (FLAG_cleanup_code_caches_at_gc) {
1237     polymorphic_code_cache()->set_cache(undefined_value());
1238   }
1239
1240   ClearNormalizedMapCaches();
1241 }
1242
1243
1244 // Helper class for copying HeapObjects
1245 class ScavengeVisitor: public ObjectVisitor {
1246  public:
1247   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1248
1249   void VisitPointer(Object** p) { ScavengePointer(p); }
1250
1251   void VisitPointers(Object** start, Object** end) {
1252     // Copy all HeapObject pointers in [start, end)
1253     for (Object** p = start; p < end; p++) ScavengePointer(p);
1254   }
1255
1256  private:
1257   void ScavengePointer(Object** p) {
1258     Object* object = *p;
1259     if (!heap_->InNewSpace(object)) return;
1260     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1261                          reinterpret_cast<HeapObject*>(object));
1262   }
1263
1264   Heap* heap_;
1265 };
1266
1267
1268 #ifdef VERIFY_HEAP
1269 // Visitor class to verify pointers in code or data space do not point into
1270 // new space.
1271 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1272  public:
1273   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1274   void VisitPointers(Object** start, Object** end) {
1275     for (Object** current = start; current < end; current++) {
1276       if ((*current)->IsHeapObject()) {
1277         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1278       }
1279     }
1280   }
1281
1282  private:
1283   Heap* heap_;
1284 };
1285
1286
1287 static void VerifyNonPointerSpacePointers(Heap* heap) {
1288   // Verify that there are no pointers to new space in spaces where we
1289   // do not expect them.
1290   VerifyNonPointerSpacePointersVisitor v(heap);
1291   HeapObjectIterator code_it(heap->code_space());
1292   for (HeapObject* object = code_it.Next();
1293        object != NULL; object = code_it.Next())
1294     object->Iterate(&v);
1295
1296   // The old data space is normally swept conservatively, which means the
1297   // iterator does not work on it, so we normally skip the block below.
1298   if (!heap->old_data_space()->was_swept_conservatively()) {
1299     HeapObjectIterator data_it(heap->old_data_space());
1300     for (HeapObject* object = data_it.Next();
1301          object != NULL; object = data_it.Next())
1302       object->Iterate(&v);
1303   }
1304 }
1305 #endif  // VERIFY_HEAP
1306
1307
1308 void Heap::CheckNewSpaceExpansionCriteria() {
1309   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1310       survived_since_last_expansion_ > new_space_.Capacity()) {
1311     // Grow the size of new space if there is room to grow, enough data
1312     // has survived scavenge since the last expansion and we are not in
1313     // high promotion mode.
1314     new_space_.Grow();
1315     survived_since_last_expansion_ = 0;
1316   }
1317 }
1318
1319
1320 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1321   return heap->InNewSpace(*p) &&
1322       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1323 }
1324
1325
1326 void Heap::ScavengeStoreBufferCallback(
1327     Heap* heap,
1328     MemoryChunk* page,
1329     StoreBufferEvent event) {
1330   heap->store_buffer_rebuilder_.Callback(page, event);
1331 }
1332
1333
1334 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1335   if (event == kStoreBufferStartScanningPagesEvent) {
1336     start_of_current_page_ = NULL;
1337     current_page_ = NULL;
1338   } else if (event == kStoreBufferScanningPageEvent) {
1339     if (current_page_ != NULL) {
1340       // If this page already overflowed the store buffer during this iteration.
1341       if (current_page_->scan_on_scavenge()) {
1342         // Then we should wipe out the entries that have been added for it.
1343         store_buffer_->SetTop(start_of_current_page_);
1344       } else if (store_buffer_->Top() - start_of_current_page_ >=
1345                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1346         // Did we find too many pointers in the previous page?  The heuristic is
1347         // that no page can take more than 1/5 of the remaining slots in the store
1348         // buffer.
1349         current_page_->set_scan_on_scavenge(true);
1350         store_buffer_->SetTop(start_of_current_page_);
1351       } else {
1352         // In this case the page we scanned took a reasonable number of slots in
1353         // the store buffer.  It has now been rehabilitated and is no longer
1354         // marked scan_on_scavenge.
1355         ASSERT(!current_page_->scan_on_scavenge());
1356       }
1357     }
1358     start_of_current_page_ = store_buffer_->Top();
1359     current_page_ = page;
1360   } else if (event == kStoreBufferFullEvent) {
1361     // The current page overflowed the store buffer again.  Wipe out its entries
1362     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1363     // several times while scanning.
1364     if (current_page_ == NULL) {
1365       // Store Buffer overflowed while scanning promoted objects.  These are not
1366       // in any particular page, though they are likely to be clustered by the
1367       // allocation routines.
1368       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1369     } else {
1370       // Store Buffer overflowed while scanning a particular old space page for
1371       // pointers to new space.
1372       ASSERT(current_page_ == page);
1373       ASSERT(page != NULL);
1374       current_page_->set_scan_on_scavenge(true);
1375       ASSERT(start_of_current_page_ != store_buffer_->Top());
1376       store_buffer_->SetTop(start_of_current_page_);
1377     }
1378   } else {
1379     UNREACHABLE();
1380   }
1381 }
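// On the "1/5" heuristic above: the code tests
//   Top() - start_of_current_page_ >= (Limit() - Top()) >> 2,
// i.e. the entries added for this page (call them x) satisfy x >= (R - x) / 4,
// where R is the number of free slots when the page's scan started.
// Rearranging gives 5 * x >= R, so a page is switched to scan-on-scavenge mode
// (and its entries wiped) once it consumes at least one fifth of the slots
// that were still available at that point.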
1382
1383
1384 void PromotionQueue::Initialize() {
1385   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1386   // entries (where each is a pair of intptr_t). This allows us to simplify
1387   // the test for when to switch pages.
1388   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1389          == 0);
1390   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1391   front_ = rear_ =
1392       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1393   emergency_stack_ = NULL;
1394   guard_ = false;
1395 }
1396
1397
1398 void PromotionQueue::RelocateQueueHead() {
1399   ASSERT(emergency_stack_ == NULL);
1400
1401   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1402   intptr_t* head_start = rear_;
1403   intptr_t* head_end =
1404       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1405
1406   int entries_count =
1407       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1408
1409   emergency_stack_ = new List<Entry>(2 * entries_count);
1410
1411   while (head_start != head_end) {
1412     int size = static_cast<int>(*(head_start++));
1413     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1414     emergency_stack_->Add(Entry(obj, size));
1415   }
1416   rear_ = head_end;
1417 }
1418
1419
1420 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1421  public:
1422   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1423
1424   virtual Object* RetainAs(Object* object) {
1425     if (!heap_->InFromSpace(object)) {
1426       return object;
1427     }
1428
1429     MapWord map_word = HeapObject::cast(object)->map_word();
1430     if (map_word.IsForwardingAddress()) {
1431       return map_word.ToForwardingAddress();
1432     }
1433     return NULL;
1434   }
1435
1436  private:
1437   Heap* heap_;
1438 };
1439
1440
1441 void Heap::Scavenge() {
1442   RelocationLock relocation_lock(this);
1443
1444 #ifdef VERIFY_HEAP
1445   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1446 #endif
1447
1448   gc_state_ = SCAVENGE;
1449
1450   // Implements Cheney's copying algorithm
1451   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1452
1453   // Clear descriptor cache.
1454   isolate_->descriptor_lookup_cache()->Clear();
1455
1456   // Used for updating survived_since_last_expansion_ at function end.
1457   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1458
1459   SelectScavengingVisitorsTable();
1460
1461   incremental_marking()->PrepareForScavenge();
1462
1463   // Flip the semispaces.  After flipping, to space is empty, from space has
1464   // live objects.
1465   new_space_.Flip();
1466   new_space_.ResetAllocationInfo();
1467
1468   // We need to sweep newly copied objects which can be either in the
1469   // to space or promoted to the old generation.  For to-space
1470   // objects, we treat the bottom of the to space as a queue.  Newly
1471   // copied and unswept objects lie between a 'front' mark and the
1472   // allocation pointer.
1473   //
1474   // Promoted objects can go into various old-generation spaces, and
1475   // can be allocated internally in the spaces (from the free list).
1476   // We treat the top of the to space as a queue of addresses of
1477   // promoted objects.  The addresses of newly promoted and unswept
1478   // objects lie between a 'front' mark and a 'rear' mark that is
1479   // updated as a side effect of promoting an object.
1480   //
1481   // There is guaranteed to be enough room at the top of the to space
1482   // for the addresses of promoted objects: every object promoted
1483   // frees up its size in bytes from the top of the new space, and
1484   // objects are at least one pointer in size.
1485   Address new_space_front = new_space_.ToSpaceStart();
1486   promotion_queue_.Initialize();
1487
1488 #ifdef DEBUG
1489   store_buffer()->Clean();
1490 #endif
1491
1492   ScavengeVisitor scavenge_visitor(this);
1493   // Copy roots.
1494   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1495
1496   // Copy objects reachable from the old generation.
1497   {
1498     StoreBufferRebuildScope scope(this,
1499                                   store_buffer(),
1500                                   &ScavengeStoreBufferCallback);
1501     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1502   }
1503
1504   // Copy objects reachable from simple cells by scavenging cell values
1505   // directly.
1506   HeapObjectIterator cell_iterator(cell_space_);
1507   for (HeapObject* heap_object = cell_iterator.Next();
1508        heap_object != NULL;
1509        heap_object = cell_iterator.Next()) {
1510     if (heap_object->IsCell()) {
1511       Cell* cell = Cell::cast(heap_object);
1512       Address value_address = cell->ValueAddress();
1513       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1514     }
1515   }
1516
1517   // Copy objects reachable from global property cells by scavenging global
1518   // property cell values directly.
1519   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1520   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1521        heap_object != NULL;
1522        heap_object = js_global_property_cell_iterator.Next()) {
1523     if (heap_object->IsPropertyCell()) {
1524       PropertyCell* cell = PropertyCell::cast(heap_object);
1525       Address value_address = cell->ValueAddress();
1526       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1527       Address type_address = cell->TypeAddress();
1528       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1529     }
1530   }
1531
1532   // Copy objects reachable from the encountered weak collections list.
1533   scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1534
1535   // Copy objects reachable from the code flushing candidates list.
1536   MarkCompactCollector* collector = mark_compact_collector();
1537   if (collector->is_code_flushing_enabled()) {
1538     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1539   }
1540
1541   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1542
1543   while (isolate()->global_handles()->IterateObjectGroups(
1544       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1545     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1546   }
1547   isolate()->global_handles()->RemoveObjectGroups();
1548   isolate()->global_handles()->RemoveImplicitRefGroups();
1549
1550   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1551       &IsUnscavengedHeapObject);
1552   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1553       &scavenge_visitor);
1554   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1555
1556   UpdateNewSpaceReferencesInExternalStringTable(
1557       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1558
1559   promotion_queue_.Destroy();
1560
1561   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1562
1563   ScavengeWeakObjectRetainer weak_object_retainer(this);
1564   ProcessWeakReferences(&weak_object_retainer);
1565
1566   ASSERT(new_space_front == new_space_.top());
1567
1568   // Set age mark.
1569   new_space_.set_age_mark(new_space_.top());
1570
1571   new_space_.LowerInlineAllocationLimit(
1572       new_space_.inline_allocation_limit_step());
1573
1574   // Update how much has survived scavenge.
1575   IncrementYoungSurvivorsCounter(static_cast<int>(
1576       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1577
1578   LOG(isolate_, ResourceEvent("scavenge", "end"));
1579
1580   gc_state_ = NOT_IN_GC;
1581
1582   scavenges_since_last_idle_round_++;
1583 }
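// A compact standalone sketch, under toy assumptions, of the Cheney scan that
// the comment block in Scavenge() describes: newly copied objects sit between
// a scan cursor and the allocation top of to-space, and scanning them can
// append further copies, so the loop runs until the cursor catches up.  The
// ToyObj/ToyCopy/ToyScavenge names below are illustrative, and plain vectors
// stand in for the real semispaces.
#include <cstddef>
#include <vector>

struct ToyObj {
  std::vector<ToyObj*> fields;  // Outgoing pointers.
  ToyObj* forward;              // NULL until the object has been copied.
  ToyObj() : forward(NULL) {}
};

// Copies |obj| into |to_space| (or returns the existing copy), mirroring
// ScavengeObject + MigrateObject.
static ToyObj* ToyCopy(ToyObj* obj, std::vector<ToyObj*>* to_space) {
  if (obj == NULL) return NULL;
  if (obj->forward != NULL) return obj->forward;
  ToyObj* copy = new ToyObj(*obj);
  copy->forward = NULL;
  obj->forward = copy;
  to_space->push_back(copy);
  return copy;
}

// Cheney scan: treat to-space as a queue and process it until no unscanned
// copies remain, just as DoScavenge() loops until new_space_front == top().
static void ToyScavenge(std::vector<ToyObj*>* roots,
                        std::vector<ToyObj*>* to_space) {
  for (std::size_t i = 0; i < roots->size(); ++i) {
    (*roots)[i] = ToyCopy((*roots)[i], to_space);
  }
  for (std::size_t front = 0; front < to_space->size(); ++front) {
    ToyObj* obj = (*to_space)[front];
    for (std::size_t i = 0; i < obj->fields.size(); ++i) {
      obj->fields[i] = ToyCopy(obj->fields[i], to_space);
    }
  }
}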
1584
1585
1586 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1587                                                                 Object** p) {
1588   MapWord first_word = HeapObject::cast(*p)->map_word();
1589
1590   if (!first_word.IsForwardingAddress()) {
1591     // An unreachable external string can be finalized.
1592     heap->FinalizeExternalString(String::cast(*p));
1593     return NULL;
1594   }
1595
1596   // String is still reachable.
1597   return String::cast(first_word.ToForwardingAddress());
1598 }
1599
1600
1601 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1602     ExternalStringTableUpdaterCallback updater_func) {
1603 #ifdef VERIFY_HEAP
1604   if (FLAG_verify_heap) {
1605     external_string_table_.Verify();
1606   }
1607 #endif
1608
1609   if (external_string_table_.new_space_strings_.is_empty()) return;
1610
1611   Object** start = &external_string_table_.new_space_strings_[0];
1612   Object** end = start + external_string_table_.new_space_strings_.length();
1613   Object** last = start;
1614
1615   for (Object** p = start; p < end; ++p) {
1616     ASSERT(InFromSpace(*p));
1617     String* target = updater_func(this, p);
1618
1619     if (target == NULL) continue;
1620
1621     ASSERT(target->IsExternalString());
1622
1623     if (InNewSpace(target)) {
1624       // String is still in new space.  Update the table entry.
1625       *last = target;
1626       ++last;
1627     } else {
1628       // String got promoted.  Move it to the old string list.
1629       external_string_table_.AddOldString(target);
1630     }
1631   }
1632
1633   ASSERT(last <= end);
1634   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1635 }
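// A short sketch, with assumed toy types, of the compact-in-place pattern used
// above: surviving entries are written back through a trailing |last| cursor,
// promoted entries move to a second list, and the source list is then shrunk
// to the number of entries kept.  The "even means still young" test is only a
// stand-in for the real InNewSpace() check.
#include <cstddef>
#include <vector>

static void ToyPartitionStrings(std::vector<int>* new_space_strings,
                                std::vector<int>* old_space_strings) {
  std::size_t last = 0;
  for (std::size_t i = 0; i < new_space_strings->size(); ++i) {
    int value = (*new_space_strings)[i];
    if (value < 0) continue;                  // Dead entry: skip it.
    if (value % 2 == 0) {                     // Still young: keep, compacted.
      (*new_space_strings)[last++] = value;
    } else {                                  // Promoted: move to the old list.
      old_space_strings->push_back(value);
    }
  }
  new_space_strings->resize(last);            // Equivalent of ShrinkNewStrings.
}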
1636
1637
1638 void Heap::UpdateReferencesInExternalStringTable(
1639     ExternalStringTableUpdaterCallback updater_func) {
1640
1641   // Update old space string references.
1642   if (external_string_table_.old_space_strings_.length() > 0) {
1643     Object** start = &external_string_table_.old_space_strings_[0];
1644     Object** end = start + external_string_table_.old_space_strings_.length();
1645     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1646   }
1647
1648   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1649 }
1650
1651
1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1653   ProcessArrayBuffers(retainer);
1654   ProcessNativeContexts(retainer);
1655   // TODO(mvstanton): AllocationSites only need to be processed during
1656   // MARK_COMPACT, as they live in old space. Verify and address.
1657   ProcessAllocationSites(retainer);
1658 }
1659
1660
1661 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1662   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1663   // Update the head of the list of contexts.
1664   set_native_contexts_list(head);
1665 }
1666
1667
1668 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1669   Object* array_buffer_obj =
1670       VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1671   set_array_buffers_list(array_buffer_obj);
1672 }
1673
1674
1675 void Heap::TearDownArrayBuffers() {
1676   Object* undefined = undefined_value();
1677   for (Object* o = array_buffers_list(); o != undefined;) {
1678     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1679     Runtime::FreeArrayBuffer(isolate(), buffer);
1680     o = buffer->weak_next();
1681   }
1682   set_array_buffers_list(undefined);
1683 }
1684
1685
1686 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1687   Object* allocation_site_obj =
1688       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1689   set_allocation_sites_list(allocation_site_obj);
1690 }
1691
1692
1693 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1694   DisallowHeapAllocation no_allocation_scope;
1695   Object* cur = allocation_sites_list();
1696   bool marked = false;
1697   while (cur->IsAllocationSite()) {
1698     AllocationSite* casted = AllocationSite::cast(cur);
1699     if (casted->GetPretenureMode() == flag) {
1700       casted->ResetPretenureDecision();
1701       casted->set_deopt_dependent_code(true);
1702       marked = true;
1703     }
1704     cur = casted->weak_next();
1705   }
1706   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1707 }
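// A brief sketch, using assumed toy types, of the weak_next walk above: the
// allocation-site list is an intrusive singly linked list threaded through the
// sites themselves, and the loop stops at the first entry that is not a site
// (NULL plays that role here).
#include <cstddef>

struct ToySite {
  int pretenure_mode;
  bool deopt_dependent_code;
  ToySite* weak_next;  // NULL acts as the end-of-list sentinel in this sketch.
};

static bool ToyResetSites(ToySite* head, int flag) {
  bool marked = false;
  for (ToySite* cur = head; cur != NULL; cur = cur->weak_next) {
    if (cur->pretenure_mode == flag) {
      cur->deopt_dependent_code = true;
      marked = true;
    }
  }
  return marked;  // Caller requests deoptimization only if something changed.
}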
1708
1709
1710 void Heap::EvaluateOldSpaceLocalPretenuring(
1711     uint64_t size_of_objects_before_gc) {
1712   uint64_t size_of_objects_after_gc = SizeOfObjects();
1713   double old_generation_survival_rate =
1714       (static_cast<double>(size_of_objects_after_gc) * 100) /
1715           static_cast<double>(size_of_objects_before_gc);
1716
1717   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1718     // Too many objects died in the old generation, pretenuring of wrong
1719     // allocation sites may be the cause for that. We have to deopt all
1720     // dependent code registered in the allocation sites to re-evaluate
1721     // our pretenuring decisions.
1722     ResetAllAllocationSitesDependentCode(TENURED);
1723     if (FLAG_trace_pretenuring) {
1724       PrintF("Deopt all allocation sites dependent code due to low survival "
1725              "rate in the old generation %f\n", old_generation_survival_rate);
1726     }
1727   }
1728 }
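// Worked example of the check above, with illustrative numbers only: if
// size_of_objects_before_gc is 100 MB and size_of_objects_after_gc is 8 MB,
// old_generation_survival_rate is (8 * 100) / 100 = 8.0.  Whenever that value
// falls below kOldSurvivalRateLowThreshold, every TENURED allocation site is
// reset and its dependent code is marked for deoptimization.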
1729
1730
1731 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1732   DisallowHeapAllocation no_allocation;
1733   // All external strings are listed in the external string table.
1734
1735   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1736    public:
1737     explicit ExternalStringTableVisitorAdapter(
1738         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1739     virtual void VisitPointers(Object** start, Object** end) {
1740       for (Object** p = start; p < end; p++) {
1741         ASSERT((*p)->IsExternalString());
1742         visitor_->VisitExternalString(Utils::ToLocal(
1743             Handle<String>(String::cast(*p))));
1744       }
1745     }
1746    private:
1747     v8::ExternalResourceVisitor* visitor_;
1748   } external_string_table_visitor(visitor);
1749
1750   external_string_table_.Iterate(&external_string_table_visitor);
1751 }
1752
1753
1754 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1755  public:
1756   static inline void VisitPointer(Heap* heap, Object** p) {
1757     Object* object = *p;
1758     if (!heap->InNewSpace(object)) return;
1759     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1760                          reinterpret_cast<HeapObject*>(object));
1761   }
1762 };
1763
1764
1765 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1766                          Address new_space_front) {
1767   do {
1768     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1769     // The addresses new_space_front and new_space_.top() define a
1770     // queue of unprocessed copied objects.  Process them until the
1771     // queue is empty.
1772     while (new_space_front != new_space_.top()) {
1773       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1774         HeapObject* object = HeapObject::FromAddress(new_space_front);
1775         new_space_front +=
1776           NewSpaceScavenger::IterateBody(object->map(), object);
1777       } else {
1778         new_space_front =
1779             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1780       }
1781     }
1782
1783     // Promote and process all the to-be-promoted objects.
1784     {
1785       StoreBufferRebuildScope scope(this,
1786                                     store_buffer(),
1787                                     &ScavengeStoreBufferCallback);
1788       while (!promotion_queue()->is_empty()) {
1789         HeapObject* target;
1790         int size;
1791         promotion_queue()->remove(&target, &size);
1792
1793         // A promoted object might already have been partially visited
1794         // during old space pointer iteration. Thus we search specifically
1795         // for pointers to the from semispace instead of looking for
1796         // pointers to new space.
1797         ASSERT(!target->IsMap());
1798         IterateAndMarkPointersToFromSpace(target->address(),
1799                                           target->address() + size,
1800                                           &ScavengeObject);
1801       }
1802     }
1803
1804     // Take another spin if there are now unswept objects in new space
1805     // (there are currently no more unswept promoted objects).
1806   } while (new_space_front != new_space_.top());
1807
1808   return new_space_front;
1809 }
1810
1811
1812 STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
1813                kDoubleAlignmentMask) == 0);  // NOLINT
1814 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
1815                kDoubleAlignmentMask) == 0);  // NOLINT
1816 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1817                kDoubleAlignmentMask) == 0);  // NOLINT
1818
1819
1820 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1821                                               HeapObject* object,
1822                                               int size));
1823
1824 static HeapObject* EnsureDoubleAligned(Heap* heap,
1825                                        HeapObject* object,
1826                                        int size) {
1827   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1828     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1829     return HeapObject::FromAddress(object->address() + kPointerSize);
1830   } else {
1831     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1832                                kPointerSize);
1833     return object;
1834   }
1835 }
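// A standalone sketch, under an assumed 32-bit layout (pointer size 4, doubles
// needing 8-byte alignment), of the trick EnsureDoubleAligned() uses: one
// extra word is reserved by the caller, and the filler goes either before the
// object (when its start is misaligned) or after it (when it already is), so
// the double payload always ends up aligned.
#include <cstdint>

static const std::uintptr_t kToyPointerSize = 4;          // 32-bit assumption.
static const std::uintptr_t kToyDoubleAlignmentMask = 8 - 1;

// |address| points at size + kToyPointerSize bytes of fresh allocation.
// Returns the aligned object start; the skipped or trailing word is where a
// one-word filler object would be written in the real heap.
static std::uintptr_t ToyEnsureDoubleAligned(std::uintptr_t address) {
  if ((address & kToyDoubleAlignmentMask) != 0) {
    return address + kToyPointerSize;  // Filler precedes the object.
  }
  return address;                      // Filler follows it instead.
}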
1836
1837
1838 enum LoggingAndProfiling {
1839   LOGGING_AND_PROFILING_ENABLED,
1840   LOGGING_AND_PROFILING_DISABLED
1841 };
1842
1843
1844 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1845
1846
1847 template<MarksHandling marks_handling,
1848          LoggingAndProfiling logging_and_profiling_mode>
1849 class ScavengingVisitor : public StaticVisitorBase {
1850  public:
1851   static void Initialize() {
1852     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1853     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1854     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1855     table_.Register(kVisitByteArray, &EvacuateByteArray);
1856     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1857     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1858     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1859     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1860
1861     table_.Register(kVisitNativeContext,
1862                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1863                         template VisitSpecialized<Context::kSize>);
1864
1865     table_.Register(kVisitConsString,
1866                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1867                         template VisitSpecialized<ConsString::kSize>);
1868
1869     table_.Register(kVisitSlicedString,
1870                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1871                         template VisitSpecialized<SlicedString::kSize>);
1872
1873     table_.Register(kVisitSymbol,
1874                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1875                         template VisitSpecialized<Symbol::kSize>);
1876
1877     table_.Register(kVisitSharedFunctionInfo,
1878                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1879                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1880
1881     table_.Register(kVisitJSWeakCollection,
1882                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1883                     Visit);
1884
1885     table_.Register(kVisitJSArrayBuffer,
1886                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1887                     Visit);
1888
1889     table_.Register(kVisitJSTypedArray,
1890                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1891                     Visit);
1892
1893     table_.Register(kVisitJSDataView,
1894                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1895                     Visit);
1896
1897     table_.Register(kVisitJSRegExp,
1898                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1899                     Visit);
1900
1901     if (marks_handling == IGNORE_MARKS) {
1902       table_.Register(kVisitJSFunction,
1903                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1904                           template VisitSpecialized<JSFunction::kSize>);
1905     } else {
1906       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1907     }
1908
1909     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1910                                    kVisitDataObject,
1911                                    kVisitDataObjectGeneric>();
1912
1913     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1914                                    kVisitJSObject,
1915                                    kVisitJSObjectGeneric>();
1916
1917     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1918                                    kVisitStruct,
1919                                    kVisitStructGeneric>();
1920   }
1921
1922   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1923     return &table_;
1924   }
1925
1926  private:
1927   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1928
1929   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1930     bool should_record = false;
1931 #ifdef DEBUG
1932     should_record = FLAG_heap_stats;
1933 #endif
1934     should_record = should_record || FLAG_log_gc;
1935     if (should_record) {
1936       if (heap->new_space()->Contains(obj)) {
1937         heap->new_space()->RecordAllocation(obj);
1938       } else {
1939         heap->new_space()->RecordPromotion(obj);
1940       }
1941     }
1942   }
1943
1944   // Helper function used by CopyObject to copy a source object to an
1945   // allocated target object and update the forwarding pointer in the source
1946   // object.
1947   INLINE(static void MigrateObject(Heap* heap,
1948                                    HeapObject* source,
1949                                    HeapObject* target,
1950                                    int size)) {
1951     // Copy the content of source to target.
1952     heap->CopyBlock(target->address(), source->address(), size);
1953
1954     // Set the forwarding address.
1955     source->set_map_word(MapWord::FromForwardingAddress(target));
1956
1957     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1958       // Update NewSpace stats if necessary.
1959       RecordCopiedObject(heap, target);
1960       Isolate* isolate = heap->isolate();
1961       HeapProfiler* heap_profiler = isolate->heap_profiler();
1962       if (heap_profiler->is_tracking_object_moves()) {
1963         heap_profiler->ObjectMoveEvent(source->address(), target->address(),
1964                                        size);
1965       }
1966       if (isolate->logger()->is_logging_code_events() ||
1967           isolate->cpu_profiler()->is_profiling()) {
1968         if (target->IsSharedFunctionInfo()) {
1969           PROFILE(isolate, SharedFunctionInfoMoveEvent(
1970               source->address(), target->address()));
1971         }
1972       }
1973     }
1974
1975     if (marks_handling == TRANSFER_MARKS) {
1976       if (Marking::TransferColor(source, target)) {
1977         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1978       }
1979     }
1980   }
1981
1982
1983   template<ObjectContents object_contents, int alignment>
1984   static inline void EvacuateObject(Map* map,
1985                                     HeapObject** slot,
1986                                     HeapObject* object,
1987                                     int object_size) {
1988     SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
1989     SLOW_ASSERT(object->Size() == object_size);
1990
1991     int allocation_size = object_size;
1992     if (alignment != kObjectAlignment) {
1993       ASSERT(alignment == kDoubleAlignment);
1994       allocation_size += kPointerSize;
1995     }
1996
1997     Heap* heap = map->GetHeap();
1998     if (heap->ShouldBePromoted(object->address(), object_size)) {
1999       AllocationResult allocation;
2000
2001       if (object_contents == DATA_OBJECT) {
2002         ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2003         allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2004       } else {
2005         ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2006         allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2007       }
2008
2009       HeapObject* target = NULL;  // Initialization to please compiler.
2010       if (allocation.To(&target)) {
2011         if (alignment != kObjectAlignment) {
2012           target = EnsureDoubleAligned(heap, target, allocation_size);
2013         }
2014
2015         // Order is important: slot might be inside of the target if target
2016         // was allocated over a dead object and slot comes from the store
2017         // buffer.
2018         *slot = target;
2019         MigrateObject(heap, object, target, object_size);
2020
2021         if (object_contents == POINTER_OBJECT) {
2022           if (map->instance_type() == JS_FUNCTION_TYPE) {
2023             heap->promotion_queue()->insert(
2024                 target, JSFunction::kNonWeakFieldsEndOffset);
2025           } else {
2026             heap->promotion_queue()->insert(target, object_size);
2027           }
2028         }
2029
2030         heap->IncrementPromotedObjectsSize(object_size);
2031         return;
2032       }
2033     }
2034     ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2035     AllocationResult allocation =
2036         heap->new_space()->AllocateRaw(allocation_size);
2037     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2038     HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
2039
2040     if (alignment != kObjectAlignment) {
2041       target = EnsureDoubleAligned(heap, target, allocation_size);
2042     }
2043
2044     // Order is important: slot might be inside of the target if target
2045     // was allocated over a dead object and slot comes from the store
2046     // buffer.
2047     *slot = target;
2048     MigrateObject(heap, object, target, object_size);
2049     heap->IncrementSemiSpaceCopiedObjectSize(object_size);
2050     return;
2051   }
2052
2053
2054   static inline void EvacuateJSFunction(Map* map,
2055                                         HeapObject** slot,
2056                                         HeapObject* object) {
2057     ObjectEvacuationStrategy<POINTER_OBJECT>::
2058         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2059
2060     HeapObject* target = *slot;
2061     MarkBit mark_bit = Marking::MarkBitFrom(target);
2062     if (Marking::IsBlack(mark_bit)) {
2063       // This object is black and it might not be rescanned by the marker.
2064       // We should explicitly record code entry slot for compaction because
2065       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2066       // miss it as it is not HeapObject-tagged.
2067       Address code_entry_slot =
2068           target->address() + JSFunction::kCodeEntryOffset;
2069       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2070       map->GetHeap()->mark_compact_collector()->
2071           RecordCodeEntrySlot(code_entry_slot, code);
2072     }
2073   }
2074
2075
2076   static inline void EvacuateFixedArray(Map* map,
2077                                         HeapObject** slot,
2078                                         HeapObject* object) {
2079     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2080     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2081         map, slot, object, object_size);
2082   }
2083
2084
2085   static inline void EvacuateFixedDoubleArray(Map* map,
2086                                               HeapObject** slot,
2087                                               HeapObject* object) {
2088     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2089     int object_size = FixedDoubleArray::SizeFor(length);
2090     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2091         map, slot, object, object_size);
2092   }
2093
2094
2095   static inline void EvacuateFixedTypedArray(Map* map,
2096                                              HeapObject** slot,
2097                                              HeapObject* object) {
2098     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2099     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2100         map, slot, object, object_size);
2101   }
2102
2103
2104   static inline void EvacuateFixedFloat64Array(Map* map,
2105                                                HeapObject** slot,
2106                                                HeapObject* object) {
2107     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2108     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2109         map, slot, object, object_size);
2110   }
2111
2112
2113   static inline void EvacuateByteArray(Map* map,
2114                                        HeapObject** slot,
2115                                        HeapObject* object) {
2116     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2117     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2118         map, slot, object, object_size);
2119   }
2120
2121
2122   static inline void EvacuateSeqOneByteString(Map* map,
2123                                             HeapObject** slot,
2124                                             HeapObject* object) {
2125     int object_size = SeqOneByteString::cast(object)->
2126         SeqOneByteStringSize(map->instance_type());
2127     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2128         map, slot, object, object_size);
2129   }
2130
2131
2132   static inline void EvacuateSeqTwoByteString(Map* map,
2133                                               HeapObject** slot,
2134                                               HeapObject* object) {
2135     int object_size = SeqTwoByteString::cast(object)->
2136         SeqTwoByteStringSize(map->instance_type());
2137     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2138         map, slot, object, object_size);
2139   }
2140
2141
2142   static inline bool IsShortcutCandidate(int type) {
2143     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2144   }
2145
2146   static inline void EvacuateShortcutCandidate(Map* map,
2147                                                HeapObject** slot,
2148                                                HeapObject* object) {
2149     ASSERT(IsShortcutCandidate(map->instance_type()));
2150
2151     Heap* heap = map->GetHeap();
2152
2153     if (marks_handling == IGNORE_MARKS &&
2154         ConsString::cast(object)->unchecked_second() ==
2155         heap->empty_string()) {
2156       HeapObject* first =
2157           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2158
2159       *slot = first;
2160
2161       if (!heap->InNewSpace(first)) {
2162         object->set_map_word(MapWord::FromForwardingAddress(first));
2163         return;
2164       }
2165
2166       MapWord first_word = first->map_word();
2167       if (first_word.IsForwardingAddress()) {
2168         HeapObject* target = first_word.ToForwardingAddress();
2169
2170         *slot = target;
2171         object->set_map_word(MapWord::FromForwardingAddress(target));
2172         return;
2173       }
2174
2175       heap->DoScavengeObject(first->map(), slot, first);
2176       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2177       return;
2178     }
2179
2180     int object_size = ConsString::kSize;
2181     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2182         map, slot, object, object_size);
2183   }
2184
2185   template<ObjectContents object_contents>
2186   class ObjectEvacuationStrategy {
2187    public:
2188     template<int object_size>
2189     static inline void VisitSpecialized(Map* map,
2190                                         HeapObject** slot,
2191                                         HeapObject* object) {
2192       EvacuateObject<object_contents, kObjectAlignment>(
2193           map, slot, object, object_size);
2194     }
2195
2196     static inline void Visit(Map* map,
2197                              HeapObject** slot,
2198                              HeapObject* object) {
2199       int object_size = map->instance_size();
2200       EvacuateObject<object_contents, kObjectAlignment>(
2201           map, slot, object, object_size);
2202     }
2203   };
2204
2205   static VisitorDispatchTable<ScavengingCallback> table_;
2206 };
2207
2208
2209 template<MarksHandling marks_handling,
2210          LoggingAndProfiling logging_and_profiling_mode>
2211 VisitorDispatchTable<ScavengingCallback>
2212     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2213
2214
2215 static void InitializeScavengingVisitorsTables() {
2216   ScavengingVisitor<TRANSFER_MARKS,
2217                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2218   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2219   ScavengingVisitor<TRANSFER_MARKS,
2220                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2221   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2222 }
2223
2224
2225 void Heap::SelectScavengingVisitorsTable() {
2226   bool logging_and_profiling =
2227       isolate()->logger()->is_logging() ||
2228       isolate()->cpu_profiler()->is_profiling() ||
2229       (isolate()->heap_profiler() != NULL &&
2230        isolate()->heap_profiler()->is_tracking_object_moves());
2231
2232   if (!incremental_marking()->IsMarking()) {
2233     if (!logging_and_profiling) {
2234       scavenging_visitors_table_.CopyFrom(
2235           ScavengingVisitor<IGNORE_MARKS,
2236                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2237     } else {
2238       scavenging_visitors_table_.CopyFrom(
2239           ScavengingVisitor<IGNORE_MARKS,
2240                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2241     }
2242   } else {
2243     if (!logging_and_profiling) {
2244       scavenging_visitors_table_.CopyFrom(
2245           ScavengingVisitor<TRANSFER_MARKS,
2246                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2247     } else {
2248       scavenging_visitors_table_.CopyFrom(
2249           ScavengingVisitor<TRANSFER_MARKS,
2250                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2251     }
2252
2253     if (incremental_marking()->IsCompacting()) {
2254       // When compacting, forbid short-circuiting of cons strings.
2255       // The scavenging code relies on the fact that a new space object
2256       // cannot be evacuated into an evacuation candidate, but
2257       // short-circuiting violates this assumption.
2258       scavenging_visitors_table_.Register(
2259           StaticVisitorBase::kVisitShortcutCandidate,
2260           scavenging_visitors_table_.GetVisitorById(
2261               StaticVisitorBase::kVisitConsString));
2262     }
2263   }
2264 }
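// A minimal sketch, with toy stand-ins, of the selection above: each
// combination of the two template parameters owns one statically initialized
// dispatch table, and the heap copies whichever table matches the current
// marking and logging state.  ToyTable, kToyTables and ToySelect are
// illustrative names, not V8 types.
struct ToyTable { const char* name; };

static ToyTable kToyTables[2][2] = {
  { { "ignore-marks / no-logging" },   { "ignore-marks / logging" } },
  { { "transfer-marks / no-logging" }, { "transfer-marks / logging" } },
};

static const ToyTable* ToySelect(bool incremental_marking, bool logging) {
  return &kToyTables[incremental_marking ? 1 : 0][logging ? 1 : 0];
}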
2265
2266
2267 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2268   SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2269   MapWord first_word = object->map_word();
2270   SLOW_ASSERT(!first_word.IsForwardingAddress());
2271   Map* map = first_word.ToMap();
2272   map->GetHeap()->DoScavengeObject(map, p, object);
2273 }
2274
2275
2276 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2277                                           int instance_size) {
2278   Object* result;
2279   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2280   if (!allocation.To(&result)) return allocation;
2281
2282   // Map::cast cannot be used due to uninitialized map field.
2283   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2284   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2285   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2286   reinterpret_cast<Map*>(result)->set_visitor_id(
2287         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2288   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2289   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2290   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2291   reinterpret_cast<Map*>(result)->set_bit_field(0);
2292   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2293   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2294                    Map::OwnsDescriptors::encode(true);
2295   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2296   return result;
2297 }
2298
2299
2300 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2301                                    int instance_size,
2302                                    ElementsKind elements_kind) {
2303   HeapObject* result;
2304   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2305   if (!allocation.To(&result)) return allocation;
2306
2307   result->set_map_no_write_barrier(meta_map());
2308   Map* map = Map::cast(result);
2309   map->set_instance_type(instance_type);
2310   map->set_visitor_id(
2311       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2312   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2313   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2314   map->set_instance_size(instance_size);
2315   map->set_inobject_properties(0);
2316   map->set_pre_allocated_property_fields(0);
2317   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2318   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2319                           SKIP_WRITE_BARRIER);
2320   map->init_back_pointer(undefined_value());
2321   map->set_unused_property_fields(0);
2322   map->set_instance_descriptors(empty_descriptor_array());
2323   map->set_bit_field(0);
2324   map->set_bit_field2(1 << Map::kIsExtensible);
2325   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2326                    Map::OwnsDescriptors::encode(true);
2327   map->set_bit_field3(bit_field3);
2328   map->set_elements_kind(elements_kind);
2329
2330   return map;
2331 }
2332
2333
2334 AllocationResult Heap::AllocateFillerObject(int size,
2335                                             bool double_align,
2336                                             AllocationSpace space) {
2337   HeapObject* obj;
2338   { AllocationResult allocation = AllocateRaw(size, space, space);
2339     if (!allocation.To(&obj)) return allocation;
2340   }
2341 #ifdef DEBUG
2342   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2343   ASSERT(chunk->owner()->identity() == space);
2344 #endif
2345   CreateFillerObjectAt(obj->address(), size);
2346   return obj;
2347 }
2348
2349
2350 const Heap::StringTypeTable Heap::string_type_table[] = {
2351 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2352   {type, size, k##camel_name##MapRootIndex},
2353   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2354 #undef STRING_TYPE_ELEMENT
2355 };
2356
2357
2358 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2359 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2360   {contents, k##name##RootIndex},
2361   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2362 #undef CONSTANT_STRING_ELEMENT
2363 };
2364
2365
2366 const Heap::StructTable Heap::struct_table[] = {
2367 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2368   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2369   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2370 #undef STRUCT_TABLE_ELEMENT
2371 };
2372
2373
2374 bool Heap::CreateInitialMaps() {
2375   HeapObject* obj;
2376   { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2377     if (!allocation.To(&obj)) return false;
2378   }
2379   // Map::cast cannot be used due to uninitialized map field.
2380   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2381   set_meta_map(new_meta_map);
2382   new_meta_map->set_map(new_meta_map);
2383
2384   {  // Partial map allocation
2385 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                  \
2386     { Map* map;                                                                \
2387       if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2388       set_##field_name##_map(map);                                             \
2389     }
2390
2391     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2392     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2393     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2394     ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2395                          constant_pool_array);
2396
2397 #undef ALLOCATE_PARTIAL_MAP
2398   }
2399
2400   // Allocate the empty array.
2401   { AllocationResult allocation = AllocateEmptyFixedArray();
2402     if (!allocation.To(&obj)) return false;
2403   }
2404   set_empty_fixed_array(FixedArray::cast(obj));
2405
2406   { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2407     if (!allocation.To(&obj)) return false;
2408   }
2409   set_null_value(Oddball::cast(obj));
2410   Oddball::cast(obj)->set_kind(Oddball::kNull);
2411
2412   { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2413     if (!allocation.To(&obj)) return false;
2414   }
2415   set_undefined_value(Oddball::cast(obj));
2416   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2417   ASSERT(!InNewSpace(undefined_value()));
2418
2419   // Set preliminary exception sentinel value before actually initializing it.
2420   set_exception(null_value());
2421
2422   // Allocate the empty descriptor array.
2423   { AllocationResult allocation = AllocateEmptyFixedArray();
2424     if (!allocation.To(&obj)) return false;
2425   }
2426   set_empty_descriptor_array(DescriptorArray::cast(obj));
2427
2428   // Allocate the constant pool array.
2429   { AllocationResult allocation = AllocateEmptyConstantPoolArray();
2430     if (!allocation.To(&obj)) return false;
2431   }
2432   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2433
2434   // Fix the instance_descriptors for the existing maps.
2435   meta_map()->set_code_cache(empty_fixed_array());
2436   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2437   meta_map()->init_back_pointer(undefined_value());
2438   meta_map()->set_instance_descriptors(empty_descriptor_array());
2439
2440   fixed_array_map()->set_code_cache(empty_fixed_array());
2441   fixed_array_map()->set_dependent_code(
2442       DependentCode::cast(empty_fixed_array()));
2443   fixed_array_map()->init_back_pointer(undefined_value());
2444   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2445
2446   undefined_map()->set_code_cache(empty_fixed_array());
2447   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2448   undefined_map()->init_back_pointer(undefined_value());
2449   undefined_map()->set_instance_descriptors(empty_descriptor_array());
2450
2451   null_map()->set_code_cache(empty_fixed_array());
2452   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2453   null_map()->init_back_pointer(undefined_value());
2454   null_map()->set_instance_descriptors(empty_descriptor_array());
2455
2456   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2457   constant_pool_array_map()->set_dependent_code(
2458       DependentCode::cast(empty_fixed_array()));
2459   constant_pool_array_map()->init_back_pointer(undefined_value());
2460   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2461
2462   // Fix prototype object for existing maps.
2463   meta_map()->set_prototype(null_value());
2464   meta_map()->set_constructor(null_value());
2465
2466   fixed_array_map()->set_prototype(null_value());
2467   fixed_array_map()->set_constructor(null_value());
2468
2469   undefined_map()->set_prototype(null_value());
2470   undefined_map()->set_constructor(null_value());
2471
2472   null_map()->set_prototype(null_value());
2473   null_map()->set_constructor(null_value());
2474
2475   constant_pool_array_map()->set_prototype(null_value());
2476   constant_pool_array_map()->set_constructor(null_value());
2477
2478   {  // Map allocation
2479 #define ALLOCATE_MAP(instance_type, size, field_name)                          \
2480     { Map* map;                                                                \
2481       if (!AllocateMap((instance_type), size).To(&map)) return false;          \
2482       set_##field_name##_map(map);                                             \
2483     }
2484
2485 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                        \
2486     ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2487
2488     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2489     ASSERT(fixed_array_map() != fixed_cow_array_map());
2490
2491     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2492     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2493     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2494     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2495
2496     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2497     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2498     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2499     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2500     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2501     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2502     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2503
2504     for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2505       const StringTypeTable& entry = string_type_table[i];
2506       { AllocationResult allocation = AllocateMap(entry.type, entry.size);
2507         if (!allocation.To(&obj)) return false;
2508       }
2509       // Mark cons string maps as unstable, because their objects can change
2510       // maps during GC.
2511       Map* map = Map::cast(obj);
2512       if (StringShape(entry.type).IsCons()) map->mark_unstable();
2513       roots_[entry.index] = map;
2514     }
2515
2516     ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2517     undetectable_string_map()->set_is_undetectable();
2518
2519     ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2520     undetectable_ascii_string_map()->set_is_undetectable();
2521
2522     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2523     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2524     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2525
2526 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
2527     ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
2528         external_##type##_array)
2529
2530      TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2531 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2532
2533 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
2534     ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
2535         fixed_##type##_array)
2536
2537      TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2538 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2539
2540     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2541
2542     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2543
2544     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2545     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2546     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2547     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2548
2549
2550     for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2551       const StructTable& entry = struct_table[i];
2552       Map* map;
2553       if (!AllocateMap(entry.type, entry.size).To(&map))
2554         return false;
2555       roots_[entry.index] = map;
2556     }
2557
2558     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2560
2561     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2567
2568     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569     native_context_map()->set_dictionary_map(true);
2570     native_context_map()->set_visitor_id(
2571         StaticVisitorBase::kVisitNativeContext);
2572
2573     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574         shared_function_info)
2575
2576     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2577         message_object)
2578     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2579         external)
2580     external_map()->set_is_extensible(false);
2581 #undef ALLOCATE_VARSIZE_MAP
2582 #undef ALLOCATE_MAP
2583   }
2584
2585   { // Empty arrays
2586     { ByteArray* byte_array;
2587       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2588       set_empty_byte_array(byte_array);
2589     }
2590
2591 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)           \
2592     { ExternalArray* obj;                                                      \
2593       if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj))        \
2594           return false;                                                        \
2595       set_empty_external_##type##_array(obj);                                  \
2596     }
2597
2598     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2600
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)        \
2602     { FixedTypedArrayBase* obj;                                                \
2603       if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj))      \
2604           return false;                                                        \
2605       set_empty_fixed_##type##_array(obj);                                     \
2606     }
2607
2608     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2609 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2610   }
2611   ASSERT(!InNewSpace(empty_fixed_array()));
2612   return true;
2613 }
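// A small sketch, using assumed toy types, of the bootstrap circularity that
// CreateInitialMaps() resolves above: the map that describes all maps must be
// its own map, so it is allocated "partially", pointed at itself, and only
// later patched with fields (descriptors, code cache, prototype) whose values
// can only be allocated once the first maps exist.
#include <cstddef>

struct ToyMap {
  ToyMap* map;              // Every object, including a map, has a map.
  const void* descriptors;  // Filled in during the later fix-up phase.
  ToyMap() : map(NULL), descriptors(NULL) {}
};

static ToyMap* ToyCreateMetaMap() {
  ToyMap* meta = new ToyMap();
  meta->map = meta;  // Self-referential: the meta map describes itself.
  return meta;
}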
2614
2615
2616 AllocationResult Heap::AllocateHeapNumber(double value,
2617                                           PretenureFlag pretenure) {
2618   // Statically ensure that it is safe to allocate heap numbers in paged
2619   // spaces.
2620   int size = HeapNumber::kSize;
2621   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2622
2623   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2624
2625   HeapObject* result;
2626   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2627     if (!allocation.To(&result)) return allocation;
2628   }
2629
2630   result->set_map_no_write_barrier(heap_number_map());
2631   HeapNumber::cast(result)->set_value(value);
2632   return result;
2633 }
2634
2635
2636 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2637   V(Float32x4, float32x4)                  \
2638   V(Float64x2, float64x2)                  \
2639   V(Int32x4, int32x4)
2640
2641
2642 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type)               \
2643 AllocationResult Heap::Allocate##TYPE(type##_value_t value,           \
2644                                       PretenureFlag pretenure) {      \
2645   STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize);      \
2646                                                                       \
2647   AllocationSpace space =                                             \
2648       SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure);            \
2649                                                                       \
2650   HeapObject* result;                                                 \
2651   { AllocationResult allocation =                                     \
2652         AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE);              \
2653     if (!allocation.To(&result)) return allocation;                   \
2654   }                                                                   \
2655                                                                       \
2656   result->set_map_no_write_barrier(                                   \
2657   isolate()->native_context()->type##_function()->initial_map());     \
2658   JSObject::cast(result)->set_properties(empty_fixed_array());        \
2659   JSObject::cast(result)->set_elements(empty_fixed_array());          \
2660                                                                       \
2661   HeapObject* storage;                                                \
2662   int storage_size =                                                  \
2663       FixedTypedArrayBase::kDataOffset + k##TYPE##Size;               \
2664   space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure);       \
2665   { AllocationResult allocation =                                     \
2666        AllocateRaw(storage_size, space, OLD_DATA_SPACE);              \
2667     if (!allocation.To(&storage)) return allocation;                  \
2668   }                                                                   \
2669                                                                       \
2670   storage->set_map(                                                   \
2671       *isolate()->factory()->fixed_##type##_array_map());             \
2672   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2673   elements->set_length(static_cast<int>(1));                          \
2674   memset(elements->DataPtr(), 0, elements->DataSize());               \
2675   Fixed##TYPE##Array::cast(storage)->set(0, value);                   \
2676   TYPE::cast(result)->set_value(storage);                             \
2677   return result;                                                      \
2678 }
2679
2680
2681 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
2682
2683
2684 AllocationResult Heap::AllocateCell(Object* value) {
2685   int size = Cell::kSize;
2686   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2687
2688   HeapObject* result;
2689   { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2690     if (!allocation.To(&result)) return allocation;
2691   }
2692   result->set_map_no_write_barrier(cell_map());
2693   Cell::cast(result)->set_value(value);
2694   return result;
2695 }
2696
2697
2698 AllocationResult Heap::AllocatePropertyCell() {
2699   int size = PropertyCell::kSize;
2700   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2701
2702   HeapObject* result;
2703   AllocationResult allocation =
2704       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2705   if (!allocation.To(&result)) return allocation;
2706
2707   result->set_map_no_write_barrier(global_property_cell_map());
2708   PropertyCell* cell = PropertyCell::cast(result);
2709   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2710                            SKIP_WRITE_BARRIER);
2711   cell->set_value(the_hole_value());
2712   cell->set_type(HeapType::None());
2713   return result;
2714 }
2715
2716
2717 void Heap::CreateApiObjects() {
2718   HandleScope scope(isolate());
2719   Factory* factory = isolate()->factory();
2720   Handle<Map> new_neander_map =
2721       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2722
2723   // Don't use Smi-only elements optimizations for objects with the neander
2724   // map. There are too many cases where element values are set directly with a
2725   // bottleneck to trap the Smi-only -> fast elements transition, and there
2726   // appears to be no benefit in optimizing this case.
2727   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2728   set_neander_map(*new_neander_map);
2729
2730   Handle<JSObject> listeners = factory->NewNeanderObject();
2731   Handle<FixedArray> elements = factory->NewFixedArray(2);
2732   elements->set(0, Smi::FromInt(0));
2733   listeners->set_elements(*elements);
2734   set_message_listeners(*listeners);
2735 }
2736
2737
2738 void Heap::CreateJSEntryStub() {
2739   JSEntryStub stub(isolate());
2740   set_js_entry_code(*stub.GetCode());
2741 }
2742
2743
2744 void Heap::CreateJSConstructEntryStub() {
2745   JSConstructEntryStub stub(isolate());
2746   set_js_construct_entry_code(*stub.GetCode());
2747 }
2748
2749
2750 void Heap::CreateFixedStubs() {
2751   // Here we create roots for fixed stubs. They are needed at GC
2752   // for cooking and uncooking (check out frames.cc).
2753   // This eliminates the need for doing dictionary lookups in the
2754   // stub cache for these stubs.
2755   HandleScope scope(isolate());
2756
2757   // Create stubs that should be there, so we don't unexpectedly have to
2758   // create them if we need them during the creation of another stub.
2759   // Stub creation mixes raw pointers and handles in an unsafe manner so
2760   // we cannot create stubs while we are creating stubs.
2761   CodeStub::GenerateStubsAheadOfTime(isolate());
2762
2763   // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2764   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2765   // is created.
2766
2767   // gcc-4.4 has problems generating correct code for the following snippet:
2768   // {  JSEntryStub stub;
2769   //    js_entry_code_ = *stub.GetCode();
2770   // }
2771   // {  JSConstructEntryStub stub;
2772   //    js_construct_entry_code_ = *stub.GetCode();
2773   // }
2774   // To work around the problem, make separate functions without inlining.
2775   Heap::CreateJSEntryStub();
2776   Heap::CreateJSConstructEntryStub();
2777 }
2778
2779
2780 void Heap::CreateInitialObjects() {
2781   HandleScope scope(isolate());
2782   Factory* factory = isolate()->factory();
2783
2784   // The -0 value must be set before NumberFromDouble works.
2785   set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
2786   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2787
2788   set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
2789   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
2790
2791   // The hole has not been created yet, but we want to put something
2792   // predictable in the gaps in the string table, so let's make that Smi zero.
2793   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2794
2795   // Allocate initial string table.
2796   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2797
2798   // Finish initializing oddballs after creating the string table.
2799   Oddball::Initialize(isolate(),
2800                       factory->undefined_value(),
2801                       "undefined",
2802                       factory->nan_value(),
2803                       Oddball::kUndefined);
2804
2805   // Initialize the null_value.
2806   Oddball::Initialize(isolate(),
2807                       factory->null_value(),
2808                       "null",
2809                       handle(Smi::FromInt(0), isolate()),
2810                       Oddball::kNull);
2811
2812   set_true_value(*factory->NewOddball(factory->boolean_map(),
2813                                       "true",
2814                                       handle(Smi::FromInt(1), isolate()),
2815                                       Oddball::kTrue));
2816
2817   set_false_value(*factory->NewOddball(factory->boolean_map(),
2818                                        "false",
2819                                        handle(Smi::FromInt(0), isolate()),
2820                                        Oddball::kFalse));
2821
2822   set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
2823                                           "hole",
2824                                           handle(Smi::FromInt(-1), isolate()),
2825                                           Oddball::kTheHole));
2826
2827   set_uninitialized_value(
2828       *factory->NewOddball(factory->uninitialized_map(),
2829                            "uninitialized",
2830                            handle(Smi::FromInt(-1), isolate()),
2831                            Oddball::kUninitialized));
2832
2833   set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
2834                                             "arguments_marker",
2835                                             handle(Smi::FromInt(-4), isolate()),
2836                                             Oddball::kArgumentMarker));
2837
2838   set_no_interceptor_result_sentinel(
2839       *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
2840                            "no_interceptor_result_sentinel",
2841                            handle(Smi::FromInt(-2), isolate()),
2842                            Oddball::kOther));
2843
2844   set_termination_exception(
2845       *factory->NewOddball(factory->termination_exception_map(),
2846                            "termination_exception",
2847                            handle(Smi::FromInt(-3), isolate()),
2848                            Oddball::kOther));
2849
2850   set_exception(
2851       *factory->NewOddball(factory->exception_map(),
2852                            "exception",
2853                            handle(Smi::FromInt(-5), isolate()),
2854                            Oddball::kException));
2855
2856   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
2857     Handle<String> str =
2858         factory->InternalizeUtf8String(constant_string_table[i].contents);
2859     roots_[constant_string_table[i].index] = *str;
2860   }
2861
2862   // Allocate the hidden string which is used to identify the hidden properties
2863   // in JSObjects. The hash code has a special value so that it will not match
2864   // the empty string when searching for the property. It cannot be part of the
2865   // loop above because it needs to be allocated manually with the special
2866   // hash code in place. The hash code for the hidden_string is zero to ensure
2867   // that it will always be at the first entry in property descriptors.
2868   hidden_string_ = *factory->NewOneByteInternalizedString(
2869       OneByteVector("", 0), String::kEmptyStringHash);
2870
2871   // Create the code_stubs dictionary. The initial size is set to avoid
2872   // expanding the dictionary during bootstrapping.
2873   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2874
2875   // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2876   // is set to avoid expanding the dictionary during bootstrapping.
2877   set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2878
2879   set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2880       *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2881
2882   set_instanceof_cache_function(Smi::FromInt(0));
2883   set_instanceof_cache_map(Smi::FromInt(0));
2884   set_instanceof_cache_answer(Smi::FromInt(0));
2885
2886   CreateFixedStubs();
2887
2888   // Allocate the dictionary of intrinsic function names.
2889   Handle<NameDictionary> intrinsic_names =
2890       NameDictionary::New(isolate(), Runtime::kNumFunctions);
2891   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2892   set_intrinsic_function_names(*intrinsic_names);
2893
2894   set_number_string_cache(*factory->NewFixedArray(
2895       kInitialNumberStringCacheSize * 2, TENURED));
2896
2897   // Allocate cache for single character one byte strings.
2898   set_single_character_string_cache(*factory->NewFixedArray(
2899       String::kMaxOneByteCharCode + 1, TENURED));
2900
2901   // Allocate cache for string split and regexp-multiple.
2902   set_string_split_cache(*factory->NewFixedArray(
2903       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2904   set_regexp_multiple_cache(*factory->NewFixedArray(
2905       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2906
2907   // Allocate cache for external strings pointing to native source code.
2908   set_natives_source_cache(*factory->NewFixedArray(
2909       Natives::GetBuiltinsCount()));
2910
2911   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2912
2913   // The symbol registry is initialized lazily.
2914   set_symbol_registry(undefined_value());
2915
2916   // Allocate object to hold object observation state.
2917   set_observation_state(*factory->NewJSObjectFromMap(
2918       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2919
2920   // Microtask queue uses the empty fixed array as a sentinel for "empty".
2921   // Number of queued microtasks stored in Isolate::pending_microtask_count().
2922   set_microtask_queue(empty_fixed_array());
2923
2924   set_frozen_symbol(*factory->NewPrivateSymbol());
2925   set_nonexistent_symbol(*factory->NewPrivateSymbol());
2926   set_elements_transition_symbol(*factory->NewPrivateSymbol());
2927   set_uninitialized_symbol(*factory->NewPrivateSymbol());
2928   set_megamorphic_symbol(*factory->NewPrivateSymbol());
2929   set_observed_symbol(*factory->NewPrivateSymbol());
2930
2931   Handle<SeededNumberDictionary> slow_element_dictionary =
2932       SeededNumberDictionary::New(isolate(), 0, TENURED);
2933   slow_element_dictionary->set_requires_slow_elements();
2934   set_empty_slow_element_dictionary(*slow_element_dictionary);
2935
2936   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2937
2938   // Handling of script id generation is in Factory::NewScript.
2939   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2940
2941   set_allocation_sites_scratchpad(*factory->NewFixedArray(
2942       kAllocationSiteScratchpadSize, TENURED));
2943   InitializeAllocationSitesScratchpad();
2944
2945   // Initialize keyed lookup cache.
2946   isolate_->keyed_lookup_cache()->Clear();
2947
2948   // Initialize context slot cache.
2949   isolate_->context_slot_cache()->Clear();
2950
2951   // Initialize descriptor cache.
2952   isolate_->descriptor_lookup_cache()->Clear();
2953
2954   // Initialize compilation cache.
2955   isolate_->compilation_cache()->Clear();
2956 }
2957
2958
2959 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2960   RootListIndex writable_roots[] = {
2961     kStoreBufferTopRootIndex,
2962     kStackLimitRootIndex,
2963     kNumberStringCacheRootIndex,
2964     kInstanceofCacheFunctionRootIndex,
2965     kInstanceofCacheMapRootIndex,
2966     kInstanceofCacheAnswerRootIndex,
2967     kCodeStubsRootIndex,
2968     kNonMonomorphicCacheRootIndex,
2969     kPolymorphicCodeCacheRootIndex,
2970     kLastScriptIdRootIndex,
2971     kEmptyScriptRootIndex,
2972     kRealStackLimitRootIndex,
2973     kArgumentsAdaptorDeoptPCOffsetRootIndex,
2974     kConstructStubDeoptPCOffsetRootIndex,
2975     kGetterStubDeoptPCOffsetRootIndex,
2976     kSetterStubDeoptPCOffsetRootIndex,
2977     kStringTableRootIndex,
2978   };
2979
2980   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
2981     if (root_index == writable_roots[i])
2982       return true;
2983   }
2984   return false;
2985 }
2986
2987
2988 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2989   return !RootCanBeWrittenAfterInitialization(root_index) &&
2990       !InNewSpace(roots_array_start()[root_index]);
2991 }
2992
2993
2994 Object* RegExpResultsCache::Lookup(Heap* heap,
2995                                    String* key_string,
2996                                    Object* key_pattern,
2997                                    ResultsCacheType type) {
2998   FixedArray* cache;
2999   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3000   if (type == STRING_SPLIT_SUBSTRINGS) {
3001     ASSERT(key_pattern->IsString());
3002     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3003     cache = heap->string_split_cache();
3004   } else {
3005     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3006     ASSERT(key_pattern->IsFixedArray());
3007     cache = heap->regexp_multiple_cache();
3008   }
3009
3010   uint32_t hash = key_string->Hash();
3011   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3012       ~(kArrayEntriesPerCacheEntry - 1));
3013   if (cache->get(index + kStringOffset) == key_string &&
3014       cache->get(index + kPatternOffset) == key_pattern) {
3015     return cache->get(index + kArrayOffset);
3016   }
3017   index =
3018       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3019   if (cache->get(index + kStringOffset) == key_string &&
3020       cache->get(index + kPatternOffset) == key_pattern) {
3021     return cache->get(index + kArrayOffset);
3022   }
3023   return Smi::FromInt(0);
3024 }
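
// A minimal sketch, not part of the original source, restating the probe
// scheme used by Lookup() above and Enter() below. It assumes, as this cache
// does, that the cache size and the number of array slots per entry are both
// powers of two: the hash selects a primary entry aligned to an entry
// boundary, and the secondary probe is simply the next entry, wrapping around
// the cache. The helper names and parameters are hypothetical.
static inline uint32_t CachePrimaryIndex(uint32_t hash,
                                         uint32_t cache_size,
                                         uint32_t slots_per_entry) {
  // Reduce the hash modulo the cache size, then round down to the first slot
  // of the entry that contains it.
  return (hash & (cache_size - 1)) & ~(slots_per_entry - 1);
}

static inline uint32_t CacheSecondaryIndex(uint32_t primary_index,
                                           uint32_t cache_size,
                                           uint32_t slots_per_entry) {
  // The secondary probe is the entry immediately after the primary one,
  // wrapping around at the end of the cache.
  return (primary_index + slots_per_entry) & (cache_size - 1);
}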
3025
3026
3027 void RegExpResultsCache::Enter(Isolate* isolate,
3028                                Handle<String> key_string,
3029                                Handle<Object> key_pattern,
3030                                Handle<FixedArray> value_array,
3031                                ResultsCacheType type) {
3032   Factory* factory = isolate->factory();
3033   Handle<FixedArray> cache;
3034   if (!key_string->IsInternalizedString()) return;
3035   if (type == STRING_SPLIT_SUBSTRINGS) {
3036     ASSERT(key_pattern->IsString());
3037     if (!key_pattern->IsInternalizedString()) return;
3038     cache = factory->string_split_cache();
3039   } else {
3040     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3041     ASSERT(key_pattern->IsFixedArray());
3042     cache = factory->regexp_multiple_cache();
3043   }
3044
3045   uint32_t hash = key_string->Hash();
3046   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3047       ~(kArrayEntriesPerCacheEntry - 1));
3048   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3049     cache->set(index + kStringOffset, *key_string);
3050     cache->set(index + kPatternOffset, *key_pattern);
3051     cache->set(index + kArrayOffset, *value_array);
3052   } else {
3053     uint32_t index2 =
3054         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3055     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3056       cache->set(index2 + kStringOffset, *key_string);
3057       cache->set(index2 + kPatternOffset, *key_pattern);
3058       cache->set(index2 + kArrayOffset, *value_array);
3059     } else {
3060       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3061       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3062       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3063       cache->set(index + kStringOffset, *key_string);
3064       cache->set(index + kPatternOffset, *key_pattern);
3065       cache->set(index + kArrayOffset, *value_array);
3066     }
3067   }
3068   // If the array is a reasonably short list of substrings, convert it into a
3069   // list of internalized strings.
3070   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3071     for (int i = 0; i < value_array->length(); i++) {
3072       Handle<String> str(String::cast(value_array->get(i)), isolate);
3073       Handle<String> internalized_str = factory->InternalizeString(str);
3074       value_array->set(i, *internalized_str);
3075     }
3076   }
3077   // Convert backing store to a copy-on-write array.
3078   value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3079 }
3080
3081
3082 void RegExpResultsCache::Clear(FixedArray* cache) {
3083   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3084     cache->set(i, Smi::FromInt(0));
3085   }
3086 }
3087
3088
3089 int Heap::FullSizeNumberStringCacheLength() {
3090   // Compute the size of the number string cache based on the maximum new
3091   // space size. The number string cache has a minimum size of twice the
3092   // initial cache size to ensure that it is bigger after being made 'full size'.
3093   int number_string_cache_size = max_semi_space_size_ / 512;
3094   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3095                                  Min(0x4000, number_string_cache_size));
3096   // There is a string and a number per entry so the length is twice the number
3097   // of entries.
3098   return number_string_cache_size * 2;
3099 }
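
// Worked example (an illustrative assumption, not taken from the source):
// with an 8 MB max semispace, 8 * MB / 512 = 16384 entries, which
// Min(0x4000, ...) leaves at 16384; assuming this exceeds
// kInitialNumberStringCacheSize * 2, the cache ends up with 16384 entries and
// the FixedArray length returned above is 2 * 16384 = 32768.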
3100
3101
3102 void Heap::FlushNumberStringCache() {
3103   // Flush the number to string cache.
3104   int len = number_string_cache()->length();
3105   for (int i = 0; i < len; i++) {
3106     number_string_cache()->set_undefined(i);
3107   }
3108 }
3109
3110
3111 void Heap::FlushAllocationSitesScratchpad() {
3112   for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3113     allocation_sites_scratchpad()->set_undefined(i);
3114   }
3115   allocation_sites_scratchpad_length_ = 0;
3116 }
3117
3118
3119 void Heap::InitializeAllocationSitesScratchpad() {
3120   ASSERT(allocation_sites_scratchpad()->length() ==
3121          kAllocationSiteScratchpadSize);
3122   for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3123     allocation_sites_scratchpad()->set_undefined(i);
3124   }
3125 }
3126
3127
3128 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3129                                          ScratchpadSlotMode mode) {
3130   if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3131     // We cannot use the normal write-barrier because slots need to be
3132     // recorded with non-incremental marking as well. We have to explicitly
3133     // record the slot to take evacuation candidates into account.
3134     allocation_sites_scratchpad()->set(
3135         allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
3136     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3137         allocation_sites_scratchpad_length_);
3138
3139     if (mode == RECORD_SCRATCHPAD_SLOT) {
3140       // We need to allow slots buffer overflow here since the evacuation
3141       // candidates are not part of the global list of old space pages and
3142       // releasing an evacuation candidate due to a slots buffer overflow
3143       // results in lost pages.
3144       mark_compact_collector()->RecordSlot(
3145           slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
3146     }
3147     allocation_sites_scratchpad_length_++;
3148   }
3149 }
3150
3151
3152 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3153   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3154 }
3155
3156
3157 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3158     ExternalArrayType array_type) {
3159   switch (array_type) {
3160 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3161     case kExternal##Type##Array:                                              \
3162       return kExternal##Type##ArrayMapRootIndex;
3163
3164     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3165 #undef ARRAY_TYPE_TO_ROOT_INDEX
3166
3167     default:
3168       UNREACHABLE();
3169       return kUndefinedValueRootIndex;
3170   }
3171 }
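
// For illustration (an assumed expansion, not text from the source), the
// Uint8 entry of TYPED_ARRAYS turns the ARRAY_TYPE_TO_ROOT_INDEX macro above
// into roughly:
//
//   case kExternalUint8Array:
//     return kExternalUint8ArrayMapRootIndex;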
3172
3173
3174 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3175   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3176 }
3177
3178
3179 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3180     ExternalArrayType array_type) {
3181   switch (array_type) {
3182 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3183     case kExternal##Type##Array:                                              \
3184       return kFixed##Type##ArrayMapRootIndex;
3185
3186     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3187 #undef ARRAY_TYPE_TO_ROOT_INDEX
3188
3189     default:
3190       UNREACHABLE();
3191       return kUndefinedValueRootIndex;
3192   }
3193 }
3194
3195
3196 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3197     ElementsKind elementsKind) {
3198   switch (elementsKind) {
3199 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
3200     case EXTERNAL_##TYPE##_ELEMENTS:                                          \
3201       return kEmptyExternal##Type##ArrayRootIndex;
3202
3203     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3204 #undef ELEMENT_KIND_TO_ROOT_INDEX
3205
3206     default:
3207       UNREACHABLE();
3208       return kUndefinedValueRootIndex;
3209   }
3210 }
3211
3212
3213 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3214     ElementsKind elementsKind) {
3215   switch (elementsKind) {
3216 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
3217     case TYPE##_ELEMENTS:                                                     \
3218       return kEmptyFixed##Type##ArrayRootIndex;
3219
3220     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3221 #undef ELEMENT_KIND_TO_ROOT_INDEX
3222     default:
3223       UNREACHABLE();
3224       return kUndefinedValueRootIndex;
3225   }
3226 }
3227
3228
3229 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3230   return ExternalArray::cast(
3231       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3232 }
3233
3234
3235 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3236   return FixedTypedArrayBase::cast(
3237       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3238 }
3239
3240
3241 AllocationResult Heap::AllocateForeign(Address address,
3242                                        PretenureFlag pretenure) {
3243   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3244   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3245   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3246   Foreign* result;
3247   AllocationResult allocation = Allocate(foreign_map(), space);
3248   if (!allocation.To(&result)) return allocation;
3249   result->set_foreign_address(address);
3250   return result;
3251 }
3252
3253
3254 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3255   if (length < 0 || length > ByteArray::kMaxLength) {
3256     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3257   }
3258   int size = ByteArray::SizeFor(length);
3259   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3260   HeapObject* result;
3261   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3262     if (!allocation.To(&result)) return allocation;
3263   }
3264
3265   result->set_map_no_write_barrier(byte_array_map());
3266   ByteArray::cast(result)->set_length(length);
3267   return result;
3268 }
3269
3270
3271 void Heap::CreateFillerObjectAt(Address addr, int size) {
3272   if (size == 0) return;
3273   HeapObject* filler = HeapObject::FromAddress(addr);
3274   if (size == kPointerSize) {
3275     filler->set_map_no_write_barrier(one_pointer_filler_map());
3276   } else if (size == 2 * kPointerSize) {
3277     filler->set_map_no_write_barrier(two_pointer_filler_map());
3278   } else {
3279     filler->set_map_no_write_barrier(free_space_map());
3280     FreeSpace::cast(filler)->set_size(size);
3281   }
3282 }
3283
3284
3285 bool Heap::CanMoveObjectStart(HeapObject* object) {
3286   Address address = object->address();
3287   bool is_in_old_pointer_space = InOldPointerSpace(address);
3288   bool is_in_old_data_space = InOldDataSpace(address);
3289
3290   if (lo_space()->Contains(object)) return false;
3291
3292   Page* page = Page::FromAddress(address);
3293   // We can move the object start if any of the following holds:
3294   // (1) the object is not in old pointer or old data space,
3295   // (2) the page of the object was already swept, or
3296   // (3) the page was already swept concurrently. This case is an optimization
3297   // for concurrent sweeping: the WasSwept predicate for concurrently swept
3298   // pages is only set after all pages have been swept.
3299   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3300          page->WasSwept() ||
3301          (mark_compact_collector()->AreSweeperThreadsActivated() &&
3302               page->parallel_sweeping() <=
3303                   MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
3304 }
3305
3306
3307 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3308   if (incremental_marking()->IsMarking() &&
3309       Marking::IsBlack(Marking::MarkBitFrom(address))) {
3310     if (mode == FROM_GC) {
3311       MemoryChunk::IncrementLiveBytesFromGC(address, by);
3312     } else {
3313       MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3314     }
3315   }
3316 }
3317
3318
3319 AllocationResult Heap::AllocateExternalArray(int length,
3320                                          ExternalArrayType array_type,
3321                                          void* external_pointer,
3322                                          PretenureFlag pretenure) {
3323   int size = ExternalArray::kAlignedSize;
3324   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3325   HeapObject* result;
3326   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3327     if (!allocation.To(&result)) return allocation;
3328   }
3329
3330   result->set_map_no_write_barrier(
3331       MapForExternalArrayType(array_type));
3332   ExternalArray::cast(result)->set_length(length);
3333   ExternalArray::cast(result)->set_external_pointer(external_pointer);
3334   return result;
3335 }
3336
3337 static void ForFixedTypedArray(ExternalArrayType array_type,
3338                                int* element_size,
3339                                ElementsKind* element_kind) {
3340   switch (array_type) {
3341 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
3342     case kExternal##Type##Array:                                              \
3343       *element_size = size;                                                   \
3344       *element_kind = TYPE##_ELEMENTS;                                        \
3345       return;
3346
3347     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3348 #undef TYPED_ARRAY_CASE
3349
3350     default:
3351       *element_size = 0;  // Bogus
3352       *element_kind = UINT8_ELEMENTS;  // Bogus
3353       UNREACHABLE();
3354   }
3355 }
3356
3357
3358 AllocationResult Heap::AllocateFixedTypedArray(int length,
3359                                                ExternalArrayType array_type,
3360                                                PretenureFlag pretenure) {
3361   int element_size;
3362   ElementsKind elements_kind;
3363   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3364   int size = OBJECT_POINTER_ALIGN(
3365       length * element_size + FixedTypedArrayBase::kDataOffset);
3366 #ifndef V8_HOST_ARCH_64_BIT
3367   if (array_type == kExternalFloat64Array) {
3368     size += kPointerSize;
3369   }
3370 #endif
3371   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3372
3373   HeapObject* object;
3374   AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3375   if (!allocation.To(&object)) return allocation;
3376
3377   if (array_type == kExternalFloat64Array) {
3378     object = EnsureDoubleAligned(this, object, size);
3379   }
3380
3381   object->set_map(MapForFixedTypedArray(array_type));
3382   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3383   elements->set_length(length);
3384   memset(elements->DataPtr(), 0, elements->DataSize());
3385   return elements;
3386 }
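
// Size sketch (illustrative): for a Float64 array of length 3 on a 32-bit
// build, the payload is 3 * 8 bytes placed after FixedTypedArrayBase::kDataOffset,
// rounded up by OBJECT_POINTER_ALIGN, plus one extra kPointerSize of slack so
// that EnsureDoubleAligned() can shift the object onto an 8-byte boundary
// without running past the reserved space.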
3387
3388
3389 AllocationResult Heap::AllocateCode(int object_size,
3390                                 bool immovable) {
3391   ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3392   AllocationResult allocation;
3393   // Large code objects and code objects which should stay at a fixed address
3394   // are allocated in large object space.
3395   HeapObject* result;
3396   bool force_lo_space = object_size > code_space()->AreaSize();
3397   if (force_lo_space) {
3398     allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3399   } else {
3400     allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3401   }
3402   if (!allocation.To(&result)) return allocation;
3403
3404   if (immovable && !force_lo_space &&
3405      // Objects on the first page of each space are never moved.
3406      !code_space_->FirstPage()->Contains(result->address())) {
3407     // Discard the first code allocation, which was on a page where it could be
3408     // moved.
3409     CreateFillerObjectAt(result->address(), object_size);
3410     allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3411     if (!allocation.To(&result)) return allocation;
3412   }
3413
3414   result->set_map_no_write_barrier(code_map());
3415   Code* code = Code::cast(result);
3416   ASSERT(isolate_->code_range() == NULL ||
3417          !isolate_->code_range()->valid() ||
3418          isolate_->code_range()->contains(code->address()));
3419   code->set_gc_metadata(Smi::FromInt(0));
3420   code->set_ic_age(global_ic_age_);
3421   return code;
3422 }
3423
3424
3425 AllocationResult Heap::CopyCode(Code* code) {
3426   AllocationResult allocation;
3427   HeapObject* new_constant_pool;
3428   if (FLAG_enable_ool_constant_pool &&
3429       code->constant_pool() != empty_constant_pool_array()) {
3430     // Copy the constant pool, since edits to the copied code may modify
3431     // the constant pool.
3432     allocation = CopyConstantPoolArray(code->constant_pool());
3433     if (!allocation.To(&new_constant_pool)) return allocation;
3434   } else {
3435     new_constant_pool = empty_constant_pool_array();
3436   }
3437
3438   // Allocate an object the same size as the code object.
3439   int obj_size = code->Size();
3440   if (obj_size > code_space()->AreaSize()) {
3441     allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3442   } else {
3443     allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3444   }
3445
3446   HeapObject* result;
3447   if (!allocation.To(&result)) return allocation;
3448
3449   // Copy code object.
3450   Address old_addr = code->address();
3451   Address new_addr = result->address();
3452   CopyBlock(new_addr, old_addr, obj_size);
3453   Code* new_code = Code::cast(result);
3454
3455   // Update the constant pool.
3456   new_code->set_constant_pool(new_constant_pool);
3457
3458   // Relocate the copy.
3459   ASSERT(isolate_->code_range() == NULL ||
3460          !isolate_->code_range()->valid() ||
3461          isolate_->code_range()->contains(code->address()));
3462   new_code->Relocate(new_addr - old_addr);
3463   return new_code;
3464 }
3465
3466
3467 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3468   // Allocate the ByteArray and ConstantPoolArray before the Code object, so that
3469   // we do not risk leaving an uninitialized Code object (and breaking the heap).
3470   ByteArray* reloc_info_array;
3471   { AllocationResult allocation =
3472         AllocateByteArray(reloc_info.length(), TENURED);
3473     if (!allocation.To(&reloc_info_array)) return allocation;
3474   }
3475   HeapObject* new_constant_pool;
3476   if (FLAG_enable_ool_constant_pool &&
3477       code->constant_pool() != empty_constant_pool_array()) {
3478     // Copy the constant pool, since edits to the copied code may modify
3479     // the constant pool.
3480     AllocationResult allocation =
3481         CopyConstantPoolArray(code->constant_pool());
3482     if (!allocation.To(&new_constant_pool)) return allocation;
3483   } else {
3484     new_constant_pool = empty_constant_pool_array();
3485   }
3486
3487   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3488
3489   int new_obj_size = Code::SizeFor(new_body_size);
3490
3491   Address old_addr = code->address();
3492
3493   size_t relocation_offset =
3494       static_cast<size_t>(code->instruction_end() - old_addr);
3495
3496   AllocationResult allocation;
3497   if (new_obj_size > code_space()->AreaSize()) {
3498     allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3499   } else {
3500     allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3501   }
3502
3503   HeapObject* result;
3504   if (!allocation.To(&result)) return allocation;
3505
3506   // Copy code object.
3507   Address new_addr = result->address();
3508
3509   // Copy header and instructions.
3510   CopyBytes(new_addr, old_addr, relocation_offset);
3511
3512   Code* new_code = Code::cast(result);
3513   new_code->set_relocation_info(reloc_info_array);
3514
3515   // Update constant pool.
3516   new_code->set_constant_pool(new_constant_pool);
3517
3518   // Copy patched rinfo.
3519   CopyBytes(new_code->relocation_start(),
3520             reloc_info.start(),
3521             static_cast<size_t>(reloc_info.length()));
3522
3523   // Relocate the copy.
3524   ASSERT(isolate_->code_range() == NULL ||
3525          !isolate_->code_range()->valid() ||
3526          isolate_->code_range()->contains(code->address()));
3527   new_code->Relocate(new_addr - old_addr);
3528
3529 #ifdef VERIFY_HEAP
3530   if (FLAG_verify_heap) code->ObjectVerify();
3531 #endif
3532   return new_code;
3533 }
3534
3535
3536 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3537                                        AllocationSite* allocation_site) {
3538   memento->set_map_no_write_barrier(allocation_memento_map());
3539   ASSERT(allocation_site->map() == allocation_site_map());
3540   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
3541   if (FLAG_allocation_site_pretenuring) {
3542     allocation_site->IncrementMementoCreateCount();
3543   }
3544 }
3545
3546
3547 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3548                             AllocationSite* allocation_site) {
3549   ASSERT(gc_state_ == NOT_IN_GC);
3550   ASSERT(map->instance_type() != MAP_TYPE);
3551   // If allocation failures are disallowed, we may allocate in a different
3552   // space when new space is full and the object is not a large object.
3553   AllocationSpace retry_space =
3554       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3555   int size = map->instance_size();
3556   if (allocation_site != NULL) {
3557     size += AllocationMemento::kSize;
3558   }
3559   HeapObject* result;
3560   AllocationResult allocation = AllocateRaw(size, space, retry_space);
3561   if (!allocation.To(&result)) return allocation;
3562   // No need for write barrier since object is white and map is in old space.
3563   result->set_map_no_write_barrier(map);
3564   if (allocation_site != NULL) {
3565     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3566         reinterpret_cast<Address>(result) + map->instance_size());
3567     InitializeAllocationMemento(alloc_memento, allocation_site);
3568   }
3569   return result;
3570 }
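
// Layout sketch (illustrative): when an allocation site is supplied, the raw
// allocation above covers the object plus its memento, and the memento is
// written directly after the object body:
//
//   result                       result + map->instance_size()
//   |                            |
//   +----------------------------+------------------------------+
//   | object (instance_size)     | AllocationMemento (kSize)    |
//   +----------------------------+------------------------------+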
3571
3572
3573 AllocationResult Heap::AllocateArgumentsObject(Object* callee, int length) {
3574   // To get fast allocation and map sharing for arguments objects we
3575   // allocate them based on an arguments boilerplate.
3576
3577   JSObject* boilerplate;
3578   int arguments_object_size;
3579   bool strict_mode_callee = callee->IsJSFunction() &&
3580       JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
3581   if (strict_mode_callee) {
3582     boilerplate =
3583         isolate()->context()->native_context()->strict_arguments_boilerplate();
3584     arguments_object_size = kStrictArgumentsObjectSize;
3585   } else {
3586     boilerplate =
3587         isolate()->context()->native_context()->sloppy_arguments_boilerplate();
3588     arguments_object_size = kSloppyArgumentsObjectSize;
3589   }
3590
3591   // Check that the size of the boilerplate matches our
3592   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
3593   // on the size being a known constant.
3594   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
3595
3596   // Do the allocation.
3597   HeapObject* result;
3598   { AllocationResult allocation =
3599         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
3600     if (!allocation.To(&result)) return allocation;
3601   }
3602
3603   // Copy the content. The arguments boilerplate doesn't have any
3604   // fields that point to new space so it's safe to skip the write
3605   // barrier here.
3606   CopyBlock(result->address(), boilerplate->address(), JSObject::kHeaderSize);
3607
3608   // Set the length property.
3609   JSObject* js_obj = JSObject::cast(result);
3610   js_obj->InObjectPropertyAtPut(
3611       kArgumentsLengthIndex, Smi::FromInt(length), SKIP_WRITE_BARRIER);
3612   // Set the callee property for sloppy mode arguments object only.
3613   if (!strict_mode_callee) {
3614     js_obj->InObjectPropertyAtPut(kArgumentsCalleeIndex, callee);
3615   }
3616
3617   // Check the state of the object
3618   ASSERT(js_obj->HasFastProperties());
3619   ASSERT(js_obj->HasFastObjectElements());
3620
3621   return js_obj;
3622 }
3623
3624
3625 void Heap::InitializeJSObjectFromMap(JSObject* obj,
3626                                      FixedArray* properties,
3627                                      Map* map) {
3628   obj->set_properties(properties);
3629   obj->initialize_elements();
3630   // TODO(1240798): Initialize the object's body using valid initial values
3631   // according to the object's initial map.  For example, if the map's
3632   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3633   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3634   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
3635   // verification code has to cope with (temporarily) invalid objects.  See,
3636   // for example, JSArray::JSArrayVerify.
3637   Object* filler;
3638   // We cannot always fill with one_pointer_filler_map because objects
3639   // created from API functions expect their internal fields to be initialized
3640   // with undefined_value.
3641   // Pre-allocated fields need to be initialized with undefined_value as well
3642   // so that object accesses before the constructor completes (e.g. in the
3643   // debugger) will not cause a crash.
3644   if (map->constructor()->IsJSFunction() &&
3645       JSFunction::cast(map->constructor())->
3646           IsInobjectSlackTrackingInProgress()) {
3647     // We might want to shrink the object later.
3648     ASSERT(obj->GetInternalFieldCount() == 0);
3649     filler = Heap::one_pointer_filler_map();
3650   } else {
3651     filler = Heap::undefined_value();
3652   }
3653   obj->InitializeBody(map, Heap::undefined_value(), filler);
3654 }
3655
3656
3657 AllocationResult Heap::AllocateJSObjectFromMap(
3658     Map* map,
3659     PretenureFlag pretenure,
3660     bool allocate_properties,
3661     AllocationSite* allocation_site) {
3662   // JSFunctions should be allocated using AllocateFunction to be
3663   // properly initialized.
3664   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3665
3666   // Both types of global objects should be allocated using
3667   // AllocateGlobalObject to be properly initialized.
3668   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3669   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3670
3671   // Allocate the backing storage for the properties.
3672   FixedArray* properties;
3673   if (allocate_properties) {
3674     int prop_size = map->InitialPropertiesLength();
3675     ASSERT(prop_size >= 0);
3676     { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3677       if (!allocation.To(&properties)) return allocation;
3678     }
3679   } else {
3680     properties = empty_fixed_array();
3681   }
3682
3683   // Allocate the JSObject.
3684   int size = map->instance_size();
3685   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3686   JSObject* js_obj;
3687   AllocationResult allocation = Allocate(map, space, allocation_site);
3688   if (!allocation.To(&js_obj)) return allocation;
3689
3690   // Initialize the JSObject.
3691   InitializeJSObjectFromMap(js_obj, properties, map);
3692   ASSERT(js_obj->HasFastElements() ||
3693          js_obj->HasExternalArrayElements() ||
3694          js_obj->HasFixedTypedArrayElements());
3695   return js_obj;
3696 }
3697
3698
3699 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3700                                         PretenureFlag pretenure,
3701                                         AllocationSite* allocation_site) {
3702   ASSERT(constructor->has_initial_map());
3703
3704   // Allocate the object based on the constructors initial map.
3705   AllocationResult allocation = AllocateJSObjectFromMap(
3706       constructor->initial_map(), pretenure, true, allocation_site);
3707 #ifdef DEBUG
3708   // Make sure result is NOT a global object if valid.
3709   HeapObject* obj;
3710   ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject());
3711 #endif
3712   return allocation;
3713 }
3714
3715
3716 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3717   // Never used to copy functions.  If functions need to be copied we
3718   // have to be careful to clear the literals array.
3719   SLOW_ASSERT(!source->IsJSFunction());
3720
3721   // Make the clone.
3722   Map* map = source->map();
3723   int object_size = map->instance_size();
3724   HeapObject* clone;
3725
3726   ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3727
3728   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
3729
3730   // If we're forced to always allocate, we use the general allocation
3731   // functions which may leave us with an object in old space.
3732   if (always_allocate()) {
3733     { AllocationResult allocation =
3734           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3735       if (!allocation.To(&clone)) return allocation;
3736     }
3737     Address clone_address = clone->address();
3738     CopyBlock(clone_address,
3739               source->address(),
3740               object_size);
3741     // Update write barrier for all fields that lie beyond the header.
3742     RecordWrites(clone_address,
3743                  JSObject::kHeaderSize,
3744                  (object_size - JSObject::kHeaderSize) / kPointerSize);
3745   } else {
3746     wb_mode = SKIP_WRITE_BARRIER;
3747
3748     { int adjusted_object_size = site != NULL
3749           ? object_size + AllocationMemento::kSize
3750           : object_size;
3751       AllocationResult allocation =
3752           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3753       if (!allocation.To(&clone)) return allocation;
3754     }
3755     SLOW_ASSERT(InNewSpace(clone));
3756     // Since we know the clone is allocated in new space, we can copy
3757     // the contents without worrying about updating the write barrier.
3758     CopyBlock(clone->address(),
3759               source->address(),
3760               object_size);
3761
3762     if (site != NULL) {
3763       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3764           reinterpret_cast<Address>(clone) + object_size);
3765       InitializeAllocationMemento(alloc_memento, site);
3766     }
3767   }
3768
3769   SLOW_ASSERT(
3770       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3771   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3772   FixedArray* properties = FixedArray::cast(source->properties());
3773   // Update elements if necessary.
3774   if (elements->length() > 0) {
3775     FixedArrayBase* elem;
3776     { AllocationResult allocation;
3777       if (elements->map() == fixed_cow_array_map()) {
3778         allocation = FixedArray::cast(elements);
3779       } else if (source->HasFastDoubleElements()) {
3780         allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3781       } else {
3782         allocation = CopyFixedArray(FixedArray::cast(elements));
3783       }
3784       if (!allocation.To(&elem)) return allocation;
3785     }
3786     JSObject::cast(clone)->set_elements(elem, wb_mode);
3787   }
3788   // Update properties if necessary.
3789   if (properties->length() > 0) {
3790     FixedArray* prop;
3791     { AllocationResult allocation = CopyFixedArray(properties);
3792       if (!allocation.To(&prop)) return allocation;
3793     }
3794     JSObject::cast(clone)->set_properties(prop, wb_mode);
3795   }
3796   // Return the new clone.
3797   return clone;
3798 }
3799
3800
3801 static inline void WriteOneByteData(Vector<const char> vector,
3802                                     uint8_t* chars,
3803                                     int len) {
3804   // Only works for ASCII data.
3805   ASSERT(vector.length() == len);
3806   MemCopy(chars, vector.start(), len);
3807 }
3808
3809 static inline void WriteTwoByteData(Vector<const char> vector,
3810                                     uint16_t* chars,
3811                                     int len) {
3812   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3813   unsigned stream_length = vector.length();
3814   while (stream_length != 0) {
3815     unsigned consumed = 0;
3816     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3817     ASSERT(c != unibrow::Utf8::kBadChar);
3818     ASSERT(consumed <= stream_length);
3819     stream_length -= consumed;
3820     stream += consumed;
3821     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3822       len -= 2;
3823       if (len < 0) break;
3824       *chars++ = unibrow::Utf16::LeadSurrogate(c);
3825       *chars++ = unibrow::Utf16::TrailSurrogate(c);
3826     } else {
3827       len -= 1;
3828       if (len < 0) break;
3829       *chars++ = c;
3830     }
3831   }
3832   ASSERT(stream_length == 0);
3833   ASSERT(len == 0);
3834 }
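
// Worked example (illustrative): the code point U+1F600 arrives as four UTF-8
// bytes, exceeds Utf16::kMaxNonSurrogateCharCode, and is therefore written as
// the surrogate pair 0xD83D (lead) followed by 0xDE00 (trail), consuming two
// of the remaining output slots; a BMP character such as U+00E9 consumes one.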
3835
3836
3837 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3838   ASSERT(s->length() == len);
3839   String::WriteToFlat(s, chars, 0, len);
3840 }
3841
3842
3843 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3844   ASSERT(s->length() == len);
3845   String::WriteToFlat(s, chars, 0, len);
3846 }
3847
3848
3849 template<bool is_one_byte, typename T>
3850 AllocationResult Heap::AllocateInternalizedStringImpl(
3851     T t, int chars, uint32_t hash_field) {
3852   ASSERT(chars >= 0);
3853   // Compute map and object size.
3854   int size;
3855   Map* map;
3856
3857   ASSERT_LE(0, chars);
3858   ASSERT_GE(String::kMaxLength, chars);
3859   if (is_one_byte) {
3860     map = ascii_internalized_string_map();
3861     size = SeqOneByteString::SizeFor(chars);
3862   } else {
3863     map = internalized_string_map();
3864     size = SeqTwoByteString::SizeFor(chars);
3865   }
3866   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3867
3868   // Allocate string.
3869   HeapObject* result;
3870   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3871     if (!allocation.To(&result)) return allocation;
3872   }
3873
3874   result->set_map_no_write_barrier(map);
3875   // Set length and hash fields of the allocated string.
3876   String* answer = String::cast(result);
3877   answer->set_length(chars);
3878   answer->set_hash_field(hash_field);
3879
3880   ASSERT_EQ(size, answer->Size());
3881
3882   if (is_one_byte) {
3883     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3884   } else {
3885     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3886   }
3887   return answer;
3888 }
3889
3890
3891 // Need explicit instantiations.
3892 template
3893 AllocationResult Heap::AllocateInternalizedStringImpl<true>(
3894     String*, int, uint32_t);
3895 template
3896 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3897     String*, int, uint32_t);
3898 template
3899 AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3900     Vector<const char>, int, uint32_t);
3901
3902
3903 AllocationResult Heap::AllocateRawOneByteString(int length,
3904                                                 PretenureFlag pretenure) {
3905   ASSERT_LE(0, length);
3906   ASSERT_GE(String::kMaxLength, length);
3907   int size = SeqOneByteString::SizeFor(length);
3908   ASSERT(size <= SeqOneByteString::kMaxSize);
3909   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3910
3911   HeapObject* result;
3912   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3913     if (!allocation.To(&result)) return allocation;
3914   }
3915
3916   // Partially initialize the object.
3917   result->set_map_no_write_barrier(ascii_string_map());
3918   String::cast(result)->set_length(length);
3919   String::cast(result)->set_hash_field(String::kEmptyHashField);
3920   ASSERT_EQ(size, HeapObject::cast(result)->Size());
3921
3922   return result;
3923 }
3924
3925
3926 AllocationResult Heap::AllocateRawTwoByteString(int length,
3927                                                 PretenureFlag pretenure) {
3928   ASSERT_LE(0, length);
3929   ASSERT_GE(String::kMaxLength, length);
3930   int size = SeqTwoByteString::SizeFor(length);
3931   ASSERT(size <= SeqTwoByteString::kMaxSize);
3932   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3933
3934   HeapObject* result;
3935   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3936     if (!allocation.To(&result)) return allocation;
3937   }
3938
3939   // Partially initialize the object.
3940   result->set_map_no_write_barrier(string_map());
3941   String::cast(result)->set_length(length);
3942   String::cast(result)->set_hash_field(String::kEmptyHashField);
3943   ASSERT_EQ(size, HeapObject::cast(result)->Size());
3944   return result;
3945 }
3946
3947
3948 AllocationResult Heap::AllocateEmptyFixedArray() {
3949   int size = FixedArray::SizeFor(0);
3950   HeapObject* result;
3951   { AllocationResult allocation =
3952         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3953     if (!allocation.To(&result)) return allocation;
3954   }
3955   // Initialize the object.
3956   result->set_map_no_write_barrier(fixed_array_map());
3957   FixedArray::cast(result)->set_length(0);
3958   return result;
3959 }
3960
3961
3962 AllocationResult Heap::AllocateEmptyExternalArray(
3963     ExternalArrayType array_type) {
3964   return AllocateExternalArray(0, array_type, NULL, TENURED);
3965 }
3966
3967
3968 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3969   if (!InNewSpace(src)) {
3970     return src;
3971   }
3972
3973   int len = src->length();
3974   HeapObject* obj;
3975   { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3976     if (!allocation.To(&obj)) return allocation;
3977   }
3978   obj->set_map_no_write_barrier(fixed_array_map());
3979   FixedArray* result = FixedArray::cast(obj);
3980   result->set_length(len);
3981
3982   // Copy the content
3983   DisallowHeapAllocation no_gc;
3984   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3985   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3986
3987   // TODO(mvstanton): The map is set twice because of protection against calling
3988   // set() on a COW FixedArray. Issue v8:3221 was created to track this; once it
3989   // is resolved we might be able to remove this whole method.
3990   HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
3991   return result;
3992 }
3993
3994
3995 AllocationResult Heap::AllocateEmptyFixedTypedArray(
3996     ExternalArrayType array_type) {
3997   return AllocateFixedTypedArray(0, array_type, TENURED);
3998 }
3999
4000
4001 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4002   int len = src->length();
4003   HeapObject* obj;
4004   { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4005     if (!allocation.To(&obj)) return allocation;
4006   }
4007   if (InNewSpace(obj)) {
4008     obj->set_map_no_write_barrier(map);
4009     CopyBlock(obj->address() + kPointerSize,
4010               src->address() + kPointerSize,
4011               FixedArray::SizeFor(len) - kPointerSize);
4012     return obj;
4013   }
4014   obj->set_map_no_write_barrier(map);
4015   FixedArray* result = FixedArray::cast(obj);
4016   result->set_length(len);
4017
4018   // Copy the content
4019   DisallowHeapAllocation no_gc;
4020   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4021   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4022   return result;
4023 }
4024
4025
4026 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4027                                                    Map* map) {
4028   int len = src->length();
4029   HeapObject* obj;
4030   { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4031     if (!allocation.To(&obj)) return allocation;
4032   }
4033   obj->set_map_no_write_barrier(map);
4034   CopyBlock(
4035       obj->address() + FixedDoubleArray::kLengthOffset,
4036       src->address() + FixedDoubleArray::kLengthOffset,
4037       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4038   return obj;
4039 }
4040
4041
4042 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4043                                                     Map* map) {
4044   HeapObject* obj;
4045   if (src->is_extended_layout()) {
4046     ConstantPoolArray::NumberOfEntries small(src,
4047         ConstantPoolArray::SMALL_SECTION);
4048     ConstantPoolArray::NumberOfEntries extended(src,
4049         ConstantPoolArray::EXTENDED_SECTION);
4050     AllocationResult allocation =
4051         AllocateExtendedConstantPoolArray(small, extended);
4052     if (!allocation.To(&obj)) return allocation;
4053   } else {
4054     ConstantPoolArray::NumberOfEntries small(src,
4055         ConstantPoolArray::SMALL_SECTION);
4056     AllocationResult allocation = AllocateConstantPoolArray(small);
4057     if (!allocation.To(&obj)) return allocation;
4058   }
4059   obj->set_map_no_write_barrier(map);
4060   CopyBlock(
4061       obj->address() + ConstantPoolArray::kFirstEntryOffset,
4062       src->address() + ConstantPoolArray::kFirstEntryOffset,
4063       src->size() - ConstantPoolArray::kFirstEntryOffset);
4064   return obj;
4065 }
4066
4067
4068 AllocationResult Heap::AllocateRawFixedArray(int length,
4069                                              PretenureFlag pretenure) {
4070   if (length < 0 || length > FixedArray::kMaxLength) {
4071     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4072   }
4073   int size = FixedArray::SizeFor(length);
4074   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4075
4076   return AllocateRaw(size, space, OLD_POINTER_SPACE);
4077 }
4078
4079
4080 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4081                                                     PretenureFlag pretenure,
4082                                                     Object* filler) {
4083   ASSERT(length >= 0);
4084   ASSERT(empty_fixed_array()->IsFixedArray());
4085   if (length == 0) return empty_fixed_array();
4086
4087   ASSERT(!InNewSpace(filler));
4088   HeapObject* result;
4089   { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4090     if (!allocation.To(&result)) return allocation;
4091   }
4092
4093   result->set_map_no_write_barrier(fixed_array_map());
4094   FixedArray* array = FixedArray::cast(result);
4095   array->set_length(length);
4096   MemsetPointer(array->data_start(), filler, length);
4097   return array;
4098 }
4099
4100
4101 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4102   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4103 }
4104
4105
4106 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4107   if (length == 0) return empty_fixed_array();
4108
4109   HeapObject* obj;
4110   { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4111     if (!allocation.To(&obj)) return allocation;
4112   }
4113
4114   obj->set_map_no_write_barrier(fixed_array_map());
4115   FixedArray::cast(obj)->set_length(length);
4116   return obj;
4117 }
4118
4119
4120 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4121     int length,
4122     PretenureFlag pretenure) {
4123   if (length == 0) return empty_fixed_array();
4124
4125   HeapObject* elements;
4126   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4127   if (!allocation.To(&elements)) return allocation;
4128
4129   elements->set_map_no_write_barrier(fixed_double_array_map());
4130   FixedDoubleArray::cast(elements)->set_length(length);
4131   return elements;
4132 }
4133
4134
4135 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4136                                                    PretenureFlag pretenure) {
4137   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4138     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4139   }
4140   int size = FixedDoubleArray::SizeFor(length);
4141 #ifndef V8_HOST_ARCH_64_BIT
4142   size += kPointerSize;
4143 #endif
4144   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4145
4146   HeapObject* object;
4147   { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4148     if (!allocation.To(&object)) return allocation;
4149   }
4150
4151   return EnsureDoubleAligned(this, object, size);
4152 }
4153
4154
4155 AllocationResult Heap::AllocateConstantPoolArray(
4156       const ConstantPoolArray::NumberOfEntries& small) {
4157   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4158   int size = ConstantPoolArray::SizeFor(small);
4159 #ifndef V8_HOST_ARCH_64_BIT
4160   size += kPointerSize;
4161 #endif
4162   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4163
4164   HeapObject* object;
4165   { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4166     if (!allocation.To(&object)) return allocation;
4167   }
4168   object = EnsureDoubleAligned(this, object, size);
4169   object->set_map_no_write_barrier(constant_pool_array_map());
4170
4171   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4172   constant_pool->Init(small);
4173   constant_pool->ClearPtrEntries(isolate());
4174   return constant_pool;
4175 }
4176
4177
4178 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4179     const ConstantPoolArray::NumberOfEntries& small,
4180     const ConstantPoolArray::NumberOfEntries& extended) {
4181   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4182   CHECK(extended.are_in_range(0, kMaxInt));
4183   int size = ConstantPoolArray::SizeForExtended(small, extended);
4184 #ifndef V8_HOST_ARCH_64_BIT
4185   size += kPointerSize;
4186 #endif
4187   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4188
4189   HeapObject* object;
4190   { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4191     if (!allocation.To(&object)) return allocation;
4192   }
4193   object = EnsureDoubleAligned(this, object, size);
4194   object->set_map_no_write_barrier(constant_pool_array_map());
4195
4196   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4197   constant_pool->InitExtended(small, extended);
4198   constant_pool->ClearPtrEntries(isolate());
4199   return constant_pool;
4200 }
4201
4202
4203 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4204   ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4205   int size = ConstantPoolArray::SizeFor(small);
4206   HeapObject* result;
4207   { AllocationResult allocation =
4208         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4209     if (!allocation.To(&result)) return allocation;
4210   }
4211   result->set_map_no_write_barrier(constant_pool_array_map());
4212   ConstantPoolArray::cast(result)->Init(small);
4213   return result;
4214 }
4215
4216
4217 AllocationResult Heap::AllocateSymbol() {
4218   // Statically ensure that it is safe to allocate symbols in paged spaces.
4219   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4220
4221   HeapObject* result;
4222   AllocationResult allocation =
4223       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4224   if (!allocation.To(&result)) return allocation;
4225
4226   result->set_map_no_write_barrier(symbol_map());
4227
4228   // Generate a random hash value.
4229   int hash;
4230   int attempts = 0;
4231   do {
4232     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4233     attempts++;
4234   } while (hash == 0 && attempts < 30);
4235   if (hash == 0) hash = 1;  // never return 0
4236
4237   Symbol::cast(result)->set_hash_field(
4238       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4239   Symbol::cast(result)->set_name(undefined_value());
4240   Symbol::cast(result)->set_flags(Smi::FromInt(0));
4241
4242   ASSERT(!Symbol::cast(result)->is_private());
4243   return result;
4244 }
4245
4246
4247 AllocationResult Heap::AllocateStruct(InstanceType type) {
4248   Map* map;
4249   switch (type) {
4250 #define MAKE_CASE(NAME, Name, name) \
4251     case NAME##_TYPE: map = name##_map(); break;
4252 STRUCT_LIST(MAKE_CASE)
4253 #undef MAKE_CASE
4254     default:
4255       UNREACHABLE();
4256       return exception();
4257   }
4258   int size = map->instance_size();
4259   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4260   Struct* result;
4261   { AllocationResult allocation = Allocate(map, space);
4262     if (!allocation.To(&result)) return allocation;
4263   }
4264   result->InitializeBody(size);
4265   return result;
4266 }
4267
4268
4269 bool Heap::IsHeapIterable() {
4270   return (!old_pointer_space()->was_swept_conservatively() &&
4271           !old_data_space()->was_swept_conservatively() &&
4272           new_space_top_after_last_gc_ == new_space()->top());
4273 }
4274
4275
4276 void Heap::MakeHeapIterable() {
4277   ASSERT(AllowHeapAllocation::IsAllowed());
4278   if (!IsHeapIterable()) {
4279     CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4280   }
4281   ASSERT(IsHeapIterable());
4282 }
4283
4284
4285 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4286   incremental_marking()->Step(step_size,
4287                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
4288
4289   if (incremental_marking()->IsComplete()) {
4290     bool uncommit = false;
4291     if (gc_count_at_last_idle_gc_ == gc_count_) {
4292       // No GC since the last full GC; the mutator is probably not active.
4293       isolate_->compilation_cache()->Clear();
4294       uncommit = true;
4295     }
4296     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
4297     mark_sweeps_since_idle_round_started_++;
4298     gc_count_at_last_idle_gc_ = gc_count_;
4299     if (uncommit) {
4300       new_space_.Shrink();
4301       UncommitFromSpace();
4302     }
4303   }
4304 }
4305
4306
4307 bool Heap::IdleNotification(int hint) {
4308   // Hints greater than this value indicate that
4309   // the embedder is requesting a lot of GC work.
4310   const int kMaxHint = 1000;
4311   const int kMinHintForIncrementalMarking = 10;
4312   // Minimal hint that allows to do full GC.
4313   // Minimal hint that allows a full GC.
4314   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
4315   // The size factor is in range [5..250]. The numbers here are chosen from
4316   // experiments. If you change them, make sure to test with
4317   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
4318   intptr_t step_size =
4319       size_factor * IncrementalMarking::kAllocatedThreshold;
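       // Illustrative arithmetic only: a hint of 100 yields size_factor == 25,
       // i.e. step_size == 25 * kAllocatedThreshold; hints at or below 20 give
       // the minimum factor of 5, hints of 1000 or more give the maximum
       // factor of 250.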
4320
4321   if (contexts_disposed_ > 0) {
4322     contexts_disposed_ = 0;
4323     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
4324     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
4325         incremental_marking()->IsStopped()) {
4326       HistogramTimerScope scope(isolate_->counters()->gc_context());
4327       CollectAllGarbage(kReduceMemoryFootprintMask,
4328                         "idle notification: contexts disposed");
4329     } else {
4330       AdvanceIdleIncrementalMarking(step_size);
4331     }
4332
4333     // After context disposal there is likely a lot of garbage remaining, reset
4334     // the idle notification counters in order to trigger more incremental GCs
4335     // on subsequent idle notifications.
4336     StartIdleRound();
4337     return false;
4338   }
4339
4340   if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
4341     return IdleGlobalGC();
4342   }
4343
4344   // By doing small chunks of GC work in each IdleNotification, we perform
4345   // a round of incremental GCs and after that wait until the mutator
4346   // creates enough garbage to justify a new round.
4347   // An incremental GC progresses as follows:
4348   // 1. many incremental marking steps,
4349   // 2. one old space mark-sweep-compact.
4350   // Mark-sweep-compact events are used to count incremental GCs in a round.
4351
4352   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4353     if (EnoughGarbageSinceLastIdleRound()) {
4354       StartIdleRound();
4355     } else {
4356       return true;
4357     }
4358   }
4359
4360   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
4361                               mark_sweeps_since_idle_round_started_;
4362
4363   if (incremental_marking()->IsStopped()) {
4364     // If there are no more than two GCs left in this idle round and we are
4365     // allowed to do a full GC, then make those GCs full in order to compact
4366     // the code space.
4367     // TODO(ulan): Once we enable code compaction for incremental marking,
4368     // we can get rid of this special case and always start incremental marking.
4369     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
4370       CollectAllGarbage(kReduceMemoryFootprintMask,
4371                         "idle notification: finalize idle round");
4372       mark_sweeps_since_idle_round_started_++;
4373     } else if (hint > kMinHintForIncrementalMarking) {
4374       incremental_marking()->Start();
4375     }
4376   }
4377   if (!incremental_marking()->IsStopped() &&
4378       hint > kMinHintForIncrementalMarking) {
4379     AdvanceIdleIncrementalMarking(step_size);
4380   }
4381
4382   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
4383     FinishIdleRound();
4384     return true;
4385   }
4386
4387   // If IdleNotification is called with a large hint, we will wait for
4388   // the sweeper threads here.
4389   if (hint >= kMinHintForFullGC &&
4390       mark_compact_collector()->IsConcurrentSweepingInProgress()) {
4391     mark_compact_collector()->WaitUntilSweepingCompleted();
4392   }
4393
4394   return false;
4395 }
4396
4397
4398 bool Heap::IdleGlobalGC() {
4399   static const int kIdlesBeforeScavenge = 4;
4400   static const int kIdlesBeforeMarkSweep = 7;
4401   static const int kIdlesBeforeMarkCompact = 8;
4402   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4403   static const unsigned int kGCsBetweenCleanup = 4;
4404
4405   if (!last_idle_notification_gc_count_init_) {
4406     last_idle_notification_gc_count_ = gc_count_;
4407     last_idle_notification_gc_count_init_ = true;
4408   }
4409
4410   bool uncommit = true;
4411   bool finished = false;
4412
4413   // Reset the number of idle notifications received when a number of
4414   // GCs have taken place. This allows another round of cleanup based
4415   // on idle notifications if enough work has been carried out to
4416   // provoke a number of garbage collections.
4417   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
4418     number_idle_notifications_ =
4419         Min(number_idle_notifications_ + 1, kMaxIdleCount);
4420   } else {
4421     number_idle_notifications_ = 0;
4422     last_idle_notification_gc_count_ = gc_count_;
4423   }
4424
4425   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4426     CollectGarbage(NEW_SPACE, "idle notification");
4427     new_space_.Shrink();
4428     last_idle_notification_gc_count_ = gc_count_;
4429   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4430     // Before doing the mark-sweep collections we clear the
4431     // compilation cache to avoid hanging on to source code and
4432     // generated code for cached functions.
4433     isolate_->compilation_cache()->Clear();
4434
4435     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4436     new_space_.Shrink();
4437     last_idle_notification_gc_count_ = gc_count_;
4438
4439   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4440     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
4441     new_space_.Shrink();
4442     last_idle_notification_gc_count_ = gc_count_;
4443     number_idle_notifications_ = 0;
4444     finished = true;
4445   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4446     // If we have received more than kIdlesBeforeMarkCompact idle
4447     // notifications we do not perform any cleanup because we don't
4448     // expect to gain much by doing so.
4449     finished = true;
4450   }
4451
4452   if (uncommit) UncommitFromSpace();
4453
4454   return finished;
4455 }
4456
4457
4458 #ifdef DEBUG
4459
4460 void Heap::Print() {
4461   if (!HasBeenSetUp()) return;
4462   isolate()->PrintStack(stdout);
4463   AllSpaces spaces(this);
4464   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4465     space->Print();
4466   }
4467 }
4468
4469
4470 void Heap::ReportCodeStatistics(const char* title) {
4471   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4472   PagedSpace::ResetCodeStatistics(isolate());
4473   // We do not look for code in new space, map space, or old space.  If code
4474   // somehow ends up in those spaces, we would miss it here.
4475   code_space_->CollectCodeStatistics();
4476   lo_space_->CollectCodeStatistics();
4477   PagedSpace::ReportCodeStatistics(isolate());
4478 }
4479
4480
4481 // This function expects that NewSpace's allocated objects histogram is
4482 // populated (via a call to CollectStatistics or else as a side effect of a
4483 // just-completed scavenge collection).
4484 void Heap::ReportHeapStatistics(const char* title) {
4485   USE(title);
4486   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4487          title, gc_count_);
4488   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4489          old_generation_allocation_limit_);
4490
4491   PrintF("\n");
4492   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4493   isolate_->global_handles()->PrintStats();
4494   PrintF("\n");
4495
4496   PrintF("Heap statistics : ");
4497   isolate_->memory_allocator()->ReportStatistics();
4498   PrintF("To space : ");
4499   new_space_.ReportStatistics();
4500   PrintF("Old pointer space : ");
4501   old_pointer_space_->ReportStatistics();
4502   PrintF("Old data space : ");
4503   old_data_space_->ReportStatistics();
4504   PrintF("Code space : ");
4505   code_space_->ReportStatistics();
4506   PrintF("Map space : ");
4507   map_space_->ReportStatistics();
4508   PrintF("Cell space : ");
4509   cell_space_->ReportStatistics();
4510   PrintF("PropertyCell space : ");
4511   property_cell_space_->ReportStatistics();
4512   PrintF("Large object space : ");
4513   lo_space_->ReportStatistics();
4514   PrintF(">>>>>> ========================================= >>>>>>\n");
4515 }
4516
4517 #endif  // DEBUG
4518
4519 bool Heap::Contains(HeapObject* value) {
4520   return Contains(value->address());
4521 }
4522
4523
4524 bool Heap::Contains(Address addr) {
4525   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4526   return HasBeenSetUp() &&
4527     (new_space_.ToSpaceContains(addr) ||
4528      old_pointer_space_->Contains(addr) ||
4529      old_data_space_->Contains(addr) ||
4530      code_space_->Contains(addr) ||
4531      map_space_->Contains(addr) ||
4532      cell_space_->Contains(addr) ||
4533      property_cell_space_->Contains(addr) ||
4534      lo_space_->SlowContains(addr));
4535 }
4536
4537
4538 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4539   return InSpace(value->address(), space);
4540 }
4541
4542
4543 bool Heap::InSpace(Address addr, AllocationSpace space) {
4544   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4545   if (!HasBeenSetUp()) return false;
4546
4547   switch (space) {
4548     case NEW_SPACE:
4549       return new_space_.ToSpaceContains(addr);
4550     case OLD_POINTER_SPACE:
4551       return old_pointer_space_->Contains(addr);
4552     case OLD_DATA_SPACE:
4553       return old_data_space_->Contains(addr);
4554     case CODE_SPACE:
4555       return code_space_->Contains(addr);
4556     case MAP_SPACE:
4557       return map_space_->Contains(addr);
4558     case CELL_SPACE:
4559       return cell_space_->Contains(addr);
4560     case PROPERTY_CELL_SPACE:
4561       return property_cell_space_->Contains(addr);
4562     case LO_SPACE:
4563       return lo_space_->SlowContains(addr);
4564     case INVALID_SPACE:
4565       break;
4566   }
4567   UNREACHABLE();
4568   return false;
4569 }
4570
4571
4572 #ifdef VERIFY_HEAP
4573 void Heap::Verify() {
4574   CHECK(HasBeenSetUp());
4575   HandleScope scope(isolate());
4576
4577   store_buffer()->Verify();
4578
4579   VerifyPointersVisitor visitor;
4580   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4581
4582   VerifySmisVisitor smis_visitor;
4583   IterateSmiRoots(&smis_visitor);
4584
4585   new_space_.Verify();
4586
4587   old_pointer_space_->Verify(&visitor);
4588   map_space_->Verify(&visitor);
4589
4590   VerifyPointersVisitor no_dirty_regions_visitor;
4591   old_data_space_->Verify(&no_dirty_regions_visitor);
4592   code_space_->Verify(&no_dirty_regions_visitor);
4593   cell_space_->Verify(&no_dirty_regions_visitor);
4594   property_cell_space_->Verify(&no_dirty_regions_visitor);
4595
4596   lo_space_->Verify();
4597 }
4598 #endif
4599
4600
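     // Fills the whole of from space with kFromSpaceZapValue so that stale
     // pointers into from space are easy to recognize while debugging
     // (descriptive note; the zapping has no effect on correctness).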
4601 void Heap::ZapFromSpace() {
4602   NewSpacePageIterator it(new_space_.FromSpaceStart(),
4603                           new_space_.FromSpaceEnd());
4604   while (it.has_next()) {
4605     NewSpacePage* page = it.next();
4606     for (Address cursor = page->area_start(), limit = page->area_end();
4607          cursor < limit;
4608          cursor += kPointerSize) {
4609       Memory::Address_at(cursor) = kFromSpaceZapValue;
4610     }
4611   }
4612 }
4613
4614
4615 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4616                                              Address end,
4617                                              ObjectSlotCallback callback) {
4618   Address slot_address = start;
4619
4620   // We are not collecting slots on new space objects during mutation,
4621   // thus we have to scan for pointers to evacuation candidates when we
4622   // promote objects. But we should not record any slots in non-black
4623   // objects. A grey object's slots would be rescanned anyway.
4624   // A white object might not survive until the end of the collection,
4625   // so it would be a violation of the invariant to record its slots.
4626   bool record_slots = false;
4627   if (incremental_marking()->IsCompacting()) {
4628     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4629     record_slots = Marking::IsBlack(mark_bit);
4630   }
4631
4632   while (slot_address < end) {
4633     Object** slot = reinterpret_cast<Object**>(slot_address);
4634     Object* object = *slot;
4635     // If the store buffer becomes overfull we mark pages as being exempt from
4636     // the store buffer.  These pages are scanned to find pointers that point
4637     // to the new space.  In that case we may hit newly promoted objects and
4638     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
4639     if (object->IsHeapObject()) {
4640       if (Heap::InFromSpace(object)) {
4641         callback(reinterpret_cast<HeapObject**>(slot),
4642                  HeapObject::cast(object));
4643         Object* new_object = *slot;
4644         if (InNewSpace(new_object)) {
4645           SLOW_ASSERT(Heap::InToSpace(new_object));
4646           SLOW_ASSERT(new_object->IsHeapObject());
4647           store_buffer_.EnterDirectlyIntoStoreBuffer(
4648               reinterpret_cast<Address>(slot));
4649         }
4650         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4651       } else if (record_slots &&
4652                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4653         mark_compact_collector()->RecordSlot(slot, slot, object);
4654       }
4655     }
4656     slot_address += kPointerSize;
4657   }
4658 }
4659
4660
4661 #ifdef DEBUG
4662 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4663
4664
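     // Accepts an address only if its offset modulo Map::kSize falls inside a
     // map's pointer-fields range. This is a coarse filter that appears to
     // assume maps are packed back to back at Map::kSize intervals in the map
     // space (descriptive note, not a guarantee).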
4665 bool IsAMapPointerAddress(Object** addr) {
4666   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4667   int mod = a % Map::kSize;
4668   return mod >= Map::kPointerFieldsBeginOffset &&
4669          mod < Map::kPointerFieldsEndOffset;
4670 }
4671
4672
4673 bool EverythingsAPointer(Object** addr) {
4674   return true;
4675 }
4676
4677
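     // Descriptive summary: walks the words in [current, limit), skipping
     // free-space blocks and the current linear allocation area, and checks
     // that every pointer into new space that passes `filter` has a matching
     // entry in the (sorted) store buffer.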
4678 static void CheckStoreBuffer(Heap* heap,
4679                              Object** current,
4680                              Object** limit,
4681                              Object**** store_buffer_position,
4682                              Object*** store_buffer_top,
4683                              CheckStoreBufferFilter filter,
4684                              Address special_garbage_start,
4685                              Address special_garbage_end) {
4686   Map* free_space_map = heap->free_space_map();
4687   for ( ; current < limit; current++) {
4688     Object* o = *current;
4689     Address current_address = reinterpret_cast<Address>(current);
4690     // Skip free space.
4691     if (o == free_space_map) {
4693       FreeSpace* free_space =
4694           FreeSpace::cast(HeapObject::FromAddress(current_address));
4695       int skip = free_space->Size();
4696       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4697       ASSERT(skip > 0);
4698       current_address += skip - kPointerSize;
4699       current = reinterpret_cast<Object**>(current_address);
4700       continue;
4701     }
4702     // Skip the current linear allocation space between top and limit,
4703     // which is not marked with a free space map but can contain junk.
4704     if (current_address == special_garbage_start &&
4705         special_garbage_end != special_garbage_start) {
4706       current_address = special_garbage_end - kPointerSize;
4707       current = reinterpret_cast<Object**>(current_address);
4708       continue;
4709     }
4710     if (!(*filter)(current)) continue;
4711     ASSERT(current_address < special_garbage_start ||
4712            current_address >= special_garbage_end);
4713     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4714     // We have to check that the pointer does not point into new space
4715     // without trying to cast it to a heap object since the hash field of
4716     // a string can contain values like 1 and 3 which are tagged null
4717     // pointers.
4718     if (!heap->InNewSpace(o)) continue;
4719     while (**store_buffer_position < current &&
4720            *store_buffer_position < store_buffer_top) {
4721       (*store_buffer_position)++;
4722     }
4723     if (**store_buffer_position != current ||
4724         *store_buffer_position == store_buffer_top) {
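           // Walk back to the containing object's map word so the offending
           // object can be inspected (e.g. in a debugger) before UNREACHABLE
           // aborts; the walk itself has no other effect.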
4725       Object** obj_start = current;
4726       while (!(*obj_start)->IsMap()) obj_start--;
4727       UNREACHABLE();
4728     }
4729   }
4730 }
4731
4732
4733 // Check that the store buffer contains all intergenerational pointers by
4734 // scanning a page and ensuring that all pointers to young space are in the
4735 // store buffer.
4736 void Heap::OldPointerSpaceCheckStoreBuffer() {
4737   OldSpace* space = old_pointer_space();
4738   PageIterator pages(space);
4739
4740   store_buffer()->SortUniq();
4741
4742   while (pages.has_next()) {
4743     Page* page = pages.next();
4744     Object** current = reinterpret_cast<Object**>(page->area_start());
4745
4746     Address end = page->area_end();
4747
4748     Object*** store_buffer_position = store_buffer()->Start();
4749     Object*** store_buffer_top = store_buffer()->Top();
4750
4751     Object** limit = reinterpret_cast<Object**>(end);
4752     CheckStoreBuffer(this,
4753                      current,
4754                      limit,
4755                      &store_buffer_position,
4756                      store_buffer_top,
4757                      &EverythingsAPointer,
4758                      space->top(),
4759                      space->limit());
4760   }
4761 }
4762
4763
4764 void Heap::MapSpaceCheckStoreBuffer() {
4765   MapSpace* space = map_space();
4766   PageIterator pages(space);
4767
4768   store_buffer()->SortUniq();
4769
4770   while (pages.has_next()) {
4771     Page* page = pages.next();
4772     Object** current = reinterpret_cast<Object**>(page->area_start());
4773
4774     Address end = page->area_end();
4775
4776     Object*** store_buffer_position = store_buffer()->Start();
4777     Object*** store_buffer_top = store_buffer()->Top();
4778
4779     Object** limit = reinterpret_cast<Object**>(end);
4780     CheckStoreBuffer(this,
4781                      current,
4782                      limit,
4783                      &store_buffer_position,
4784                      store_buffer_top,
4785                      &IsAMapPointerAddress,
4786                      space->top(),
4787                      space->limit());
4788   }
4789 }
4790
4791
4792 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4793   LargeObjectIterator it(lo_space());
4794   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4795     // We only have code, sequential strings, or fixed arrays in large
4796     // object space, and only fixed arrays can possibly contain pointers to
4797     // the young generation.
4798     if (object->IsFixedArray()) {
4799       Object*** store_buffer_position = store_buffer()->Start();
4800       Object*** store_buffer_top = store_buffer()->Top();
4801       Object** current = reinterpret_cast<Object**>(object->address());
4802       Object** limit =
4803           reinterpret_cast<Object**>(object->address() + object->Size());
4804       CheckStoreBuffer(this,
4805                        current,
4806                        limit,
4807                        &store_buffer_position,
4808                        store_buffer_top,
4809                        &EverythingsAPointer,
4810                        NULL,
4811                        NULL);
4812     }
4813   }
4814 }
4815 #endif
4816
4817
4818 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4819   IterateStrongRoots(v, mode);
4820   IterateWeakRoots(v, mode);
4821 }
4822
4823
4824 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4825   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4826   v->Synchronize(VisitorSynchronization::kStringTable);
4827   if (mode != VISIT_ALL_IN_SCAVENGE &&
4828       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4829     // Scavenge collections have special processing for this.
4830     external_string_table_.Iterate(v);
4831   }
4832   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4833 }
4834
4835
4836 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4837   // Acquire execution access since we are going to read stack limit values.
4838   ExecutionAccess access(isolate());
4839   v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4840   v->Synchronize(VisitorSynchronization::kSmiRootList);
4841 }
4842
4843
4844 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4845   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4846   v->Synchronize(VisitorSynchronization::kStrongRootList);
4847
4848   v->VisitPointer(BitCast<Object**>(&hidden_string_));
4849   v->Synchronize(VisitorSynchronization::kInternalizedString);
4850
4851   isolate_->bootstrapper()->Iterate(v);
4852   v->Synchronize(VisitorSynchronization::kBootstrapper);
4853   isolate_->Iterate(v);
4854   v->Synchronize(VisitorSynchronization::kTop);
4855   Relocatable::Iterate(isolate_, v);
4856   v->Synchronize(VisitorSynchronization::kRelocatable);
4857
4858   if (isolate_->deoptimizer_data() != NULL) {
4859     isolate_->deoptimizer_data()->Iterate(v);
4860   }
4861   v->Synchronize(VisitorSynchronization::kDebug);
4862   isolate_->compilation_cache()->Iterate(v);
4863   v->Synchronize(VisitorSynchronization::kCompilationCache);
4864
4865   // Iterate over local handles in handle scopes.
4866   isolate_->handle_scope_implementer()->Iterate(v);
4867   isolate_->IterateDeferredHandles(v);
4868   v->Synchronize(VisitorSynchronization::kHandleScope);
4869
4870   // Iterate over the builtin code objects and code stubs in the
4871   // heap. Note that it is not necessary to iterate over code objects
4872   // on scavenge collections.
4873   if (mode != VISIT_ALL_IN_SCAVENGE) {
4874     isolate_->builtins()->IterateBuiltins(v);
4875   }
4876   v->Synchronize(VisitorSynchronization::kBuiltins);
4877
4878   // Iterate over global handles.
4879   switch (mode) {
4880     case VISIT_ONLY_STRONG:
4881       isolate_->global_handles()->IterateStrongRoots(v);
4882       break;
4883     case VISIT_ALL_IN_SCAVENGE:
4884       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4885       break;
4886     case VISIT_ALL_IN_SWEEP_NEWSPACE:
4887     case VISIT_ALL:
4888       isolate_->global_handles()->IterateAllRoots(v);
4889       break;
4890   }
4891   v->Synchronize(VisitorSynchronization::kGlobalHandles);
4892
4893   // Iterate over eternal handles.
4894   if (mode == VISIT_ALL_IN_SCAVENGE) {
4895     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4896   } else {
4897     isolate_->eternal_handles()->IterateAllRoots(v);
4898   }
4899   v->Synchronize(VisitorSynchronization::kEternalHandles);
4900
4901   // Iterate over pointers being held by inactive threads.
4902   isolate_->thread_manager()->Iterate(v);
4903   v->Synchronize(VisitorSynchronization::kThreadManager);
4904
4905   // Iterate over the pointers the Serialization/Deserialization code is
4906   // holding.
4907   // During garbage collection this keeps the partial snapshot cache alive.
4908   // During deserialization of the startup snapshot this creates the partial
4909   // snapshot cache and deserializes the objects it refers to.  During
4910   // serialization this does nothing, since the partial snapshot cache is
4911   // empty.  However the next thing we do is create the partial snapshot,
4912   // filling up the partial snapshot cache with objects it needs as we go.
4913   SerializerDeserializer::Iterate(isolate_, v);
4914   // We don't do a v->Synchronize call here, because in debug mode that will
4915   // output a flag to the snapshot.  However at this point the serializer and
4916   // deserializer are deliberately a little unsynchronized (see above) so the
4917   // checking of the sync flag in the snapshot would fail.
4918 }
4919
4920
4921 // TODO(1236194): Since the heap size is configurable on the command line
4922 // and through the API, we should gracefully handle the case that the heap
4923 // size is not big enough to fit all the initial objects.
4924 bool Heap::ConfigureHeap(int max_semi_space_size,
4925                          int max_old_space_size,
4926                          int max_executable_size,
4927                          size_t code_range_size) {
4928   if (HasBeenSetUp()) return false;
4929
4930   // Overwrite default configuration.
4931   if (max_semi_space_size > 0) {
4932     max_semi_space_size_ = max_semi_space_size * MB;
4933   }
4934   if (max_old_space_size > 0) {
4935     max_old_generation_size_ = max_old_space_size * MB;
4936   }
4937   if (max_executable_size > 0) {
4938     max_executable_size_ = max_executable_size * MB;
4939   }
4940
4941   // If max space size flags are specified overwrite the configuration.
4942   if (FLAG_max_semi_space_size > 0) {
4943     max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4944   }
4945   if (FLAG_max_old_space_size > 0) {
4946     max_old_generation_size_ = FLAG_max_old_space_size * MB;
4947   }
4948   if (FLAG_max_executable_size > 0) {
4949     max_executable_size_ = FLAG_max_executable_size * MB;
4950   }
4951
4952   if (FLAG_stress_compaction) {
4953     // This will cause more frequent GCs when stressing.
4954     max_semi_space_size_ = Page::kPageSize;
4955   }
4956
4957   if (Snapshot::IsEnabled()) {
4958     // If we are using a snapshot we always reserve the default amount
4959     // of memory for each semispace because code in the snapshot has
4960     // write-barrier code that relies on the size and alignment of new
4961     // space.  We therefore cannot use a larger max semispace size
4962     // than the default reserved semispace size.
4963     if (max_semi_space_size_ > reserved_semispace_size_) {
4964       max_semi_space_size_ = reserved_semispace_size_;
4965       if (FLAG_trace_gc) {
4966         PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4967                  reserved_semispace_size_ >> 10);
4968       }
4969     }
4970   } else {
4971     // If we are not using snapshots we reserve space for the actual
4972     // max semispace size.
4973     reserved_semispace_size_ = max_semi_space_size_;
4974   }
4975
4976   // The max executable size must be less than or equal to the max old
4977   // generation size.
4978   if (max_executable_size_ > max_old_generation_size_) {
4979     max_executable_size_ = max_old_generation_size_;
4980   }
4981
4982   // The new space size must be a power of two to support single-bit testing
4983   // for containment.
4984   max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
4985   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
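       // With a power-of-two, size-aligned semispace the containment check
       // reduces, roughly, to a single mask-and-compare of the form
       // (addr & ~(size - 1)) == base (general idea only; the concrete test
       // lives in the new space implementation).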
4986
4987   if (FLAG_min_semi_space_size > 0) {
4988     int initial_semispace_size = FLAG_min_semi_space_size * MB;
4989     if (initial_semispace_size > max_semi_space_size_) {
4990       initial_semispace_size_ = max_semi_space_size_;
4991       if (FLAG_trace_gc) {
4992         PrintPID("Min semi-space size cannot be more than the maximum "
4993                  "semi-space size of %d MB\n", max_semi_space_size_ / MB);
4994       }
4995     } else {
4996       initial_semispace_size_ = initial_semispace_size;
4997     }
4998   }
4999
5000   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
5001
5002   // The old generation is paged and needs at least one page for each space.
5003   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5004   max_old_generation_size_ =
5005       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
5006           max_old_generation_size_);
5007
5008   // We rely on being able to allocate new arrays in paged spaces.
5009   ASSERT(Page::kMaxRegularHeapObjectSize >=
5010          (JSArray::kSize +
5011           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
5012           AllocationMemento::kSize));
5013
5014   code_range_size_ = code_range_size * MB;
5015
5016   configured_ = true;
5017   return true;
5018 }
5019
5020
5021 bool Heap::ConfigureHeapDefault() {
5022   return ConfigureHeap(0, 0, 0, 0);
5023 }
5024
5025
5026 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5027   *stats->start_marker = HeapStats::kStartMarker;
5028   *stats->end_marker = HeapStats::kEndMarker;
5029   *stats->new_space_size = new_space_.SizeAsInt();
5030   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5031   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
5032   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5033   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
5034   *stats->old_data_space_capacity = old_data_space_->Capacity();
5035   *stats->code_space_size = code_space_->SizeOfObjects();
5036   *stats->code_space_capacity = code_space_->Capacity();
5037   *stats->map_space_size = map_space_->SizeOfObjects();
5038   *stats->map_space_capacity = map_space_->Capacity();
5039   *stats->cell_space_size = cell_space_->SizeOfObjects();
5040   *stats->cell_space_capacity = cell_space_->Capacity();
5041   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
5042   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
5043   *stats->lo_space_size = lo_space_->Size();
5044   isolate_->global_handles()->RecordStats(stats);
5045   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5046   *stats->memory_allocator_capacity =
5047       isolate()->memory_allocator()->Size() +
5048       isolate()->memory_allocator()->Available();
5049   *stats->os_error = OS::GetLastError();
5051   if (take_snapshot) {
5052     HeapIterator iterator(this);
5053     for (HeapObject* obj = iterator.next();
5054          obj != NULL;
5055          obj = iterator.next()) {
5056       InstanceType type = obj->map()->instance_type();
5057       ASSERT(0 <= type && type <= LAST_TYPE);
5058       stats->objects_per_type[type]++;
5059       stats->size_per_type[type] += obj->Size();
5060     }
5061   }
5062 }
5063
5064
5065 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5066   return old_pointer_space_->SizeOfObjects()
5067       + old_data_space_->SizeOfObjects()
5068       + code_space_->SizeOfObjects()
5069       + map_space_->SizeOfObjects()
5070       + cell_space_->SizeOfObjects()
5071       + property_cell_space_->SizeOfObjects()
5072       + lo_space_->SizeOfObjects();
5073 }
5074
5075
5076 int64_t Heap::PromotedExternalMemorySize() {
5077   if (amount_of_external_allocated_memory_
5078       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
5079   return amount_of_external_allocated_memory_
5080       - amount_of_external_allocated_memory_at_last_global_gc_;
5081 }
5082
5083
5084 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5085                                             int freed_global_handles) {
5086   const int kMaxHandles = 1000;
5087   const int kMinHandles = 100;
5088   double min_factor = 1.1;
5089   double max_factor = 4;
5090   // We set the old generation growing factor to 2 to grow the heap more
5091   // slowly on memory-constrained devices.
5092   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5093     max_factor = 2;
5094   }
5095   // If there are many freed global handles, then the next full GC will
5096   // likely collect a lot of garbage. Choose the heap growing factor
5097   // depending on freed global handles.
5098   // TODO(ulan, hpayer): Take into account mutator utilization.
5099   double factor;
5100   if (freed_global_handles <= kMinHandles) {
5101     factor = max_factor;
5102   } else if (freed_global_handles >= kMaxHandles) {
5103     factor = min_factor;
5104   } else {
5105     // Compute factor using linear interpolation between points
5106     // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
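         // For example, freed_global_handles == 550 (the midpoint of the two
         // endpoints) yields a factor exactly halfway between max_factor and
         // min_factor (illustrative arithmetic only).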
5107     factor = max_factor -
5108              (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5109              (kMaxHandles - kMinHandles);
5110   }
5111
5112   if (FLAG_stress_compaction ||
5113       mark_compact_collector()->reduce_memory_footprint_) {
5114     factor = min_factor;
5115   }
5116
5117   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5118   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5119   limit += new_space_.Capacity();
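       // Cap the limit at the midpoint between the current old generation
       // size and the configured maximum, so the heap approaches the hard
       // limit gradually rather than in one step (descriptive note).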
5120   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5121   return Min(limit, halfway_to_the_max);
5122 }
5123
5124
5125 void Heap::EnableInlineAllocation() {
5126   if (!inline_allocation_disabled_) return;
5127   inline_allocation_disabled_ = false;
5128
5129   // Update inline allocation limit for new space.
5130   new_space()->UpdateInlineAllocationLimit(0);
5131 }
5132
5133
5134 void Heap::DisableInlineAllocation() {
5135   if (inline_allocation_disabled_) return;
5136   inline_allocation_disabled_ = true;
5137
5138   // Update inline allocation limit for new space.
5139   new_space()->UpdateInlineAllocationLimit(0);
5140
5141   // Update inline allocation limit for old spaces.
5142   PagedSpaces spaces(this);
5143   for (PagedSpace* space = spaces.next();
5144        space != NULL;
5145        space = spaces.next()) {
5146     space->EmptyAllocationInfo();
5147   }
5148 }
5149
5150
5151 V8_DECLARE_ONCE(initialize_gc_once);
5152
5153 static void InitializeGCOnce() {
5154   InitializeScavengingVisitorsTables();
5155   NewSpaceScavenger::Initialize();
5156   MarkCompactCollector::Initialize();
5157 }
5158
5159
5160 bool Heap::SetUp() {
5161 #ifdef DEBUG
5162   allocation_timeout_ = FLAG_gc_interval;
5163 #endif
5164
5165   // Initialize heap spaces and initial maps and objects. Whenever something
5166   // goes wrong, just return false. The caller should check the results and
5167   // call Heap::TearDown() to release allocated memory.
5168   //
5169   // If the heap is not yet configured (e.g. through the API), configure it.
5170   // Configuration is based on the flags new-space-size (really the semispace
5171   // size) and old-space-size if set or the initial values of semispace_size_
5172   // and old_generation_size_ otherwise.
5173   if (!configured_) {
5174     if (!ConfigureHeapDefault()) return false;
5175   }
5176
5177   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5178
5179   MarkMapPointersAsEncoded(false);
5180
5181   // Set up memory allocator.
5182   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5183       return false;
5184
5185   // Set up new space.
5186   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5187     return false;
5188   }
5189   new_space_top_after_last_gc_ = new_space()->top();
5190
5191   // Initialize old pointer space.
5192   old_pointer_space_ =
5193       new OldSpace(this,
5194                    max_old_generation_size_,
5195                    OLD_POINTER_SPACE,
5196                    NOT_EXECUTABLE);
5197   if (old_pointer_space_ == NULL) return false;
5198   if (!old_pointer_space_->SetUp()) return false;
5199
5200   // Initialize old data space.
5201   old_data_space_ =
5202       new OldSpace(this,
5203                    max_old_generation_size_,
5204                    OLD_DATA_SPACE,
5205                    NOT_EXECUTABLE);
5206   if (old_data_space_ == NULL) return false;
5207   if (!old_data_space_->SetUp()) return false;
5208
5209   if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5210
5211   // Initialize the code space, set its maximum capacity to the old
5212   // generation size. It needs executable memory.
5213   code_space_ =
5214       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5215   if (code_space_ == NULL) return false;
5216   if (!code_space_->SetUp()) return false;
5217
5218   // Initialize map space.
5219   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5220   if (map_space_ == NULL) return false;
5221   if (!map_space_->SetUp()) return false;
5222
5223   // Initialize simple cell space.
5224   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5225   if (cell_space_ == NULL) return false;
5226   if (!cell_space_->SetUp()) return false;
5227
5228   // Initialize global property cell space.
5229   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5230                                                PROPERTY_CELL_SPACE);
5231   if (property_cell_space_ == NULL) return false;
5232   if (!property_cell_space_->SetUp()) return false;
5233
5234   // The large object space may contain code or data.  We set the memory
5235   // to be non-executable here for safety, but this means we need to enable it
5236   // explicitly when allocating large code objects.
5237   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5238   if (lo_space_ == NULL) return false;
5239   if (!lo_space_->SetUp()) return false;
5240
5241   // Set up the seed that is used to randomize the string hash function.
5242   ASSERT(hash_seed() == 0);
5243   if (FLAG_randomize_hashes) {
5244     if (FLAG_hash_seed == 0) {
5245       int rnd = isolate()->random_number_generator()->NextInt();
5246       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5247     } else {
5248       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5249     }
5250   }
5251
5252   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5253   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5254
5255   store_buffer()->SetUp();
5256
5257   mark_compact_collector()->SetUp();
5258
5259   return true;
5260 }
5261
5262
5263 bool Heap::CreateHeapObjects() {
5264   // Create initial maps.
5265   if (!CreateInitialMaps()) return false;
5266   CreateApiObjects();
5267
5268   // Create initial objects
5269   CreateInitialObjects();
5270   CHECK_EQ(0, gc_count_);
5271
5272   set_native_contexts_list(undefined_value());
5273   set_array_buffers_list(undefined_value());
5274   set_allocation_sites_list(undefined_value());
5275   weak_object_to_code_table_ = undefined_value();
5276   return true;
5277 }
5278
5279
5280 void Heap::SetStackLimits() {
5281   ASSERT(isolate_ != NULL);
5282   ASSERT(isolate_ == isolate());
5283   // On 64 bit machines, pointers are generally out of range of Smis.  We write
5284   // something that looks like an out of range Smi to the GC.
5285
5286   // Set up the special root array entries containing the stack limits.
5287   // These are actually addresses, but the tag makes the GC ignore it.
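       // Concretely, (value & ~kSmiTagMask) | kSmiTag clears the pointer's
       // tag bits and applies the Smi tag, so root visitors treat the stored
       // stack limits as Smis and never dereference them.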
5288   roots_[kStackLimitRootIndex] =
5289       reinterpret_cast<Object*>(
5290           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5291   roots_[kRealStackLimitRootIndex] =
5292       reinterpret_cast<Object*>(
5293           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5294 }
5295
5296
5297 void Heap::TearDown() {
5298 #ifdef VERIFY_HEAP
5299   if (FLAG_verify_heap) {
5300     Verify();
5301   }
5302 #endif
5303
5304   UpdateMaximumCommitted();
5305
5306   if (FLAG_print_cumulative_gc_stat) {
5307     PrintF("\n");
5308     PrintF("gc_count=%d ", gc_count_);
5309     PrintF("mark_sweep_count=%d ", ms_count_);
5310     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5311     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5312     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5313     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5314            get_max_alive_after_gc());
5315     PrintF("total_marking_time=%.1f ", marking_time());
5316     PrintF("total_sweeping_time=%.1f ", sweeping_time());
5317     PrintF("\n\n");
5318   }
5319
5320   if (FLAG_print_max_heap_committed) {
5321     PrintF("\n");
5322     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5323       MaximumCommittedMemory());
5324     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5325       new_space_.MaximumCommittedMemory());
5326     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5327       old_pointer_space_->MaximumCommittedMemory());
5328     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5329       old_data_space_->MaximumCommittedMemory());
5332     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5333       code_space_->MaximumCommittedMemory());
5334     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5335       map_space_->MaximumCommittedMemory());
5336     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5337       cell_space_->MaximumCommittedMemory());
5338     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5339       property_cell_space_->MaximumCommittedMemory());
5340     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5341       lo_space_->MaximumCommittedMemory());
5342     PrintF("\n\n");
5343   }
5344
5345   TearDownArrayBuffers();
5346
5347   isolate_->global_handles()->TearDown();
5348
5349   external_string_table_.TearDown();
5350
5351   mark_compact_collector()->TearDown();
5352
5353   new_space_.TearDown();
5354
5355   if (old_pointer_space_ != NULL) {
5356     old_pointer_space_->TearDown();
5357     delete old_pointer_space_;
5358     old_pointer_space_ = NULL;
5359   }
5360
5361   if (old_data_space_ != NULL) {
5362     old_data_space_->TearDown();
5363     delete old_data_space_;
5364     old_data_space_ = NULL;
5365   }
5366
5367   if (code_space_ != NULL) {
5368     code_space_->TearDown();
5369     delete code_space_;
5370     code_space_ = NULL;
5371   }
5372
5373   if (map_space_ != NULL) {
5374     map_space_->TearDown();
5375     delete map_space_;
5376     map_space_ = NULL;
5377   }
5378
5379   if (cell_space_ != NULL) {
5380     cell_space_->TearDown();
5381     delete cell_space_;
5382     cell_space_ = NULL;
5383   }
5384
5385   if (property_cell_space_ != NULL) {
5386     property_cell_space_->TearDown();
5387     delete property_cell_space_;
5388     property_cell_space_ = NULL;
5389   }
5390
5391   if (lo_space_ != NULL) {
5392     lo_space_->TearDown();
5393     delete lo_space_;
5394     lo_space_ = NULL;
5395   }
5396
5397   store_buffer()->TearDown();
5398   incremental_marking()->TearDown();
5399
5400   isolate_->memory_allocator()->TearDown();
5401 }
5402
5403
5404 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5405                                  GCType gc_type,
5406                                  bool pass_isolate) {
5407   ASSERT(callback != NULL);
5408   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5409   ASSERT(!gc_prologue_callbacks_.Contains(pair));
5410   return gc_prologue_callbacks_.Add(pair);
5411 }
5412
5413
5414 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5415   ASSERT(callback != NULL);
5416   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5417     if (gc_prologue_callbacks_[i].callback == callback) {
5418       gc_prologue_callbacks_.Remove(i);
5419       return;
5420     }
5421   }
5422   UNREACHABLE();
5423 }
5424
5425
5426 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5427                                  GCType gc_type,
5428                                  bool pass_isolate) {
5429   ASSERT(callback != NULL);
5430   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5431   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
5432   return gc_epilogue_callbacks_.Add(pair);
5433 }
5434
5435
5436 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5437   ASSERT(callback != NULL);
5438   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5439     if (gc_epilogue_callbacks_[i].callback == callback) {
5440       gc_epilogue_callbacks_.Remove(i);
5441       return;
5442     }
5443   }
5444   UNREACHABLE();
5445 }
5446
5447
5448 // TODO(ishell): Find a better place for this.
5449 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5450                                          Handle<DependentCode> dep) {
5451   ASSERT(!InNewSpace(*obj));
5452   ASSERT(!InNewSpace(*dep));
5453   // This handle scope keeps the table handle local to this function, which
5454   // allows us to safely skip write barriers in table update operations.
5455   HandleScope scope(isolate());
5456   Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5457                               isolate());
5458   table = WeakHashTable::Put(table, obj, dep);
5459
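       // If Put allocated a new backing table, the old one is now unreachable
       // garbage; in zap-garbage mode it is cleared below, presumably so that
       // any stale reference to it fails loudly.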
5460   if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5461     WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5462   }
5463   set_weak_object_to_code_table(*table);
5464   ASSERT_EQ(*dep, table->Lookup(obj));
5465 }
5466
5467
5468 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5469   Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5470   if (dep->IsDependentCode()) return DependentCode::cast(dep);
5471   return DependentCode::cast(empty_fixed_array());
5472 }
5473
5474
5475 void Heap::EnsureWeakObjectToCodeTable() {
5476   if (!weak_object_to_code_table()->IsHashTable()) {
5477     set_weak_object_to_code_table(*WeakHashTable::New(
5478         isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
5479   }
5480 }
5481
5482
5483 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5484   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5485 }
5486
5487 #ifdef DEBUG
5488
5489 class PrintHandleVisitor: public ObjectVisitor {
5490  public:
5491   void VisitPointers(Object** start, Object** end) {
5492     for (Object** p = start; p < end; p++)
5493       PrintF("  handle %p to %p\n",
5494              reinterpret_cast<void*>(p),
5495              reinterpret_cast<void*>(*p));
5496   }
5497 };
5498
5499
5500 void Heap::PrintHandles() {
5501   PrintF("Handles:\n");
5502   PrintHandleVisitor v;
5503   isolate_->handle_scope_implementer()->Iterate(&v);
5504 }
5505
5506 #endif
5507
5508
5509 Space* AllSpaces::next() {
5510   switch (counter_++) {
5511     case NEW_SPACE:
5512       return heap_->new_space();
5513     case OLD_POINTER_SPACE:
5514       return heap_->old_pointer_space();
5515     case OLD_DATA_SPACE:
5516       return heap_->old_data_space();
5517     case CODE_SPACE:
5518       return heap_->code_space();
5519     case MAP_SPACE:
5520       return heap_->map_space();
5521     case CELL_SPACE:
5522       return heap_->cell_space();
5523     case PROPERTY_CELL_SPACE:
5524       return heap_->property_cell_space();
5525     case LO_SPACE:
5526       return heap_->lo_space();
5527     default:
5528       return NULL;
5529   }
5530 }
5531
5532
5533 PagedSpace* PagedSpaces::next() {
5534   switch (counter_++) {
5535     case OLD_POINTER_SPACE:
5536       return heap_->old_pointer_space();
5537     case OLD_DATA_SPACE:
5538       return heap_->old_data_space();
5539     case CODE_SPACE:
5540       return heap_->code_space();
5541     case MAP_SPACE:
5542       return heap_->map_space();
5543     case CELL_SPACE:
5544       return heap_->cell_space();
5545     case PROPERTY_CELL_SPACE:
5546       return heap_->property_cell_space();
5547     default:
5548       return NULL;
5549   }
5550 }
5551
5552
5553
5554 OldSpace* OldSpaces::next() {
5555   switch (counter_++) {
5556     case OLD_POINTER_SPACE:
5557       return heap_->old_pointer_space();
5558     case OLD_DATA_SPACE:
5559       return heap_->old_data_space();
5560     case CODE_SPACE:
5561       return heap_->code_space();
5562     default:
5563       return NULL;
5564   }
5565 }
5566
5567
5568 SpaceIterator::SpaceIterator(Heap* heap)
5569     : heap_(heap),
5570       current_space_(FIRST_SPACE),
5571       iterator_(NULL),
5572       size_func_(NULL) {
5573 }
5574
5575
5576 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5577     : heap_(heap),
5578       current_space_(FIRST_SPACE),
5579       iterator_(NULL),
5580       size_func_(size_func) {
5581 }
5582
5583
5584 SpaceIterator::~SpaceIterator() {
5585   // Delete active iterator if any.
5586   delete iterator_;
5587 }
5588
5589
5590 bool SpaceIterator::has_next() {
5591   // Iterate until no more spaces.
5592   return current_space_ != LAST_SPACE;
5593 }
5594
5595
5596 ObjectIterator* SpaceIterator::next() {
5597   if (iterator_ != NULL) {
5598     delete iterator_;
5599     iterator_ = NULL;
5600     // Move to the next space
5601     current_space_++;
5602     if (current_space_ > LAST_SPACE) {
5603       return NULL;
5604     }
5605   }
5606
5607   // Return iterator for the new current space.
5608   return CreateIterator();
5609 }
5610
5611
5612 // Create an iterator for the space to iterate.
5613 ObjectIterator* SpaceIterator::CreateIterator() {
5614   ASSERT(iterator_ == NULL);
5615
5616   switch (current_space_) {
5617     case NEW_SPACE:
5618       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5619       break;
5620     case OLD_POINTER_SPACE:
5621       iterator_ =
5622           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5623       break;
5624     case OLD_DATA_SPACE:
5625       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5626       break;
5627     case CODE_SPACE:
5628       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5629       break;
5630     case MAP_SPACE:
5631       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5632       break;
5633     case CELL_SPACE:
5634       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5635       break;
5636     case PROPERTY_CELL_SPACE:
5637       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
5638                                          size_func_);
5639       break;
5640     case LO_SPACE:
5641       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5642       break;
5643   }
5644
5645   // Return the newly allocated iterator.
5646   ASSERT(iterator_ != NULL);
5647   return iterator_;
5648 }
5649
5650
5651 class HeapObjectsFilter {
5652  public:
5653   virtual ~HeapObjectsFilter() {}
5654   virtual bool SkipObject(HeapObject* object) = 0;
5655 };
5656
5657
5658 class UnreachableObjectsFilter : public HeapObjectsFilter {
5659  public:
5660   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5661     MarkReachableObjects();
5662   }
5663
5664   ~UnreachableObjectsFilter() {
5665     heap_->mark_compact_collector()->ClearMarkbits();
5666   }
5667
5668   bool SkipObject(HeapObject* object) {
5669     MarkBit mark_bit = Marking::MarkBitFrom(object);
5670     return !mark_bit.Get();
5671   }
5672
5673  private:
5674   class MarkingVisitor : public ObjectVisitor {
5675    public:
5676     MarkingVisitor() : marking_stack_(10) {}
5677
5678     void VisitPointers(Object** start, Object** end) {
5679       for (Object** p = start; p < end; p++) {
5680         if (!(*p)->IsHeapObject()) continue;
5681         HeapObject* obj = HeapObject::cast(*p);
5682         MarkBit mark_bit = Marking::MarkBitFrom(obj);
5683         if (!mark_bit.Get()) {
5684           mark_bit.Set();
5685           marking_stack_.Add(obj);
5686         }
5687       }
5688     }
5689
5690     void TransitiveClosure() {
5691       while (!marking_stack_.is_empty()) {
5692         HeapObject* obj = marking_stack_.RemoveLast();
5693         obj->Iterate(this);
5694       }
5695     }
5696
5697    private:
5698     List<HeapObject*> marking_stack_;
5699   };
5700
5701   void MarkReachableObjects() {
5702     MarkingVisitor visitor;
5703     heap_->IterateRoots(&visitor, VISIT_ALL);
5704     visitor.TransitiveClosure();
5705   }
5706
5707   Heap* heap_;
5708   DisallowHeapAllocation no_allocation_;
5709 };
5710
5711
5712 HeapIterator::HeapIterator(Heap* heap)
5713     : make_heap_iterable_helper_(heap),
5714       no_heap_allocation_(),
5715       heap_(heap),
5716       filtering_(HeapIterator::kNoFiltering),
5717       filter_(NULL) {
5718   Init();
5719 }
5720
5721
5722 HeapIterator::HeapIterator(Heap* heap,
5723                            HeapIterator::HeapObjectsFiltering filtering)
5724     : make_heap_iterable_helper_(heap),
5725       no_heap_allocation_(),
5726       heap_(heap),
5727       filtering_(filtering),
5728       filter_(NULL) {
5729   Init();
5730 }
5731
5732
5733 HeapIterator::~HeapIterator() {
5734   Shutdown();
5735 }
5736
5737
5738 void HeapIterator::Init() {
5739   // Start the iteration.
5740   space_iterator_ = new SpaceIterator(heap_);
5741   switch (filtering_) {
5742     case kFilterUnreachable:
5743       filter_ = new UnreachableObjectsFilter(heap_);
5744       break;
5745     default:
5746       break;
5747   }
5748   object_iterator_ = space_iterator_->next();
5749 }
5750
5751
5752 void HeapIterator::Shutdown() {
5753 #ifdef DEBUG
5754   // Assert that in filtering mode we have iterated through all
5755   // objects. Otherwise, the heap will be left in an inconsistent state.
5756   if (filtering_ != kNoFiltering) {
5757     ASSERT(object_iterator_ == NULL);
5758   }
5759 #endif
5760   // Make sure the last iterator is deallocated.
5761   delete space_iterator_;
5762   space_iterator_ = NULL;
5763   object_iterator_ = NULL;
5764   delete filter_;
5765   filter_ = NULL;
5766 }
5767
5768
5769 HeapObject* HeapIterator::next() {
5770   if (filter_ == NULL) return NextObject();
5771
5772   HeapObject* obj = NextObject();
5773   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5774   return obj;
5775 }
5776
5777
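     // Returns the next object from the current space's iterator, advancing
     // to the next space when the current one is exhausted; returns NULL once
     // all spaces have been visited.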
5778 HeapObject* HeapIterator::NextObject() {
5779   // No iterator means we are done.
5780   if (object_iterator_ == NULL) return NULL;
5781
5782   if (HeapObject* obj = object_iterator_->next_object()) {
5783     // If the current iterator has more objects we are fine.
5784     return obj;
5785   } else {
5786     // Go through the spaces looking for one that has objects.
5787     while (space_iterator_->has_next()) {
5788       object_iterator_ = space_iterator_->next();
5789       if (HeapObject* obj = object_iterator_->next_object()) {
5790         return obj;
5791       }
5792     }
5793   }
5794   // Done with the last space.
5795   object_iterator_ = NULL;
5796   return NULL;
5797 }
5798
5799
5800 void HeapIterator::reset() {
5801   // Restart the iterator.
5802   Shutdown();
5803   Init();
5804 }
5805
5806
5807 #ifdef DEBUG
5808
5809 Object* const PathTracer::kAnyGlobalObject = NULL;
5810
5811 class PathTracer::MarkVisitor: public ObjectVisitor {
5812  public:
5813   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5814   void VisitPointers(Object** start, Object** end) {
5815     // Scan all HeapObject pointers in [start, end)
5816     for (Object** p = start; !tracer_->found() && (p < end); p++) {
5817       if ((*p)->IsHeapObject())
5818         tracer_->MarkRecursively(p, this);
5819     }
5820   }
5821
5822  private:
5823   PathTracer* tracer_;
5824 };
5825
5826
5827 class PathTracer::UnmarkVisitor: public ObjectVisitor {
5828  public:
5829   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5830   void VisitPointers(Object** start, Object** end) {
5831     // Scan all HeapObject pointers in [start, end)
5832     for (Object** p = start; p < end; p++) {
5833       if ((*p)->IsHeapObject())
5834         tracer_->UnmarkRecursively(p, this);
5835     }
5836   }
5837
5838  private:
5839   PathTracer* tracer_;
5840 };
5841
5842
5843 void PathTracer::VisitPointers(Object** start, Object** end) {
5844   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5845   // Visit all HeapObject pointers in [start, end)
5846   for (Object** p = start; !done && (p < end); p++) {
5847     if ((*p)->IsHeapObject()) {
5848       TracePathFrom(p);
5849       done = ((what_to_find_ == FIND_FIRST) && found_target_);
5850     }
5851   }
5852 }
5853
5854
5855 void PathTracer::Reset() {
5856   found_target_ = false;
5857   object_stack_.Clear();
5858 }
5859
5860
5861 void PathTracer::TracePathFrom(Object** root) {
5862   ASSERT((search_target_ == kAnyGlobalObject) ||
5863          search_target_->IsHeapObject());
5864   found_target_in_trace_ = false;
5865   Reset();
5866
5867   MarkVisitor mark_visitor(this);
5868   MarkRecursively(root, &mark_visitor);
5869
5870   UnmarkVisitor unmark_visitor(this);
5871   UnmarkRecursively(root, &unmark_visitor);
5872
5873   ProcessResults();
5874 }
5875
5876
5877 static bool SafeIsNativeContext(HeapObject* obj) {
5878   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5879 }
5880
5881
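     // Visited objects are marked by adding kMarkTag to the raw map word,
     // which makes the map word stop looking like a heap object and serves as
     // the "already seen" check below. UnmarkRecursively subtracts the tag
     // again to restore the original map.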
5882 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5883   if (!(*p)->IsHeapObject()) return;
5884
5885   HeapObject* obj = HeapObject::cast(*p);
5886
5887   MapWord map_word = obj->map_word();
5888   if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
5889
5890   if (found_target_in_trace_) return;  // stop if target found
5891   object_stack_.Add(obj);
5892   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5893       (obj == search_target_)) {
5894     found_target_in_trace_ = true;
5895     found_target_ = true;
5896     return;
5897   }
5898
5899   bool is_native_context = SafeIsNativeContext(obj);
5900
5901   // not visited yet
5902   Map* map = Map::cast(map_word.ToMap());
5903
5904   MapWord marked_map_word =
5905       MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5906   obj->set_map_word(marked_map_word);
5907
5908   // Scan the object body.
5909   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5910     // This is specialized to scan Contexts properly.
5911     Object** start = reinterpret_cast<Object**>(obj->address() +
5912                                                 Context::kHeaderSize);
5913     Object** end = reinterpret_cast<Object**>(obj->address() +
5914         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
5915     mark_visitor->VisitPointers(start, end);
5916   } else {
5917     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5918   }
5919
5920   // Scan the map after the body because the body is a lot more interesting
5921   // when doing leak detection.
5922   MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5923
5924   if (!found_target_in_trace_) {  // don't pop if found the target
5925     object_stack_.RemoveLast();
5926   }
5927 }
5928
5929
5930 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5931   if (!(*p)->IsHeapObject()) return;
5932
5933   HeapObject* obj = HeapObject::cast(*p);
5934
5935   MapWord map_word = obj->map_word();
5936   if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
5937
5938   MapWord unmarked_map_word =
5939       MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5940   obj->set_map_word(unmarked_map_word);
5941
5942   Map* map = Map::cast(unmarked_map_word.ToMap());
5943
5944   UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5945
5946   obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5947 }
5948
5949
5950 void PathTracer::ProcessResults() {
5951   if (found_target_) {
5952     PrintF("=====================================\n");
5953     PrintF("====        Path to object       ====\n");
5954     PrintF("=====================================\n\n");
5955
5956     ASSERT(!object_stack_.is_empty());
5957     for (int i = 0; i < object_stack_.length(); i++) {
5958       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
5959       Object* obj = object_stack_[i];
5960       obj->Print();
5961     }
5962     PrintF("=====================================\n");
5963   }
5964 }
5965
5966
5967 // Triggers a depth-first traversal of the objects reachable from one
5968 // given root object, finds a path to a specific heap object, and
5969 // prints it.
5970 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5971   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5972   tracer.VisitPointer(&root);
5973 }
5974
5975
5976 // Triggers a depth-first traversal of the objects reachable from the
5977 // roots, finds a path to a specific heap object, and prints it.
5978 void Heap::TracePathToObject(Object* target) {
5979   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5980   IterateRoots(&tracer, VISIT_ONLY_STRONG);
5981 }
5982
5983
5984 // Triggers a depth-first traversal of the objects reachable from the
5985 // roots, finds a path to any global object, and prints it. Useful for
5986 // determining the source of leaks of global objects.
5987 void Heap::TracePathToGlobal() {
5988   PathTracer tracer(PathTracer::kAnyGlobalObject,
5989                     PathTracer::FIND_ALL,
5990                     VISIT_ALL);
5991   IterateRoots(&tracer, VISIT_ONLY_STRONG);
5992 }
5993 #endif
5994
5995
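     // "Holes" are the bytes in the old spaces that are not occupied by live
     // objects, i.e. the sum of each space's wasted bytes and the bytes still
     // available on its free lists.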
5996 static intptr_t CountTotalHolesSize(Heap* heap) {
5997   intptr_t holes_size = 0;
5998   OldSpaces spaces(heap);
5999   for (OldSpace* space = spaces.next();
6000        space != NULL;
6001        space = spaces.next()) {
6002     holes_size += space->Waste() + space->Available();
6003   }
6004   return holes_size;
6005 }
6006
6007
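     // GCTracer is a scoped (RAII) helper around a single collection: the
     // constructor samples the pre-GC state, and the destructor accumulates
     // cumulative statistics and prints the FLAG_trace_gc / FLAG_trace_gc_nvp
     // output. Illustrative use (names are placeholders):
     //   { GCTracer tracer(heap, "gc reason", "collector reason");
     //     // ... perform the collection ...
     //   }  // stats are recorded and printed when tracer goes out of scope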
6008 GCTracer::GCTracer(Heap* heap,
6009                    const char* gc_reason,
6010                    const char* collector_reason)
6011     : start_time_(0.0),
6012       start_object_size_(0),
6013       start_memory_size_(0),
6014       gc_count_(0),
6015       full_gc_count_(0),
6016       allocated_since_last_gc_(0),
6017       spent_in_mutator_(0),
6018       nodes_died_in_new_space_(0),
6019       nodes_copied_in_new_space_(0),
6020       nodes_promoted_(0),
6021       heap_(heap),
6022       gc_reason_(gc_reason),
6023       collector_reason_(collector_reason) {
6024   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6025   start_time_ = OS::TimeCurrentMillis();
6026   start_object_size_ = heap_->SizeOfObjects();
6027   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
6028
6029   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6030     scopes_[i] = 0;
6031   }
6032
6033   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
6034
6035   allocated_since_last_gc_ =
6036       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6037
6038   if (heap_->last_gc_end_timestamp_ > 0) {
6039     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6040   }
6041
6042   steps_count_ = heap_->incremental_marking()->steps_count();
6043   steps_took_ = heap_->incremental_marking()->steps_took();
6044   longest_step_ = heap_->incremental_marking()->longest_step();
6045   steps_count_since_last_gc_ =
6046       heap_->incremental_marking()->steps_count_since_last_gc();
6047   steps_took_since_last_gc_ =
6048       heap_->incremental_marking()->steps_took_since_last_gc();
6049 }
6050
6051
6052 GCTracer::~GCTracer() {
6053   // Print ONE line iff the flag is set.
6054   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6055
6056   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6057
6058   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
6059   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
6060
6061   double time = heap_->last_gc_end_timestamp_ - start_time_;
6062
6063   // Update cumulative GC statistics if required.
6064   if (FLAG_print_cumulative_gc_stat) {
6065     heap_->total_gc_time_ms_ += time;
6066     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
6067     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
6068                                      heap_->alive_after_last_gc_);
6069     if (!first_gc) {
6070       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
6071                                    spent_in_mutator_);
6072     }
6073   } else if (FLAG_trace_gc_verbose) {
6074     heap_->total_gc_time_ms_ += time;
6075   }
6076
6077   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
6078
6079   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
6080
6081   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
6082   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
6083
6084   if (!FLAG_trace_gc_nvp) {
6085     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6086
6087     double end_memory_size_mb =
6088         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
6089
6090     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
6091            CollectorString(),
6092            static_cast<double>(start_object_size_) / MB,
6093            static_cast<double>(start_memory_size_) / MB,
6094            SizeOfHeapObjects(),
6095            end_memory_size_mb);
6096
6097     if (external_time > 0) PrintF("%d / ", external_time);
6098     PrintF("%.1f ms", time);
6099     if (steps_count_ > 0) {
6100       if (collector_ == SCAVENGER) {
6101         PrintF(" (+ %.1f ms in %d steps since last GC)",
6102                steps_took_since_last_gc_,
6103                steps_count_since_last_gc_);
6104       } else {
6105         PrintF(" (+ %.1f ms in %d steps since start of marking, "
6106                    "biggest step %.1f ms)",
6107                steps_took_,
6108                steps_count_,
6109                longest_step_);
6110       }
6111     }
6112
6113     if (gc_reason_ != NULL) {
6114       PrintF(" [%s]", gc_reason_);
6115     }
6116
6117     if (collector_reason_ != NULL) {
6118       PrintF(" [%s]", collector_reason_);
6119     }
6120
6121     PrintF(".\n");
6122   } else {
6123     PrintF("pause=%.1f ", time);
6124     PrintF("mutator=%.1f ", spent_in_mutator_);
6125     PrintF("gc=");
6126     switch (collector_) {
6127       case SCAVENGER:
6128         PrintF("s");
6129         break;
6130       case MARK_COMPACTOR:
6131         PrintF("ms");
6132         break;
6133       default:
6134         UNREACHABLE();
6135     }
6136     PrintF(" ");
6137
6138     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
6139     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
6140     PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
6141     PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
6142     PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
6143     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
6144     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
6145     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
6146     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
6147     PrintF("compaction_ptrs=%.1f ",
6148         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
6149     PrintF("intracompaction_ptrs=%.1f ",
6150         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
6151     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
6152     PrintF("weakcollection_process=%.1f ",
6153         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
6154     PrintF("weakcollection_clear=%.1f ",
6155         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
6156
6157     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
6158     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6159     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6160            in_free_list_or_wasted_before_gc_);
6161     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
6162
6163     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6164     PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
6165     PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
6166         heap_->semi_space_copied_object_size_);
6167     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
6168     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
6169     PrintF("nodes_promoted=%d ", nodes_promoted_);
6170     PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
6171     PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
6172
6173     if (collector_ == SCAVENGER) {
6174       PrintF("stepscount=%d ", steps_count_since_last_gc_);
6175       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
6176     } else {
6177       PrintF("stepscount=%d ", steps_count_);
6178       PrintF("stepstook=%.1f ", steps_took_);
6179       PrintF("longeststep=%.1f ", longest_step_);
6180     }
6181
6182     PrintF("\n");
6183   }
6184
6185   heap_->PrintShortHeapStatistics();
6186 }
6187
6188
6189 const char* GCTracer::CollectorString() {
6190   switch (collector_) {
6191     case SCAVENGER:
6192       return "Scavenge";
6193     case MARK_COMPACTOR:
6194       return "Mark-sweep";
6195   }
6196   return "Unknown GC";
6197 }
6198
6199
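     // The cache index is derived from the map pointer (shifted right to drop
     // the low alignment bits) xor'ed with the name's hash, masked to the
     // cache capacity.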
6200 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
6201   DisallowHeapAllocation no_gc;
6202   // Uses only lower 32 bits if pointers are larger.
6203   uintptr_t addr_hash =
6204       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
6205   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
6206 }
6207
6208
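     // Probes all kEntriesPerBucket entries of the bucket selected by Hash()
     // and returns the cached field offset on a (map, name) match, or
     // kNotFound.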
6209 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
6210   DisallowHeapAllocation no_gc;
6211   int index = (Hash(map, name) & kHashMask);
6212   for (int i = 0; i < kEntriesPerBucket; i++) {
6213     Key& key = keys_[index + i];
6214     if ((key.map == *map) && key.name->Equals(*name)) {
6215       return field_offsets_[index + i];
6216     }
6217   }
6218   return kNotFound;
6219 }
6220
6221
6222 void KeyedLookupCache::Update(Handle<Map> map,
6223                               Handle<Name> name,
6224                               int field_offset) {
6225   DisallowHeapAllocation no_gc;
6226   if (!name->IsUniqueName()) {
6227     if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
6228                                                 Handle<String>::cast(name)).
6229         ToHandle(&name)) {
6230       return;
6231     }
6232   }
6233   // This cache is cleared only between mark compact passes, so we expect the
6234   // cache to only contain old space names.
6235   ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));
6236
6237   int index = (Hash(map, name) & kHashMask);
6238   // After a GC there will be free slots, so we use them in order (this may
6239   // help to get the most frequently used one in position 0).
6240   for (int i = 0; i < kEntriesPerBucket; i++) {
6241     Key& key = keys_[index + i];
6242     Object* free_entry_indicator = NULL;
6243     if (key.map == free_entry_indicator) {
6244       key.map = *map;
6245       key.name = *name;
6246       field_offsets_[index + i] = field_offset;
6247       return;
6248     }
6249   }
6250   // No free entry found in this bucket, so we move them all down one and
6251   // put the new entry at position zero.
6252   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
6253     Key& key = keys_[index + i];
6254     Key& key2 = keys_[index + i - 1];
6255     key = key2;
6256     field_offsets_[index + i] = field_offsets_[index + i - 1];
6257   }
6258
6259   // Write the new first entry.
6260   Key& key = keys_[index];
6261   key.map = *map;
6262   key.name = *name;
6263   field_offsets_[index] = field_offset;
6264 }
6265
6266
6267 void KeyedLookupCache::Clear() {
6268   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6269 }
6270
6271
6272 void DescriptorLookupCache::Clear() {
6273   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6274 }
6275
6276
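     // Compacts both string lists in place: the-hole entries are dropped, and
     // strings that have been promoted out of new space are moved from
     // new_space_strings_ to old_space_strings_.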
6277 void ExternalStringTable::CleanUp() {
6278   int last = 0;
6279   for (int i = 0; i < new_space_strings_.length(); ++i) {
6280     if (new_space_strings_[i] == heap_->the_hole_value()) {
6281       continue;
6282     }
6283     ASSERT(new_space_strings_[i]->IsExternalString());
6284     if (heap_->InNewSpace(new_space_strings_[i])) {
6285       new_space_strings_[last++] = new_space_strings_[i];
6286     } else {
6287       old_space_strings_.Add(new_space_strings_[i]);
6288     }
6289   }
6290   new_space_strings_.Rewind(last);
6291   new_space_strings_.Trim();
6292
6293   last = 0;
6294   for (int i = 0; i < old_space_strings_.length(); ++i) {
6295     if (old_space_strings_[i] == heap_->the_hole_value()) {
6296       continue;
6297     }
6298     ASSERT(old_space_strings_[i]->IsExternalString());
6299     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
6300     old_space_strings_[last++] = old_space_strings_[i];
6301   }
6302   old_space_strings_.Rewind(last);
6303   old_space_strings_.Trim();
6304 #ifdef VERIFY_HEAP
6305   if (FLAG_verify_heap) {
6306     Verify();
6307   }
6308 #endif
6309 }
6310
6311
6312 void ExternalStringTable::TearDown() {
6313   for (int i = 0; i < new_space_strings_.length(); ++i) {
6314     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6315   }
6316   new_space_strings_.Free();
6317   for (int i = 0; i < old_space_strings_.length(); ++i) {
6318     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6319   }
6320   old_space_strings_.Free();
6321 }
6322
6323
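     // Chunks queued for deferred release form a singly-linked list threaded
     // through MemoryChunk::next_chunk(); they are actually released later in
     // FreeQueuedChunks().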
6324 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6325   chunk->set_next_chunk(chunks_queued_for_free_);
6326   chunks_queued_for_free_ = chunk;
6327 }
6328
6329
6330 void Heap::FreeQueuedChunks() {
6331   if (chunks_queued_for_free_ == NULL) return;
6332   MemoryChunk* next;
6333   MemoryChunk* chunk;
6334   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6335     next = chunk->next_chunk();
6336     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6337
6338     if (chunk->owner()->identity() == LO_SPACE) {
6339       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6340       // If FromAnyPointerAddress encounters a slot that belongs to a large
6341       // chunk queued for deletion, it will fail to find the chunk: it tries
6342       // to search the list of pages owned by the large object space, and
6343       // queued chunks have been detached from that list.
6344       // To work around this we split the large chunk into normal kPageSize-
6345       // aligned pieces and initialize the size, owner and flags of each piece.
6346       // If FromAnyPointerAddress encounters a slot that belongs to one of
6347       // these smaller pieces, it will treat it as a slot on a normal Page.
6348       Address chunk_end = chunk->address() + chunk->size();
6349       MemoryChunk* inner = MemoryChunk::FromAddress(
6350           chunk->address() + Page::kPageSize);
6351       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6352       while (inner <= inner_last) {
6353         // Size of a large chunk is always a multiple of
6354         // OS::AllocateAlignment() so there is always
6355         // enough space for a fake MemoryChunk header.
6356         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6357         // Guard against overflow.
6358         if (area_end < inner->address()) area_end = chunk_end;
6359         inner->SetArea(inner->address(), area_end);
6360         inner->set_size(Page::kPageSize);
6361         inner->set_owner(lo_space());
6362         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6363         inner = MemoryChunk::FromAddress(
6364             inner->address() + Page::kPageSize);
6365       }
6366     }
6367   }
6368   isolate_->heap()->store_buffer()->Compact();
6369   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6370   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6371     next = chunk->next_chunk();
6372     isolate_->memory_allocator()->Free(chunk);
6373   }
6374   chunks_queued_for_free_ = NULL;
6375 }
6376
6377
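     // Keeps the addresses of recently unmapped pages in a small ring buffer
     // (kRememberedUnmappedPages entries) so they can be identified in crash
     // dumps.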
6378 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6379   uintptr_t p = reinterpret_cast<uintptr_t>(page);
6380   // Tag the page pointer to make it findable in the dump file.
6381   if (compacted) {
6382     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
6383   } else {
6384     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
6385   }
6386   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6387       reinterpret_cast<Address>(p);
6388   remembered_unmapped_pages_index_++;
6389   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6390 }
6391
6392
6393 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6394   memset(object_counts_, 0, sizeof(object_counts_));
6395   memset(object_sizes_, 0, sizeof(object_sizes_));
6396   if (clear_last_time_stats) {
6397     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6398     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6399   }
6400 }
6401
6402
6403 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6404
6405
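     // Each counter is adjusted by the delta between the current per-type
     // counts/sizes and the snapshot taken at the previous checkpoint; the
     // current values then become the new snapshot and the working arrays are
     // cleared.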
6406 void Heap::CheckpointObjectStats() {
6407   LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
6408   Counters* counters = isolate()->counters();
6409 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
6410   counters->count_of_##name()->Increment(                                      \
6411       static_cast<int>(object_counts_[name]));                                 \
6412   counters->count_of_##name()->Decrement(                                      \
6413       static_cast<int>(object_counts_last_time_[name]));                       \
6414   counters->size_of_##name()->Increment(                                       \
6415       static_cast<int>(object_sizes_[name]));                                  \
6416   counters->size_of_##name()->Decrement(                                       \
6417       static_cast<int>(object_sizes_last_time_[name]));
6418   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6419 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6420   int index;
6421 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6422   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
6423   counters->count_of_CODE_TYPE_##name()->Increment(       \
6424       static_cast<int>(object_counts_[index]));           \
6425   counters->count_of_CODE_TYPE_##name()->Decrement(       \
6426       static_cast<int>(object_counts_last_time_[index])); \
6427   counters->size_of_CODE_TYPE_##name()->Increment(        \
6428       static_cast<int>(object_sizes_[index]));            \
6429   counters->size_of_CODE_TYPE_##name()->Decrement(        \
6430       static_cast<int>(object_sizes_last_time_[index]));
6431   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6432 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6433 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6434   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
6435   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
6436       static_cast<int>(object_counts_[index]));           \
6437   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
6438       static_cast<int>(object_counts_last_time_[index])); \
6439   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
6440       static_cast<int>(object_sizes_[index]));            \
6441   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
6442       static_cast<int>(object_sizes_last_time_[index]));
6443   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6444 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6445 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
6446   index =                                                                     \
6447       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6448   counters->count_of_CODE_AGE_##name()->Increment(                            \
6449       static_cast<int>(object_counts_[index]));                               \
6450   counters->count_of_CODE_AGE_##name()->Decrement(                            \
6451       static_cast<int>(object_counts_last_time_[index]));                     \
6452   counters->size_of_CODE_AGE_##name()->Increment(                             \
6453       static_cast<int>(object_sizes_[index]));                                \
6454   counters->size_of_CODE_AGE_##name()->Decrement(                             \
6455       static_cast<int>(object_sizes_last_time_[index]));
6456   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6457 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6458
6459   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6460   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6461   ClearObjectStats();
6462 }
6463
6464 } }  // namespace v8::internal