1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/v8.h"
6
7 #include "src/accessors.h"
8 #include "src/api.h"
9 #include "src/base/bits.h"
10 #include "src/base/once.h"
11 #include "src/base/utils/random-number-generator.h"
12 #include "src/bootstrapper.h"
13 #include "src/codegen.h"
14 #include "src/compilation-cache.h"
15 #include "src/conversions.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/deoptimizer.h"
19 #include "src/global-handles.h"
20 #include "src/heap/gc-idle-time-handler.h"
21 #include "src/heap/incremental-marking.h"
22 #include "src/heap/mark-compact.h"
23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/objects-visiting.h"
25 #include "src/heap/store-buffer.h"
26 #include "src/heap-profiler.h"
27 #include "src/isolate-inl.h"
28 #include "src/natives.h"
29 #include "src/runtime-profiler.h"
30 #include "src/scopeinfo.h"
31 #include "src/snapshot.h"
32 #include "src/utils.h"
33 #include "src/v8threads.h"
34 #include "src/vm-state-inl.h"
35
36 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
37 #include "src/regexp-macro-assembler.h"          // NOLINT
38 #include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
39 #endif
40 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
41 #include "src/regexp-macro-assembler.h"            // NOLINT
42 #include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
43 #endif
44 #if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
45 #include "src/regexp-macro-assembler.h"
46 #include "src/mips64/regexp-macro-assembler-mips64.h"
47 #endif
48
49 namespace v8 {
50 namespace internal {
51
52
53 Heap::Heap()
54     : amount_of_external_allocated_memory_(0),
55       amount_of_external_allocated_memory_at_last_global_gc_(0),
56       isolate_(NULL),
57       code_range_size_(0),
58       // semispace_size_ should be a power of 2 and old_generation_size_ should
59       // be a multiple of Page::kPageSize.
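      // Editorial note (illustrative, not part of the original source):
      // kPointerSize is 4 on 32-bit and 8 on 64-bit targets, so the
      // (kPointerSize / 4) factor below doubles these defaults on 64-bit
      // builds, e.g. a 16 MB max semi-space and a 1.4 GB max old
      // generation instead of 8 MB and 700 MB.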
60       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
61       max_semi_space_size_(8 * (kPointerSize / 4) * MB),
62       initial_semispace_size_(Page::kPageSize),
63       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
64       max_executable_size_(256ul * (kPointerSize / 4) * MB),
65       // Variables set based on semispace_size_ and old_generation_size_ in
66       // ConfigureHeap.
67       // Will be 4 * reserved_semispace_size_ to ensure that young
68       // generation can be aligned to its size.
69       maximum_committed_(0),
70       survived_since_last_expansion_(0),
71       sweep_generation_(0),
72       always_allocate_scope_depth_(0),
73       contexts_disposed_(0),
74       global_ic_age_(0),
75       flush_monomorphic_ics_(false),
76       scan_on_scavenge_pages_(0),
77       new_space_(this),
78       old_pointer_space_(NULL),
79       old_data_space_(NULL),
80       code_space_(NULL),
81       map_space_(NULL),
82       cell_space_(NULL),
83       property_cell_space_(NULL),
84       lo_space_(NULL),
85       gc_state_(NOT_IN_GC),
86       gc_post_processing_depth_(0),
87       allocations_count_(0),
88       raw_allocations_hash_(0),
89       dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
90       ms_count_(0),
91       gc_count_(0),
92       remembered_unmapped_pages_index_(0),
93       unflattened_strings_length_(0),
94 #ifdef DEBUG
95       allocation_timeout_(0),
96 #endif  // DEBUG
97       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
98       old_gen_exhausted_(false),
99       inline_allocation_disabled_(false),
100       store_buffer_rebuilder_(store_buffer()),
101       hidden_string_(NULL),
102       gc_safe_size_of_old_object_(NULL),
103       total_regexp_code_generated_(0),
104       tracer_(this),
105       high_survival_rate_period_length_(0),
106       promoted_objects_size_(0),
107       promotion_rate_(0),
108       semi_space_copied_object_size_(0),
109       semi_space_copied_rate_(0),
110       nodes_died_in_new_space_(0),
111       nodes_copied_in_new_space_(0),
112       nodes_promoted_(0),
113       maximum_size_scavenges_(0),
114       max_gc_pause_(0.0),
115       total_gc_time_ms_(0.0),
116       max_alive_after_gc_(0),
117       min_in_mutator_(kMaxInt),
118       marking_time_(0.0),
119       sweeping_time_(0.0),
120       mark_compact_collector_(this),
121       store_buffer_(this),
122       marking_(this),
123       incremental_marking_(this),
124       gc_count_at_last_idle_gc_(0),
125       full_codegen_bytes_generated_(0),
126       crankshaft_codegen_bytes_generated_(0),
127       gcs_since_last_deopt_(0),
128 #ifdef VERIFY_HEAP
129       no_weak_object_verification_scope_depth_(0),
130 #endif
131       allocation_sites_scratchpad_length_(0),
132       promotion_queue_(this),
133       configured_(false),
134       external_string_table_(this),
135       chunks_queued_for_free_(NULL),
136       gc_callbacks_depth_(0) {
137 // Allow build-time customization of the max semispace size. Building
138 // V8 with snapshots and a non-default max semispace size is much
139 // easier if you can define it as part of the build environment.
140 #if defined(V8_MAX_SEMISPACE_SIZE)
141   max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
142 #endif
143
144   // Ensure old_generation_size_ is a multiple of kPageSize.
145   DCHECK(MB >= Page::kPageSize);
146
147   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
148   set_native_contexts_list(NULL);
149   set_array_buffers_list(Smi::FromInt(0));
150   set_allocation_sites_list(Smi::FromInt(0));
151   set_encountered_weak_collections(Smi::FromInt(0));
152   // Put a dummy entry in the remembered pages so we can find the list in
153   // the minidump even if there are no real unmapped pages.
154   RememberUnmappedPage(NULL, false);
155
156   ClearObjectStats(true);
157 }
158
159
160 intptr_t Heap::Capacity() {
161   if (!HasBeenSetUp()) return 0;
162
163   return new_space_.Capacity() + old_pointer_space_->Capacity() +
164          old_data_space_->Capacity() + code_space_->Capacity() +
165          map_space_->Capacity() + cell_space_->Capacity() +
166          property_cell_space_->Capacity();
167 }
168
169
170 intptr_t Heap::CommittedMemory() {
171   if (!HasBeenSetUp()) return 0;
172
173   return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
174          old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
175          map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
176          property_cell_space_->CommittedMemory() + lo_space_->Size();
177 }
178
179
180 size_t Heap::CommittedPhysicalMemory() {
181   if (!HasBeenSetUp()) return 0;
182
183   return new_space_.CommittedPhysicalMemory() +
184          old_pointer_space_->CommittedPhysicalMemory() +
185          old_data_space_->CommittedPhysicalMemory() +
186          code_space_->CommittedPhysicalMemory() +
187          map_space_->CommittedPhysicalMemory() +
188          cell_space_->CommittedPhysicalMemory() +
189          property_cell_space_->CommittedPhysicalMemory() +
190          lo_space_->CommittedPhysicalMemory();
191 }
192
193
194 intptr_t Heap::CommittedMemoryExecutable() {
195   if (!HasBeenSetUp()) return 0;
196
197   return isolate()->memory_allocator()->SizeExecutable();
198 }
199
200
201 void Heap::UpdateMaximumCommitted() {
202   if (!HasBeenSetUp()) return;
203
204   intptr_t current_committed_memory = CommittedMemory();
205   if (current_committed_memory > maximum_committed_) {
206     maximum_committed_ = current_committed_memory;
207   }
208 }
209
210
211 intptr_t Heap::Available() {
212   if (!HasBeenSetUp()) return 0;
213
214   return new_space_.Available() + old_pointer_space_->Available() +
215          old_data_space_->Available() + code_space_->Available() +
216          map_space_->Available() + cell_space_->Available() +
217          property_cell_space_->Available();
218 }
219
220
221 bool Heap::HasBeenSetUp() {
222   return old_pointer_space_ != NULL && old_data_space_ != NULL &&
223          code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
224          property_cell_space_ != NULL && lo_space_ != NULL;
225 }
226
227
228 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
229   if (IntrusiveMarking::IsMarked(object)) {
230     return IntrusiveMarking::SizeOfMarkedObject(object);
231   }
232   return object->SizeFromMap(object->map());
233 }
234
235
236 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
237                                               const char** reason) {
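  // Editorial summary (illustrative): explicit old-space requests, forcing
  // flags, reaching the promotion limit, old-generation exhaustion, and too
  // little remaining memory to guarantee that a scavenge can succeed all
  // select the MARK_COMPACTOR; otherwise the SCAVENGER is used.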
238   // Is global GC requested?
239   if (space != NEW_SPACE) {
240     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
241     *reason = "GC in old space requested";
242     return MARK_COMPACTOR;
243   }
244
245   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
246     *reason = "GC in old space forced by flags";
247     return MARK_COMPACTOR;
248   }
249
250   // Is enough data promoted to justify a global GC?
251   if (OldGenerationAllocationLimitReached()) {
252     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
253     *reason = "promotion limit reached";
254     return MARK_COMPACTOR;
255   }
256
257   // Have allocation in OLD and LO failed?
258   if (old_gen_exhausted_) {
259     isolate_->counters()
260         ->gc_compactor_caused_by_oldspace_exhaustion()
261         ->Increment();
262     *reason = "old generations exhausted";
263     return MARK_COMPACTOR;
264   }
265
266   // Is there enough space left in OLD to guarantee that a scavenge can
267   // succeed?
268   //
269   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
270   // for object promotion. It counts only the bytes that the memory
271   // allocator has not yet allocated from the OS and assigned to any space,
272   // and does not count available bytes already in the old space or code
273   // space.  Undercounting is safe---we may get an unrequested full GC when
274   // a scavenge would have succeeded.
275   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
276     isolate_->counters()
277         ->gc_compactor_caused_by_oldspace_exhaustion()
278         ->Increment();
279     *reason = "scavenge might not succeed";
280     return MARK_COMPACTOR;
281   }
282
283   // Default
284   *reason = NULL;
285   return SCAVENGER;
286 }
287
288
289 // TODO(1238405): Combine the infrastructure for --heap-stats and
290 // --log-gc to avoid the complicated preprocessor and flag testing.
291 void Heap::ReportStatisticsBeforeGC() {
292 // Heap::ReportHeapStatistics will also log NewSpace statistics when
293 // --log-gc is set.  The following logic is used to avoid double
294 // logging.
295 #ifdef DEBUG
296   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
297   if (FLAG_heap_stats) {
298     ReportHeapStatistics("Before GC");
299   } else if (FLAG_log_gc) {
300     new_space_.ReportStatistics();
301   }
302   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
303 #else
304   if (FLAG_log_gc) {
305     new_space_.CollectStatistics();
306     new_space_.ReportStatistics();
307     new_space_.ClearHistograms();
308   }
309 #endif  // DEBUG
310 }
311
312
313 void Heap::PrintShortHeapStatistics() {
314   if (!FLAG_trace_gc_verbose) return;
315   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
316            "d KB"
317            ", available: %6" V8_PTR_PREFIX "d KB\n",
318            isolate_->memory_allocator()->Size() / KB,
319            isolate_->memory_allocator()->Available() / KB);
320   PrintPID("New space,          used: %6" V8_PTR_PREFIX
321            "d KB"
322            ", available: %6" V8_PTR_PREFIX
323            "d KB"
324            ", committed: %6" V8_PTR_PREFIX "d KB\n",
325            new_space_.Size() / KB, new_space_.Available() / KB,
326            new_space_.CommittedMemory() / KB);
327   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
328            "d KB"
329            ", available: %6" V8_PTR_PREFIX
330            "d KB"
331            ", committed: %6" V8_PTR_PREFIX "d KB\n",
332            old_pointer_space_->SizeOfObjects() / KB,
333            old_pointer_space_->Available() / KB,
334            old_pointer_space_->CommittedMemory() / KB);
335   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
336            "d KB"
337            ", available: %6" V8_PTR_PREFIX
338            "d KB"
339            ", committed: %6" V8_PTR_PREFIX "d KB\n",
340            old_data_space_->SizeOfObjects() / KB,
341            old_data_space_->Available() / KB,
342            old_data_space_->CommittedMemory() / KB);
343   PrintPID("Code space,         used: %6" V8_PTR_PREFIX
344            "d KB"
345            ", available: %6" V8_PTR_PREFIX
346            "d KB"
347            ", committed: %6" V8_PTR_PREFIX "d KB\n",
348            code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
349            code_space_->CommittedMemory() / KB);
350   PrintPID("Map space,          used: %6" V8_PTR_PREFIX
351            "d KB"
352            ", available: %6" V8_PTR_PREFIX
353            "d KB"
354            ", committed: %6" V8_PTR_PREFIX "d KB\n",
355            map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
356            map_space_->CommittedMemory() / KB);
357   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
358            "d KB"
359            ", available: %6" V8_PTR_PREFIX
360            "d KB"
361            ", committed: %6" V8_PTR_PREFIX "d KB\n",
362            cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
363            cell_space_->CommittedMemory() / KB);
364   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
365            "d KB"
366            ", available: %6" V8_PTR_PREFIX
367            "d KB"
368            ", committed: %6" V8_PTR_PREFIX "d KB\n",
369            property_cell_space_->SizeOfObjects() / KB,
370            property_cell_space_->Available() / KB,
371            property_cell_space_->CommittedMemory() / KB);
372   PrintPID("Large object space, used: %6" V8_PTR_PREFIX
373            "d KB"
374            ", available: %6" V8_PTR_PREFIX
375            "d KB"
376            ", committed: %6" V8_PTR_PREFIX "d KB\n",
377            lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
378            lo_space_->CommittedMemory() / KB);
379   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
380            "d KB"
381            ", available: %6" V8_PTR_PREFIX
382            "d KB"
383            ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            this->SizeOfObjects() / KB, this->Available() / KB,
385            this->CommittedMemory() / KB);
386   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
387            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
388   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
389 }
390
391
392 // TODO(1238405): Combine the infrastructure for --heap-stats and
393 // --log-gc to avoid the complicated preprocessor and flag testing.
394 void Heap::ReportStatisticsAfterGC() {
395 // Similar to the before GC, we use some complicated logic to ensure that
396 // NewSpace statistics are logged exactly once when --log-gc is turned on.
397 #if defined(DEBUG)
398   if (FLAG_heap_stats) {
399     new_space_.CollectStatistics();
400     ReportHeapStatistics("After GC");
401   } else if (FLAG_log_gc) {
402     new_space_.ReportStatistics();
403   }
404 #else
405   if (FLAG_log_gc) new_space_.ReportStatistics();
406 #endif  // DEBUG
407 }
408
409
410 void Heap::GarbageCollectionPrologue() {
411   {
412     AllowHeapAllocation for_the_first_part_of_prologue;
413     ClearJSFunctionResultCaches();
414     gc_count_++;
415     unflattened_strings_length_ = 0;
416
417     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
418       mark_compact_collector()->EnableCodeFlushing(true);
419     }
420
421 #ifdef VERIFY_HEAP
422     if (FLAG_verify_heap) {
423       Verify();
424     }
425 #endif
426   }
427
428   // Reset GC statistics.
429   promoted_objects_size_ = 0;
430   semi_space_copied_object_size_ = 0;
431   nodes_died_in_new_space_ = 0;
432   nodes_copied_in_new_space_ = 0;
433   nodes_promoted_ = 0;
434
435   UpdateMaximumCommitted();
436
437 #ifdef DEBUG
438   DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
439
440   if (FLAG_gc_verbose) Print();
441
442   ReportStatisticsBeforeGC();
443 #endif  // DEBUG
444
445   store_buffer()->GCPrologue();
446
447   if (isolate()->concurrent_osr_enabled()) {
448     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
449   }
450
451   if (new_space_.IsAtMaximumCapacity()) {
452     maximum_size_scavenges_++;
453   } else {
454     maximum_size_scavenges_ = 0;
455   }
456   CheckNewSpaceExpansionCriteria();
457 }
458
459
460 intptr_t Heap::SizeOfObjects() {
461   intptr_t total = 0;
462   AllSpaces spaces(this);
463   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
464     total += space->SizeOfObjects();
465   }
466   return total;
467 }
468
469
470 void Heap::ClearAllICsByKind(Code::Kind kind) {
471   HeapObjectIterator it(code_space());
472
473   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
474     Code* code = Code::cast(object);
475     Code::Kind current_kind = code->kind();
476     if (current_kind == Code::FUNCTION ||
477         current_kind == Code::OPTIMIZED_FUNCTION) {
478       code->ClearInlineCaches(kind);
479     }
480   }
481 }
482
483
484 void Heap::RepairFreeListsAfterBoot() {
485   PagedSpaces spaces(this);
486   for (PagedSpace* space = spaces.next(); space != NULL;
487        space = spaces.next()) {
488     space->RepairFreeListsAfterBoot();
489   }
490 }
491
492
493 void Heap::ProcessPretenuringFeedback() {
494   if (FLAG_allocation_site_pretenuring) {
495     int tenure_decisions = 0;
496     int dont_tenure_decisions = 0;
497     int allocation_mementos_found = 0;
498     int allocation_sites = 0;
499     int active_allocation_sites = 0;
500
501     // If the scratchpad overflowed, we have to iterate over the allocation
502     // sites list.
503     // TODO(hpayer): We iterate over the whole list of allocation sites
504     // once we have grown to the maximum semi-space size to deopt maybe
505     // tenured allocation sites. We could hold the maybe tenured allocation
506     // sites in a separate data structure if this is a performance problem.
507     bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
508     bool use_scratchpad =
509         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
510         !deopt_maybe_tenured;
511
512     int i = 0;
513     Object* list_element = allocation_sites_list();
514     bool trigger_deoptimization = false;
515     bool maximum_size_scavenge = MaximumSizeScavenge();
516     while (use_scratchpad ? i < allocation_sites_scratchpad_length_
517                           : list_element->IsAllocationSite()) {
518       AllocationSite* site =
519           use_scratchpad
520               ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
521               : AllocationSite::cast(list_element);
522       allocation_mementos_found += site->memento_found_count();
523       if (site->memento_found_count() > 0) {
524         active_allocation_sites++;
525         if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
526           trigger_deoptimization = true;
527         }
528         if (site->GetPretenureMode() == TENURED) {
529           tenure_decisions++;
530         } else {
531           dont_tenure_decisions++;
532         }
533         allocation_sites++;
534       }
535
536       if (deopt_maybe_tenured && site->IsMaybeTenure()) {
537         site->set_deopt_dependent_code(true);
538         trigger_deoptimization = true;
539       }
540
541       if (use_scratchpad) {
542         i++;
543       } else {
544         list_element = site->weak_next();
545       }
546     }
547
548     if (trigger_deoptimization) {
549       isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
550     }
551
552     FlushAllocationSitesScratchpad();
553
554     if (FLAG_trace_pretenuring_statistics &&
555         (allocation_mementos_found > 0 || tenure_decisions > 0 ||
556          dont_tenure_decisions > 0)) {
557       PrintF(
558           "GC: (mode, #visited allocation sites, #active allocation sites, "
559           "#mementos, #tenure decisions, #donttenure decisions) "
560           "(%s, %d, %d, %d, %d, %d)\n",
561           use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
562           active_allocation_sites, allocation_mementos_found, tenure_decisions,
563           dont_tenure_decisions);
564     }
565   }
566 }
567
568
569 void Heap::DeoptMarkedAllocationSites() {
570   // TODO(hpayer): If iterating over the allocation sites list becomes a
571   // performance issue, use a cache heap data structure instead (similar to the
572   // allocation sites scratchpad).
573   Object* list_element = allocation_sites_list();
574   while (list_element->IsAllocationSite()) {
575     AllocationSite* site = AllocationSite::cast(list_element);
576     if (site->deopt_dependent_code()) {
577       site->dependent_code()->MarkCodeForDeoptimization(
578           isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
579       site->set_deopt_dependent_code(false);
580     }
581     list_element = site->weak_next();
582   }
583   Deoptimizer::DeoptimizeMarkedCode(isolate_);
584 }
585
586
587 void Heap::GarbageCollectionEpilogue() {
588   store_buffer()->GCEpilogue();
589
590   // In release mode, we only zap the from space under heap verification.
591   if (Heap::ShouldZapGarbage()) {
592     ZapFromSpace();
593   }
594
595   // Process pretenuring feedback and update allocation sites.
596   ProcessPretenuringFeedback();
597
598 #ifdef VERIFY_HEAP
599   if (FLAG_verify_heap) {
600     Verify();
601   }
602 #endif
603
604   AllowHeapAllocation for_the_rest_of_the_epilogue;
605
606 #ifdef DEBUG
607   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
608   if (FLAG_print_handles) PrintHandles();
609   if (FLAG_gc_verbose) Print();
610   if (FLAG_code_stats) ReportCodeStatistics("After GC");
611 #endif
612   if (FLAG_deopt_every_n_garbage_collections > 0) {
613     // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
614     // the topmost optimized frame can be deoptimized safely, because it
615     // might not have a lazy bailout point right after its current PC.
616     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
617       Deoptimizer::DeoptimizeAll(isolate());
618       gcs_since_last_deopt_ = 0;
619     }
620   }
621
622   UpdateMaximumCommitted();
623
624   isolate_->counters()->alive_after_last_gc()->Set(
625       static_cast<int>(SizeOfObjects()));
626
627   isolate_->counters()->string_table_capacity()->Set(
628       string_table()->Capacity());
629   isolate_->counters()->number_of_symbols()->Set(
630       string_table()->NumberOfElements());
631
632   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
633     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
634         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
635                          (crankshaft_codegen_bytes_generated_ +
636                           full_codegen_bytes_generated_)));
637   }
638
639   if (CommittedMemory() > 0) {
640     isolate_->counters()->external_fragmentation_total()->AddSample(
641         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
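    // Editorial note (illustrative): with, say, 60 MB of live objects in
    // 80 MB of committed memory the sample above is 100 - 75 = 25, i.e.
    // 25% external fragmentation.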
642
643     isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
644         (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
645     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
646         static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
647                          CommittedMemory()));
648     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
649         static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
650                          CommittedMemory()));
651     isolate_->counters()->heap_fraction_code_space()->AddSample(
652         static_cast<int>((code_space()->CommittedMemory() * 100.0) /
653                          CommittedMemory()));
654     isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
655         (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
656     isolate_->counters()->heap_fraction_cell_space()->AddSample(
657         static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
658                          CommittedMemory()));
659     isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
660         static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
661                          CommittedMemory()));
662     isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
663         (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
664
665     isolate_->counters()->heap_sample_total_committed()->AddSample(
666         static_cast<int>(CommittedMemory() / KB));
667     isolate_->counters()->heap_sample_total_used()->AddSample(
668         static_cast<int>(SizeOfObjects() / KB));
669     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
670         static_cast<int>(map_space()->CommittedMemory() / KB));
671     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
672         static_cast<int>(cell_space()->CommittedMemory() / KB));
673     isolate_->counters()
674         ->heap_sample_property_cell_space_committed()
675         ->AddSample(
676             static_cast<int>(property_cell_space()->CommittedMemory() / KB));
677     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
678         static_cast<int>(code_space()->CommittedMemory() / KB));
679
680     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
681         static_cast<int>(MaximumCommittedMemory() / KB));
682   }
683
684 #define UPDATE_COUNTERS_FOR_SPACE(space)                \
685   isolate_->counters()->space##_bytes_available()->Set( \
686       static_cast<int>(space()->Available()));          \
687   isolate_->counters()->space##_bytes_committed()->Set( \
688       static_cast<int>(space()->CommittedMemory()));    \
689   isolate_->counters()->space##_bytes_used()->Set(      \
690       static_cast<int>(space()->SizeOfObjects()));
691 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
692   if (space()->CommittedMemory() > 0) {                                \
693     isolate_->counters()->external_fragmentation_##space()->AddSample( \
694         static_cast<int>(100 -                                         \
695                          (space()->SizeOfObjects() * 100.0) /          \
696                              space()->CommittedMemory()));             \
697   }
698 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
699   UPDATE_COUNTERS_FOR_SPACE(space)                         \
700   UPDATE_FRAGMENTATION_FOR_SPACE(space)
701
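  // Editorial note (illustrative expansion): for example,
  // UPDATE_COUNTERS_FOR_SPACE(new_space) below sets the counters
  // new_space_bytes_available, new_space_bytes_committed and
  // new_space_bytes_used from new_space()->Available(), CommittedMemory()
  // and SizeOfObjects() respectively.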
702   UPDATE_COUNTERS_FOR_SPACE(new_space)
703   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
704   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
705   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
706   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
707   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
708   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
709   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
710 #undef UPDATE_COUNTERS_FOR_SPACE
711 #undef UPDATE_FRAGMENTATION_FOR_SPACE
712 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
713
714 #ifdef DEBUG
715   ReportStatisticsAfterGC();
716 #endif  // DEBUG
717
718   // Remember the last top pointer so that we can later find out
719   // whether we allocated in new space since the last GC.
720   new_space_top_after_last_gc_ = new_space()->top();
721 }
722
723
724 void Heap::CollectAllGarbage(int flags, const char* gc_reason,
725                              const v8::GCCallbackFlags gc_callback_flags) {
726   // Since we are ignoring the return value, the exact choice of space does
727   // not matter, so long as we do not specify NEW_SPACE, which would not
728   // cause a full GC.
729   mark_compact_collector_.SetFlags(flags);
730   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
731   mark_compact_collector_.SetFlags(kNoGCFlags);
732 }
733
734
735 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
736   // Since we are ignoring the return value, the exact choice of space does
737   // not matter, so long as we do not specify NEW_SPACE, which would not
738   // cause a full GC.
739   // Major GC would invoke weak handle callbacks on weakly reachable
740   // handles, but won't collect weakly reachable objects until next
741   // major GC.  Therefore, if we collect aggressively and a weak handle
742   // callback has been invoked, we rerun the major GC to release objects
743   // which have become garbage.
744   // Note: as weak callbacks can execute arbitrary code, we cannot
745   // hope that eventually there will be no weak callback invocations.
746   // Therefore we stop recollecting after several attempts.
747   if (isolate()->concurrent_recompilation_enabled()) {
748     // The optimizing compiler may be unnecessarily holding on to memory.
749     DisallowHeapAllocation no_recursive_gc;
750     isolate()->optimizing_compiler_thread()->Flush();
751   }
752   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
753                                      kReduceMemoryFootprintMask);
754   isolate_->compilation_cache()->Clear();
755   const int kMaxNumberOfAttempts = 7;
756   const int kMinNumberOfAttempts = 2;
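  // Editorial note (illustrative): CollectGarbage() reports whether weak
  // callbacks freed global handles, so this loop performs at least two and
  // at most seven full GCs, stopping early once a collection frees nothing
  // more.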
757   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
758     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
759         attempt + 1 >= kMinNumberOfAttempts) {
760       break;
761     }
762   }
763   mark_compact_collector()->SetFlags(kNoGCFlags);
764   new_space_.Shrink();
765   UncommitFromSpace();
766   incremental_marking()->UncommitMarkingDeque();
767 }
768
769
770 void Heap::EnsureFillerObjectAtTop() {
771   // There may be an allocation memento behind every object in new space.
772   // If we evacuate a new space that is not full, or if we are on the last
773   // page of the new space, then there may be uninitialized memory behind
774   // the top pointer of the new space page. We store a filler object there
775   // to identify the unused space.
776   Address from_top = new_space_.top();
777   Address from_limit = new_space_.limit();
778   if (from_top < from_limit) {
779     int remaining_in_page = static_cast<int>(from_limit - from_top);
780     CreateFillerObjectAt(from_top, remaining_in_page);
781   }
782 }
783
784
785 bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
786                           const char* collector_reason,
787                           const v8::GCCallbackFlags gc_callback_flags) {
788   // The VM is in the GC state until exiting this function.
789   VMState<GC> state(isolate_);
790
791 #ifdef DEBUG
792   // Reset the allocation timeout to the GC interval, but make sure to
793   // allow at least a few allocations after a collection. The reason
794   // for this is that we have a lot of allocation sequences and we
795   // assume that a garbage collection will allow the subsequent
796   // allocation attempts to go through.
797   allocation_timeout_ = Max(6, FLAG_gc_interval);
798 #endif
799
800   EnsureFillerObjectAtTop();
801
802   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
803     if (FLAG_trace_incremental_marking) {
804       PrintF("[IncrementalMarking] Scavenge during marking.\n");
805     }
806   }
807
808   if (collector == MARK_COMPACTOR &&
809       !mark_compact_collector()->abort_incremental_marking() &&
810       !incremental_marking()->IsStopped() &&
811       !incremental_marking()->should_hurry() &&
812       FLAG_incremental_marking_steps) {
813     // Make progress in incremental marking.
814     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
815     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
816                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
817     if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
818       if (FLAG_trace_incremental_marking) {
819         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
820       }
821       collector = SCAVENGER;
822       collector_reason = "incremental marking delaying mark-sweep";
823     }
824   }
825
826   bool next_gc_likely_to_collect_more = false;
827
828   {
829     tracer()->Start(collector, gc_reason, collector_reason);
830     DCHECK(AllowHeapAllocation::IsAllowed());
831     DisallowHeapAllocation no_allocation_during_gc;
832     GarbageCollectionPrologue();
833
834     {
835       HistogramTimerScope histogram_timer_scope(
836           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
837                                    : isolate_->counters()->gc_compactor());
838       next_gc_likely_to_collect_more =
839           PerformGarbageCollection(collector, gc_callback_flags);
840     }
841
842     GarbageCollectionEpilogue();
843     tracer()->Stop();
844   }
845
846   // Start incremental marking for the next cycle. The heap snapshot
847   // generator needs incremental marking to stay off after it aborted.
848   if (!mark_compact_collector()->abort_incremental_marking() &&
849       WorthActivatingIncrementalMarking()) {
850     incremental_marking()->Start();
851   }
852
853   return next_gc_likely_to_collect_more;
854 }
855
856
857 int Heap::NotifyContextDisposed() {
858   if (isolate()->concurrent_recompilation_enabled()) {
859     // Flush the queued recompilation tasks.
860     isolate()->optimizing_compiler_thread()->Flush();
861   }
862   flush_monomorphic_ics_ = true;
863   AgeInlineCaches();
864   return ++contexts_disposed_;
865 }
866
867
868 void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
869                         int len) {
870   if (len == 0) return;
871
872   DCHECK(array->map() != fixed_cow_array_map());
873   Object** dst_objects = array->data_start() + dst_index;
874   MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
875   if (!InNewSpace(array)) {
876     for (int i = 0; i < len; i++) {
877       // TODO(hpayer): check store buffer for entries
878       if (InNewSpace(dst_objects[i])) {
879         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
880       }
881     }
882   }
883   incremental_marking()->RecordWrites(array);
884 }
885
886
887 #ifdef VERIFY_HEAP
888 // Helper class for verifying the string table.
889 class StringTableVerifier : public ObjectVisitor {
890  public:
891   void VisitPointers(Object** start, Object** end) {
892     // Visit all HeapObject pointers in [start, end).
893     for (Object** p = start; p < end; p++) {
894       if ((*p)->IsHeapObject()) {
895         // Check that the string is actually internalized.
896         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
897               (*p)->IsInternalizedString());
898       }
899     }
900   }
901 };
902
903
904 static void VerifyStringTable(Heap* heap) {
905   StringTableVerifier verifier;
906   heap->string_table()->IterateElements(&verifier);
907 }
908 #endif  // VERIFY_HEAP
909
910
911 static bool AbortIncrementalMarkingAndCollectGarbage(
912     Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
913   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914   bool result = heap->CollectGarbage(space, gc_reason);
915   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
916   return result;
917 }
918
919
920 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
921   bool gc_performed = true;
922   int counter = 0;
923   static const int kThreshold = 20;
924   while (gc_performed && counter++ < kThreshold) {
925     gc_performed = false;
926     DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928       if (sizes[space] != 0) {
929         AllocationResult allocation;
930         if (space == NEW_SPACE) {
931           allocation = new_space()->AllocateRaw(sizes[space]);
932         } else {
933           allocation = paged_space(space)->AllocateRaw(sizes[space]);
934         }
935         FreeListNode* node;
936         if (!allocation.To(&node)) {
937           if (space == NEW_SPACE) {
938             Heap::CollectGarbage(NEW_SPACE,
939                                  "failed to reserve space in the new space");
940           } else {
941             AbortIncrementalMarkingAndCollectGarbage(
942                 this, static_cast<AllocationSpace>(space),
943                 "failed to reserve space in paged space");
944           }
945           gc_performed = true;
946           break;
947         } else {
948           // Mark with a free list node, in case we have a GC before
949           // deserializing.
950           node->set_size(this, sizes[space]);
951           locations_out[space] = node->address();
952         }
953       }
954     }
955   }
956
957   if (gc_performed) {
958     // Failed to reserve the space after several attempts.
959     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
960   }
961 }
962
963
964 void Heap::EnsureFromSpaceIsCommitted() {
965   if (new_space_.CommitFromSpaceIfNeeded()) return;
966
967   // Committing memory to from space failed.
968   // Memory is exhausted and we will die.
969   V8::FatalProcessOutOfMemory("Committing semi space failed.");
970 }
971
972
973 void Heap::ClearJSFunctionResultCaches() {
974   if (isolate_->bootstrapper()->IsActive()) return;
975
976   Object* context = native_contexts_list();
977   while (!context->IsUndefined()) {
978     // Get the caches for this context. GC can happen when the context
979     // is not fully initialized, so the caches can be undefined.
980     Object* caches_or_undefined =
981         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
982     if (!caches_or_undefined->IsUndefined()) {
983       FixedArray* caches = FixedArray::cast(caches_or_undefined);
984       // Clear the caches:
985       int length = caches->length();
986       for (int i = 0; i < length; i++) {
987         JSFunctionResultCache::cast(caches->get(i))->Clear();
988       }
989     }
990     // Get the next context:
991     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
992   }
993 }
994
995
996 void Heap::ClearNormalizedMapCaches() {
997   if (isolate_->bootstrapper()->IsActive() &&
998       !incremental_marking()->IsMarking()) {
999     return;
1000   }
1001
1002   Object* context = native_contexts_list();
1003   while (!context->IsUndefined()) {
1004     // GC can happen when the context is not fully initialized,
1005     // so the cache can be undefined.
1006     Object* cache =
1007         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1008     if (!cache->IsUndefined()) {
1009       NormalizedMapCache::cast(cache)->Clear();
1010     }
1011     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1012   }
1013 }
1014
1015
1016 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
1017   if (start_new_space_size == 0) return;
1018
1019   promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
1020                      static_cast<double>(start_new_space_size) * 100);
1021
1022   semi_space_copied_rate_ =
1023       (static_cast<double>(semi_space_copied_object_size_) /
1024        static_cast<double>(start_new_space_size) * 100);
1025
1026   double survival_rate = promotion_rate_ + semi_space_copied_rate_;
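  // Editorial example (illustrative): if 1 MB of a 4 MB new space was
  // promoted and 0.5 MB was copied within the semispaces, promotion_rate_
  // is 25, semi_space_copied_rate_ is 12.5 and survival_rate is 37.5.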
1027
1028   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1029     high_survival_rate_period_length_++;
1030   } else {
1031     high_survival_rate_period_length_ = 0;
1032   }
1033 }
1034
1035 bool Heap::PerformGarbageCollection(
1036     GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
1037   int freed_global_handles = 0;
1038
1039   if (collector != SCAVENGER) {
1040     PROFILE(isolate_, CodeMovingGCEvent());
1041   }
1042
1043 #ifdef VERIFY_HEAP
1044   if (FLAG_verify_heap) {
1045     VerifyStringTable(this);
1046   }
1047 #endif
1048
1049   GCType gc_type =
1050       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1051
1052   {
1053     GCCallbacksScope scope(this);
1054     if (scope.CheckReenter()) {
1055       AllowHeapAllocation allow_allocation;
1056       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1057       VMState<EXTERNAL> state(isolate_);
1058       HandleScope handle_scope(isolate_);
1059       CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1060     }
1061   }
1062
1063   EnsureFromSpaceIsCommitted();
1064
1065   int start_new_space_size = Heap::new_space()->SizeAsInt();
1066
1067   if (IsHighSurvivalRate()) {
1068     // We speed up the incremental marker if it is running so that it
1069     // does not fall behind the rate of promotion, which would cause a
1070     // constantly growing old space.
1071     incremental_marking()->NotifyOfHighPromotionRate();
1072   }
1073
1074   if (collector == MARK_COMPACTOR) {
1075     // Perform mark-sweep with optional compaction.
1076     MarkCompact();
1077     sweep_generation_++;
1078     // Temporarily set the limit for the case when
1079     // PostGarbageCollectionProcessing allocates and triggers GC. The real
1080     // limit is set after PostGarbageCollectionProcessing.
1081     old_generation_allocation_limit_ =
1082         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
1083     old_gen_exhausted_ = false;
1084   } else {
1085     Scavenge();
1086   }
1087
1088   UpdateSurvivalStatistics(start_new_space_size);
1089
1090   isolate_->counters()->objs_since_last_young()->Set(0);
1091
1092   // Callbacks that fire after this point might trigger nested GCs and
1093   // restart incremental marking, so the assertion can't be moved down.
1094   DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
1095
1096   gc_post_processing_depth_++;
1097   {
1098     AllowHeapAllocation allow_allocation;
1099     GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1100     freed_global_handles =
1101         isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
1102   }
1103   gc_post_processing_depth_--;
1104
1105   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1106
1107   // Update relocatables.
1108   Relocatable::PostGarbageCollectionProcessing(isolate_);
1109
1110   if (collector == MARK_COMPACTOR) {
1111     // Register the amount of external allocated memory.
1112     amount_of_external_allocated_memory_at_last_global_gc_ =
1113         amount_of_external_allocated_memory_;
1114     old_generation_allocation_limit_ = OldGenerationAllocationLimit(
1115         PromotedSpaceSizeOfObjects(), freed_global_handles);
1116   }
1117
1118   {
1119     GCCallbacksScope scope(this);
1120     if (scope.CheckReenter()) {
1121       AllowHeapAllocation allow_allocation;
1122       GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
1123       VMState<EXTERNAL> state(isolate_);
1124       HandleScope handle_scope(isolate_);
1125       CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1126     }
1127   }
1128
1129 #ifdef VERIFY_HEAP
1130   if (FLAG_verify_heap) {
1131     VerifyStringTable(this);
1132   }
1133 #endif
1134
1135   return freed_global_handles > 0;
1136 }
1137
1138
1139 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1140   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1141     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1142       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1143         v8::GCPrologueCallback callback =
1144             reinterpret_cast<v8::GCPrologueCallback>(
1145                 gc_prologue_callbacks_[i].callback);
1146         callback(gc_type, flags);
1147       } else {
1148         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1149         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1150       }
1151     }
1152   }
1153 }
1154
1155
1156 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1157                                    GCCallbackFlags gc_callback_flags) {
1158   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1159     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1160       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1161         v8::GCPrologueCallback callback =
1162             reinterpret_cast<v8::GCPrologueCallback>(
1163                 gc_epilogue_callbacks_[i].callback);
1164         callback(gc_type, gc_callback_flags);
1165       } else {
1166         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1167         gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
1168       }
1169     }
1170   }
1171 }
1172
1173
1174 void Heap::MarkCompact() {
1175   gc_state_ = MARK_COMPACT;
1176   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1177
1178   uint64_t size_of_objects_before_gc = SizeOfObjects();
1179
1180   mark_compact_collector_.Prepare();
1181
1182   ms_count_++;
1183
1184   MarkCompactPrologue();
1185
1186   mark_compact_collector_.CollectGarbage();
1187
1188   LOG(isolate_, ResourceEvent("markcompact", "end"));
1189
1190   gc_state_ = NOT_IN_GC;
1191
1192   isolate_->counters()->objs_since_last_full()->Set(0);
1193
1194   flush_monomorphic_ics_ = false;
1195
1196   if (FLAG_allocation_site_pretenuring) {
1197     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1198   }
1199 }
1200
1201
1202 void Heap::MarkCompactPrologue() {
1203   // At any old-space GC, clear the keyed lookup cache to enable collection
1204   // of unused maps.
1205   isolate_->keyed_lookup_cache()->Clear();
1206   isolate_->context_slot_cache()->Clear();
1207   isolate_->descriptor_lookup_cache()->Clear();
1208   RegExpResultsCache::Clear(string_split_cache());
1209   RegExpResultsCache::Clear(regexp_multiple_cache());
1210
1211   isolate_->compilation_cache()->MarkCompactPrologue();
1212
1213   CompletelyClearInstanceofCache();
1214
1215   FlushNumberStringCache();
1216   if (FLAG_cleanup_code_caches_at_gc) {
1217     polymorphic_code_cache()->set_cache(undefined_value());
1218   }
1219
1220   ClearNormalizedMapCaches();
1221 }
1222
1223
1224 // Helper class for copying HeapObjects
1225 class ScavengeVisitor : public ObjectVisitor {
1226  public:
1227   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1228
1229   void VisitPointer(Object** p) { ScavengePointer(p); }
1230
1231   void VisitPointers(Object** start, Object** end) {
1232     // Copy all HeapObject pointers in [start, end)
1233     for (Object** p = start; p < end; p++) ScavengePointer(p);
1234   }
1235
1236  private:
1237   void ScavengePointer(Object** p) {
1238     Object* object = *p;
1239     if (!heap_->InNewSpace(object)) return;
1240     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1241                          reinterpret_cast<HeapObject*>(object));
1242   }
1243
1244   Heap* heap_;
1245 };
1246
1247
1248 #ifdef VERIFY_HEAP
1249 // Visitor class to verify pointers in code or data space do not point into
1250 // new space.
1251 class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
1252  public:
1253   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1254   void VisitPointers(Object** start, Object** end) {
1255     for (Object** current = start; current < end; current++) {
1256       if ((*current)->IsHeapObject()) {
1257         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1258       }
1259     }
1260   }
1261
1262  private:
1263   Heap* heap_;
1264 };
1265
1266
1267 static void VerifyNonPointerSpacePointers(Heap* heap) {
1268   // Verify that there are no pointers to new space in spaces where we
1269   // do not expect them.
1270   VerifyNonPointerSpacePointersVisitor v(heap);
1271   HeapObjectIterator code_it(heap->code_space());
1272   for (HeapObject* object = code_it.Next(); object != NULL;
1273        object = code_it.Next())
1274     object->Iterate(&v);
1275
1276   HeapObjectIterator data_it(heap->old_data_space());
1277   for (HeapObject* object = data_it.Next(); object != NULL;
1278        object = data_it.Next())
1279     object->Iterate(&v);
1280 }
1281 #endif  // VERIFY_HEAP
1282
1283
1284 void Heap::CheckNewSpaceExpansionCriteria() {
1285   if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
1286       survived_since_last_expansion_ > new_space_.TotalCapacity()) {
1287     // Grow the size of new space if there is room to grow, enough data
1288     // has survived scavenge since the last expansion and we are not in
1289     // high promotion mode.
1290     new_space_.Grow();
1291     survived_since_last_expansion_ = 0;
1292   }
1293 }
1294
1295
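// Editorial note (illustrative): during a scavenge, an object that has
// already been copied has its forwarding address stored in its map word, so
// a new-space object without a forwarding address is still unscavenged.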
1296 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1297   return heap->InNewSpace(*p) &&
1298          !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1299 }
1300
1301
1302 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
1303                                        StoreBufferEvent event) {
1304   heap->store_buffer_rebuilder_.Callback(page, event);
1305 }
1306
1307
1308 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1309   if (event == kStoreBufferStartScanningPagesEvent) {
1310     start_of_current_page_ = NULL;
1311     current_page_ = NULL;
1312   } else if (event == kStoreBufferScanningPageEvent) {
1313     if (current_page_ != NULL) {
1314       // If this page already overflowed the store buffer during this iteration.
1315       if (current_page_->scan_on_scavenge()) {
1316         // Then we should wipe out the entries that have been added for it.
1317         store_buffer_->SetTop(start_of_current_page_);
1318       } else if (store_buffer_->Top() - start_of_current_page_ >=
1319                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1320         // Did we find too many pointers in the previous page?  The heuristic is
1321         // that no page can take more than 1/5 the remaining slots in the store
1322         // buffer.
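        // Editorial derivation (illustrative): let P be the entries this page
        // added (Top() - start_of_current_page_) and R the slots still free
        // (Limit() - Top()).  The test P >= R / 4 above is, up to integer
        // rounding, the same as P >= (P + R) / 5, i.e. the page consumed at
        // least a fifth of the space available when its scan started.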
1323         current_page_->set_scan_on_scavenge(true);
1324         store_buffer_->SetTop(start_of_current_page_);
1325       } else {
1326         // In this case the page we scanned took a reasonable number of slots in
1327         // the store buffer.  It has now been rehabilitated and is no longer
1328         // marked scan_on_scavenge.
1329         DCHECK(!current_page_->scan_on_scavenge());
1330       }
1331     }
1332     start_of_current_page_ = store_buffer_->Top();
1333     current_page_ = page;
1334   } else if (event == kStoreBufferFullEvent) {
1335     // The current page overflowed the store buffer again.  Wipe out its entries
1336     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1337     // several times while scanning.
1338     if (current_page_ == NULL) {
1339       // Store Buffer overflowed while scanning promoted objects.  These are not
1340       // in any particular page, though they are likely to be clustered by the
1341       // allocation routines.
1342       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1343     } else {
1344       // Store Buffer overflowed while scanning a particular old space page for
1345       // pointers to new space.
1346       DCHECK(current_page_ == page);
1347       DCHECK(page != NULL);
1348       current_page_->set_scan_on_scavenge(true);
1349       DCHECK(start_of_current_page_ != store_buffer_->Top());
1350       store_buffer_->SetTop(start_of_current_page_);
1351     }
1352   } else {
1353     UNREACHABLE();
1354   }
1355 }
1356
1357
1358 void PromotionQueue::Initialize() {
1359   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1360   // entries (where each is a pair of intptr_t). This allows us to simplify
1361   // the test for when to switch pages.
1362   DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
1363          0);
1364   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1365   front_ = rear_ =
1366       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1367   emergency_stack_ = NULL;
1368 }
1369
1370
1371 void PromotionQueue::RelocateQueueHead() {
1372   DCHECK(emergency_stack_ == NULL);
1373
1374   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1375   intptr_t* head_start = rear_;
1376   intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1377
1378   int entries_count =
1379       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1380
1381   emergency_stack_ = new List<Entry>(2 * entries_count);
1382
1383   while (head_start != head_end) {
1384     int size = static_cast<int>(*(head_start++));
1385     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1386     emergency_stack_->Add(Entry(obj, size));
1387   }
1388   rear_ = head_end;
1389 }
1390
1391
1392 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1393  public:
1394   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
1395
1396   virtual Object* RetainAs(Object* object) {
1397     if (!heap_->InFromSpace(object)) {
1398       return object;
1399     }
1400
1401     MapWord map_word = HeapObject::cast(object)->map_word();
1402     if (map_word.IsForwardingAddress()) {
1403       return map_word.ToForwardingAddress();
1404     }
1405     return NULL;
1406   }
1407
1408  private:
1409   Heap* heap_;
1410 };
1411
1412
1413 void Heap::Scavenge() {
1414   RelocationLock relocation_lock(this);
1415
1416 #ifdef VERIFY_HEAP
1417   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1418 #endif
1419
1420   gc_state_ = SCAVENGE;
1421
1422   // Implements Cheney's copying algorithm
1423   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1424
1425   // Clear descriptor cache.
1426   isolate_->descriptor_lookup_cache()->Clear();
1427
1428   // Used for updating survived_since_last_expansion_ at function end.
1429   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1430
1431   SelectScavengingVisitorsTable();
1432
1433   incremental_marking()->PrepareForScavenge();
1434
1435   // Flip the semispaces.  After flipping, to space is empty, from space has
1436   // live objects.
1437   new_space_.Flip();
1438   new_space_.ResetAllocationInfo();
1439
1440   // We need to sweep newly copied objects which can be either in the
1441   // to space or promoted to the old generation.  For to-space
1442   // objects, we treat the bottom of the to space as a queue.  Newly
1443   // copied and unswept objects lie between a 'front' mark and the
1444   // allocation pointer.
1445   //
1446   // Promoted objects can go into various old-generation spaces, and
1447   // can be allocated internally in the spaces (from the free list).
1448   // We treat the top of the to space as a queue of addresses of
1449   // promoted objects.  The addresses of newly promoted and unswept
1450   // objects lie between a 'front' mark and a 'rear' mark that is
1451   // updated as a side effect of promoting an object.
1452   //
1453   // There is guaranteed to be enough room at the top of the to space
1454   // for the addresses of promoted objects: every object promoted
1455   // frees up its size in bytes from the top of the new space, and
1456   // objects are at least one pointer in size.
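  // Editorial picture (illustrative, based on the comment above and on
  // PromotionQueue::Initialize): scanning from ToSpaceStart to ToSpaceEnd,
  // the to space holds copied-and-swept objects, then copied-but-unswept
  // objects between new_space_front and the allocation top, then free
  // space, and finally the promotion queue entries, which grow downwards
  // from ToSpaceEnd between rear_ and front_.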
1457   Address new_space_front = new_space_.ToSpaceStart();
1458   promotion_queue_.Initialize();
1459
1460 #ifdef DEBUG
1461   store_buffer()->Clean();
1462 #endif
1463
1464   ScavengeVisitor scavenge_visitor(this);
1465   // Copy roots.
1466   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1467
1468   // Copy objects reachable from the old generation.
1469   {
1470     StoreBufferRebuildScope scope(this, store_buffer(),
1471                                   &ScavengeStoreBufferCallback);
1472     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1473   }
1474
1475   // Copy objects reachable from simple cells by scavenging cell values
1476   // directly.
1477   HeapObjectIterator cell_iterator(cell_space_);
1478   for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
1479        heap_object = cell_iterator.Next()) {
1480     if (heap_object->IsCell()) {
1481       Cell* cell = Cell::cast(heap_object);
1482       Address value_address = cell->ValueAddress();
1483       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1484     }
1485   }
1486
1487   // Copy objects reachable from global property cells by scavenging global
1488   // property cell values directly.
1489   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1490   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1491        heap_object != NULL;
1492        heap_object = js_global_property_cell_iterator.Next()) {
1493     if (heap_object->IsPropertyCell()) {
1494       PropertyCell* cell = PropertyCell::cast(heap_object);
1495       Address value_address = cell->ValueAddress();
1496       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1497       Address type_address = cell->TypeAddress();
1498       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1499     }
1500   }
1501
1502   // Copy objects reachable from the encountered weak collections list.
1503   scavenge_visitor.VisitPointer(&encountered_weak_collections_);
1504
1505   // Copy objects reachable from the code flushing candidates list.
1506   MarkCompactCollector* collector = mark_compact_collector();
1507   if (collector->is_code_flushing_enabled()) {
1508     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1509   }
1510
1511   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1512
1513   while (isolate()->global_handles()->IterateObjectGroups(
1514       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1515     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1516   }
1517   isolate()->global_handles()->RemoveObjectGroups();
1518   isolate()->global_handles()->RemoveImplicitRefGroups();
1519
1520   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1521       &IsUnscavengedHeapObject);
1522   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1523       &scavenge_visitor);
1524   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1525
1526   UpdateNewSpaceReferencesInExternalStringTable(
1527       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1528
1529   promotion_queue_.Destroy();
1530
1531   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1532
1533   ScavengeWeakObjectRetainer weak_object_retainer(this);
1534   ProcessWeakReferences(&weak_object_retainer);
1535
1536   DCHECK(new_space_front == new_space_.top());
1537
1538   // Set age mark.
1539   new_space_.set_age_mark(new_space_.top());
1540
1541   new_space_.LowerInlineAllocationLimit(
1542       new_space_.inline_allocation_limit_step());
1543
1544   // Update how much has survived scavenge.
1545   IncrementYoungSurvivorsCounter(static_cast<int>(
1546       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1547
1548   LOG(isolate_, ResourceEvent("scavenge", "end"));
1549
1550   gc_state_ = NOT_IN_GC;
1551 }
1552
1553
1554 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1555                                                                 Object** p) {
1556   MapWord first_word = HeapObject::cast(*p)->map_word();
1557
1558   if (!first_word.IsForwardingAddress()) {
1559     // An unreachable external string can be finalized.
1560     heap->FinalizeExternalString(String::cast(*p));
1561     return NULL;
1562   }
1563
1564   // String is still reachable.
1565   return String::cast(first_word.ToForwardingAddress());
1566 }
1567
1568
1569 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1570     ExternalStringTableUpdaterCallback updater_func) {
1571 #ifdef VERIFY_HEAP
1572   if (FLAG_verify_heap) {
1573     external_string_table_.Verify();
1574   }
1575 #endif
1576
1577   if (external_string_table_.new_space_strings_.is_empty()) return;
1578
1579   Object** start = &external_string_table_.new_space_strings_[0];
1580   Object** end = start + external_string_table_.new_space_strings_.length();
1581   Object** last = start;
1582
1583   for (Object** p = start; p < end; ++p) {
1584     DCHECK(InFromSpace(*p));
1585     String* target = updater_func(this, p);
1586
1587     if (target == NULL) continue;
1588
1589     DCHECK(target->IsExternalString());
1590
1591     if (InNewSpace(target)) {
1592       // String is still in new space.  Update the table entry.
1593       *last = target;
1594       ++last;
1595     } else {
1596       // String got promoted.  Move it to the old string list.
1597       external_string_table_.AddOldString(target);
1598     }
1599   }
1600
1601   DCHECK(last <= end);
1602   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1603 }
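     // The loop above filters new_space_strings_ in place: strings that are
     // still in new space are compacted towards the front via 'last', promoted
     // strings are moved to the old-space list, and entries for which the
     // updater returns NULL (dead strings, which it may finalize) are dropped
     // before the list is shrunk to the surviving prefix.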
1604
1605
1606 void Heap::UpdateReferencesInExternalStringTable(
1607     ExternalStringTableUpdaterCallback updater_func) {
1608   // Update old space string references.
1609   if (external_string_table_.old_space_strings_.length() > 0) {
1610     Object** start = &external_string_table_.old_space_strings_[0];
1611     Object** end = start + external_string_table_.old_space_strings_.length();
1612     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1613   }
1614
1615   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1616 }
1617
1618
1619 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1620   ProcessArrayBuffers(retainer);
1621   ProcessNativeContexts(retainer);
1622   // TODO(mvstanton): AllocationSites only need to be processed during
1623   // MARK_COMPACT, as they live in old space. Verify and address.
1624   ProcessAllocationSites(retainer);
1625 }
1626
1627
1628 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
1629   Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
1630   // Update the head of the list of contexts.
1631   set_native_contexts_list(head);
1632 }
1633
1634
1635 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
1636   Object* array_buffer_obj =
1637       VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
1638   set_array_buffers_list(array_buffer_obj);
1639 }
1640
1641
1642 void Heap::TearDownArrayBuffers() {
1643   Object* undefined = undefined_value();
1644   for (Object* o = array_buffers_list(); o != undefined;) {
1645     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1646     Runtime::FreeArrayBuffer(isolate(), buffer);
1647     o = buffer->weak_next();
1648   }
1649   set_array_buffers_list(undefined);
1650 }
1651
1652
1653 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
1654   Object* allocation_site_obj =
1655       VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
1656   set_allocation_sites_list(allocation_site_obj);
1657 }
1658
1659
1660 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1661   DisallowHeapAllocation no_allocation_scope;
1662   Object* cur = allocation_sites_list();
1663   bool marked = false;
1664   while (cur->IsAllocationSite()) {
1665     AllocationSite* casted = AllocationSite::cast(cur);
1666     if (casted->GetPretenureMode() == flag) {
1667       casted->ResetPretenureDecision();
1668       casted->set_deopt_dependent_code(true);
1669       marked = true;
1670     }
1671     cur = casted->weak_next();
1672   }
1673   if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
1674 }
1675
1676
1677 void Heap::EvaluateOldSpaceLocalPretenuring(
1678     uint64_t size_of_objects_before_gc) {
1679   uint64_t size_of_objects_after_gc = SizeOfObjects();
1680   double old_generation_survival_rate =
1681       (static_cast<double>(size_of_objects_after_gc) * 100) /
1682       static_cast<double>(size_of_objects_before_gc);
1683
1684   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
1685     // Too many objects died in the old generation, pretenuring of wrong
1686     // allocation sites may be the cause for that. We have to deopt all
1687     // dependent code registered in the allocation sites to re-evaluate
1688     // our pretenuring decisions.
1689     ResetAllAllocationSitesDependentCode(TENURED);
1690     if (FLAG_trace_pretenuring) {
1691       PrintF(
1692           "Deopt all allocation sites dependent code due to low survival "
1693           "rate in the old generation %f\n",
1694           old_generation_survival_rate);
1695     }
1696   }
1697 }
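     // For example, with 200 MB of old-generation objects before a full GC and
     // 60 MB surviving it, the survival rate is 60 / 200 * 100 = 30%. Whenever
     // the rate drops below kOldSurvivalRateLowThreshold, every TENURED
     // allocation site is reset and its dependent code is marked for
     // deoptimization.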
1698
1699
1700 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1701   DisallowHeapAllocation no_allocation;
1702   // All external strings are listed in the external string table.
1703
1704   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1705    public:
1706     explicit ExternalStringTableVisitorAdapter(
1707         v8::ExternalResourceVisitor* visitor)
1708         : visitor_(visitor) {}
1709     virtual void VisitPointers(Object** start, Object** end) {
1710       for (Object** p = start; p < end; p++) {
1711         DCHECK((*p)->IsExternalString());
1712         visitor_->VisitExternalString(
1713             Utils::ToLocal(Handle<String>(String::cast(*p))));
1714       }
1715     }
1716
1717    private:
1718     v8::ExternalResourceVisitor* visitor_;
1719   } external_string_table_visitor(visitor);
1720
1721   external_string_table_.Iterate(&external_string_table_visitor);
1722 }
1723
1724
1725 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1726  public:
1727   static inline void VisitPointer(Heap* heap, Object** p) {
1728     Object* object = *p;
1729     if (!heap->InNewSpace(object)) return;
1730     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1731                          reinterpret_cast<HeapObject*>(object));
1732   }
1733 };
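     // NewSpaceScavenger walks an object's body and, for every slot that still
     // points into new space, calls Heap::ScavengeObject to copy or promote the
     // referenced object (or follow an existing forwarding address). It is the
     // per-object step of the Cheney scan in DoScavenge below.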
1734
1735
1736 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1737                          Address new_space_front) {
1738   do {
1739     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1740     // The addresses new_space_front and new_space_.top() define a
1741     // queue of unprocessed copied objects.  Process them until the
1742     // queue is empty.
1743     while (new_space_front != new_space_.top()) {
1744       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1745         HeapObject* object = HeapObject::FromAddress(new_space_front);
1746         new_space_front +=
1747             NewSpaceScavenger::IterateBody(object->map(), object);
1748       } else {
1749         new_space_front =
1750             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1751       }
1752     }
1753
1754     // Promote and process all the to-be-promoted objects.
1755     {
1756       StoreBufferRebuildScope scope(this, store_buffer(),
1757                                     &ScavengeStoreBufferCallback);
1758       while (!promotion_queue()->is_empty()) {
1759         HeapObject* target;
1760         int size;
1761         promotion_queue()->remove(&target, &size);
1762
1763         // A promoted object might already have been partially visited
1764         // during old-space pointer iteration. Thus we search specifically
1765         // for pointers into the from-semispace instead of looking for
1766         // pointers into new space.
1767         DCHECK(!target->IsMap());
1768         IterateAndMarkPointersToFromSpace(
1769             target->address(), target->address() + size, &ScavengeObject);
1770       }
1771     }
1772
1773     // Take another spin if there are now unswept objects in new space
1774     // (there are currently no more unswept promoted objects).
1775   } while (new_space_front != new_space_.top());
1776
1777   return new_space_front;
1778 }
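     // Roughly, DoScavenge is the classic Cheney scan: 'new_space_front' is the
     // scan pointer and new_space_.top() is the free pointer, and visiting an
     // object's body may bump the free pointer by copying more objects. A
     // minimal sketch of that loop, using hypothetical helpers VisitBody() and
     // SizeOf() in place of V8's dispatch tables:
     //
     //   Address scan = to_space_start;
     //   while (scan != allocation_top) {        // top may grow while scanning
     //     HeapObject* object = HeapObject::FromAddress(scan);
     //     VisitBody(object);                    // copies reachable children
     //     scan += SizeOf(object);
     //   }
     //
     // The real loop additionally skips page boundaries and drains the
     // promotion queue for objects that were moved to the old generation.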
1779
1780
1781 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
1782               0);  // NOLINT
1783 STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
1784               0);  // NOLINT
1785 STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
1786                kDoubleAlignmentMask) == 0);  // NOLINT
1787
1788
1789 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1790                                               int size));
1791
1792 static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
1793                                        int size) {
1794   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1795     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1796     return HeapObject::FromAddress(object->address() + kPointerSize);
1797   } else {
1798     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1799                                kPointerSize);
1800     return object;
1801   }
1802 }
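     // The caller over-allocates by kPointerSize so that one word can be spent
     // on alignment. For example, with kDoubleAlignment == 8 and
     // kPointerSize == 4, a raw allocation starting at an address ending in 0x4
     // gets a one-word filler there and the object begins at the next 8-byte
     // boundary; if the raw address is already 8-byte aligned, the trailing
     // extra word becomes the filler.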
1803
1804
1805 enum LoggingAndProfiling {
1806   LOGGING_AND_PROFILING_ENABLED,
1807   LOGGING_AND_PROFILING_DISABLED
1808 };
1809
1810
1811 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1812
1813
1814 template <MarksHandling marks_handling,
1815           LoggingAndProfiling logging_and_profiling_mode>
1816 class ScavengingVisitor : public StaticVisitorBase {
1817  public:
1818   static void Initialize() {
1819     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1820     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1821     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1822     table_.Register(kVisitByteArray, &EvacuateByteArray);
1823     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1824     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1825     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
1826     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
1827
1828     table_.Register(
1829         kVisitNativeContext,
1830         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1831             Context::kSize>);
1832
1833     table_.Register(
1834         kVisitConsString,
1835         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1836             ConsString::kSize>);
1837
1838     table_.Register(
1839         kVisitSlicedString,
1840         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1841             SlicedString::kSize>);
1842
1843     table_.Register(
1844         kVisitSymbol,
1845         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1846             Symbol::kSize>);
1847
1848     table_.Register(
1849         kVisitSharedFunctionInfo,
1850         &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1851             SharedFunctionInfo::kSize>);
1852
1853     table_.Register(kVisitJSWeakCollection,
1854                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1855
1856     table_.Register(kVisitJSArrayBuffer,
1857                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1858
1859     table_.Register(kVisitJSTypedArray,
1860                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1861
1862     table_.Register(kVisitJSDataView,
1863                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1864
1865     table_.Register(kVisitJSRegExp,
1866                     &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
1867
1868     if (marks_handling == IGNORE_MARKS) {
1869       table_.Register(
1870           kVisitJSFunction,
1871           &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
1872               JSFunction::kSize>);
1873     } else {
1874       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1875     }
1876
1877     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1878                                    kVisitDataObject, kVisitDataObjectGeneric>();
1879
1880     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1881                                    kVisitJSObject, kVisitJSObjectGeneric>();
1882
1883     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1884                                    kVisitStruct, kVisitStructGeneric>();
1885   }
1886
1887   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1888     return &table_;
1889   }
1890
1891  private:
1892   enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
1893
1894   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1895     bool should_record = false;
1896 #ifdef DEBUG
1897     should_record = FLAG_heap_stats;
1898 #endif
1899     should_record = should_record || FLAG_log_gc;
1900     if (should_record) {
1901       if (heap->new_space()->Contains(obj)) {
1902         heap->new_space()->RecordAllocation(obj);
1903       } else {
1904         heap->new_space()->RecordPromotion(obj);
1905       }
1906     }
1907   }
1908
1909   // Helper function used by SemiSpaceCopyObject and PromoteObject to copy a
1910   // source object to an allocated target object and update the forwarding
1911   // pointer in the source object.
1912   INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
1913                                    HeapObject* target, int size)) {
1914     // If we migrate into to-space, then the to-space top pointer should be
1915     // right after the target object. Incorporate double alignment
1916     // over-allocation.
1917     DCHECK(!heap->InToSpace(target) ||
1918            target->address() + size == heap->new_space()->top() ||
1919            target->address() + size + kPointerSize == heap->new_space()->top());
1920
1921     // Make sure that we do not overwrite the promotion queue which is at
1922     // the end of to-space.
1923     DCHECK(!heap->InToSpace(target) ||
1924            heap->promotion_queue()->IsBelowPromotionQueue(
1925                heap->new_space()->top()));
1926
1927     // Copy the content of source to target.
1928     heap->CopyBlock(target->address(), source->address(), size);
1929
1930     // Set the forwarding address.
1931     source->set_map_word(MapWord::FromForwardingAddress(target));
1932
1933     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
1934       // Update NewSpace stats if necessary.
1935       RecordCopiedObject(heap, target);
1936       heap->OnMoveEvent(target, source, size);
1937     }
1938
1939     if (marks_handling == TRANSFER_MARKS) {
1940       if (Marking::TransferColor(source, target)) {
1941         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
1942       }
1943     }
1944   }
1945
1946   template <int alignment>
1947   static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
1948                                          HeapObject* object, int object_size) {
1949     Heap* heap = map->GetHeap();
1950
1951     int allocation_size = object_size;
1952     if (alignment != kObjectAlignment) {
1953       DCHECK(alignment == kDoubleAlignment);
1954       allocation_size += kPointerSize;
1955     }
1956
1957     DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
1958     AllocationResult allocation =
1959         heap->new_space()->AllocateRaw(allocation_size);
1960
1961     HeapObject* target = NULL;  // Initialization to please compiler.
1962     if (allocation.To(&target)) {
1963       // Order is important here: Set the promotion limit before storing a
1964       // filler for double alignment or migrating the object. Otherwise we
1965       // may end up overwriting promotion queue entries when we migrate the
1966       // object.
1967       heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
1968
1969       if (alignment != kObjectAlignment) {
1970         target = EnsureDoubleAligned(heap, target, allocation_size);
1971       }
1972
1973       // Order is important: slot might be inside of the target if target
1974       // was allocated over a dead object and slot comes from the store
1975       // buffer.
1976       *slot = target;
1977       MigrateObject(heap, object, target, object_size);
1978
1979       heap->IncrementSemiSpaceCopiedObjectSize(object_size);
1980       return true;
1981     }
1982     return false;
1983   }
1984
1985
1986   template <ObjectContents object_contents, int alignment>
1987   static inline bool PromoteObject(Map* map, HeapObject** slot,
1988                                    HeapObject* object, int object_size) {
1989     Heap* heap = map->GetHeap();
1990
1991     int allocation_size = object_size;
1992     if (alignment != kObjectAlignment) {
1993       DCHECK(alignment == kDoubleAlignment);
1994       allocation_size += kPointerSize;
1995     }
1996
1997     AllocationResult allocation;
1998     if (object_contents == DATA_OBJECT) {
1999       DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2000       allocation = heap->old_data_space()->AllocateRaw(allocation_size);
2001     } else {
2002       DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2003       allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
2004     }
2005
2006     HeapObject* target = NULL;  // Initialization to please compiler.
2007     if (allocation.To(&target)) {
2008       if (alignment != kObjectAlignment) {
2009         target = EnsureDoubleAligned(heap, target, allocation_size);
2010       }
2011
2012       // Order is important: slot might be inside of the target if target
2013       // was allocated over a dead object and slot comes from the store
2014       // buffer.
2015       *slot = target;
2016       MigrateObject(heap, object, target, object_size);
2017
2018       if (object_contents == POINTER_OBJECT) {
2019         if (map->instance_type() == JS_FUNCTION_TYPE) {
2020           heap->promotion_queue()->insert(target,
2021                                           JSFunction::kNonWeakFieldsEndOffset);
2022         } else {
2023           heap->promotion_queue()->insert(target, object_size);
2024         }
2025       }
2026       heap->IncrementPromotedObjectsSize(object_size);
2027       return true;
2028     }
2029     return false;
2030   }
2031
2032
2033   template <ObjectContents object_contents, int alignment>
2034   static inline void EvacuateObject(Map* map, HeapObject** slot,
2035                                     HeapObject* object, int object_size) {
2036     SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2037     SLOW_DCHECK(object->Size() == object_size);
2038     Heap* heap = map->GetHeap();
2039
2040     if (!heap->ShouldBePromoted(object->address(), object_size)) {
2041       // A semi-space copy may fail due to fragmentation. In that case, we
2042       // try to promote the object.
2043       if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
2044         return;
2045       }
2046     }
2047
2048     if (PromoteObject<object_contents, alignment>(map, slot, object,
2049                                                   object_size)) {
2050       return;
2051     }
2052
2053     // If promotion failed, we try to copy the object to the other semi-space.
2054     if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
2055
2056     UNREACHABLE();
2057   }
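       // Evacuation order: objects that are not yet old enough to be promoted
       // are first copied within new space; if that copy fails (or the object
       // should be promoted), promotion into old data or old pointer space is
       // attempted; if promotion fails, the semi-space copy is retried. Both
       // failing is not expected, hence UNREACHABLE().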
2058
2059
2060   static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
2061                                         HeapObject* object) {
2062     ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
2063         JSFunction::kSize>(map, slot, object);
2064
2065     MapWord map_word = object->map_word();
2066     DCHECK(map_word.IsForwardingAddress());
2067     HeapObject* target = map_word.ToForwardingAddress();
2068
2069     MarkBit mark_bit = Marking::MarkBitFrom(target);
2070     if (Marking::IsBlack(mark_bit)) {
2071       // This object is black and might not be rescanned by the marker.
2072       // We should explicitly record the code entry slot for compaction
2073       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
2074       // will miss it, as it is not HeapObject-tagged.
2075       Address code_entry_slot =
2076           target->address() + JSFunction::kCodeEntryOffset;
2077       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2078       map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
2079           code_entry_slot, code);
2080     }
2081   }
2082
2083
2084   static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
2085                                         HeapObject* object) {
2086     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2087     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2088                                                      object_size);
2089   }
2090
2091
2092   static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
2093                                               HeapObject* object) {
2094     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2095     int object_size = FixedDoubleArray::SizeFor(length);
2096     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2097                                                   object_size);
2098   }
2099
2100
2101   static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
2102                                              HeapObject* object) {
2103     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2104     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2105                                                   object_size);
2106   }
2107
2108
2109   static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
2110                                                HeapObject* object) {
2111     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2112     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
2113                                                   object_size);
2114   }
2115
2116
2117   static inline void EvacuateByteArray(Map* map, HeapObject** slot,
2118                                        HeapObject* object) {
2119     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2120     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2121                                                   object_size);
2122   }
2123
2124
2125   static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
2126                                               HeapObject* object) {
2127     int object_size = SeqOneByteString::cast(object)
2128                           ->SeqOneByteStringSize(map->instance_type());
2129     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2130                                                   object_size);
2131   }
2132
2133
2134   static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
2135                                               HeapObject* object) {
2136     int object_size = SeqTwoByteString::cast(object)
2137                           ->SeqTwoByteStringSize(map->instance_type());
2138     EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
2139                                                   object_size);
2140   }
2141
2142
2143   static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
2144                                                HeapObject* object) {
2145     DCHECK(IsShortcutCandidate(map->instance_type()));
2146
2147     Heap* heap = map->GetHeap();
2148
2149     if (marks_handling == IGNORE_MARKS &&
2150         ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
2151       HeapObject* first =
2152           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2153
2154       *slot = first;
2155
2156       if (!heap->InNewSpace(first)) {
2157         object->set_map_word(MapWord::FromForwardingAddress(first));
2158         return;
2159       }
2160
2161       MapWord first_word = first->map_word();
2162       if (first_word.IsForwardingAddress()) {
2163         HeapObject* target = first_word.ToForwardingAddress();
2164
2165         *slot = target;
2166         object->set_map_word(MapWord::FromForwardingAddress(target));
2167         return;
2168       }
2169
2170       heap->DoScavengeObject(first->map(), slot, first);
2171       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2172       return;
2173     }
2174
2175     int object_size = ConsString::kSize;
2176     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
2177                                                      object_size);
2178   }
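       // Shortcut example: a cons string whose second part is the empty string
       // is replaced by its first part, so the slot ends up pointing at the
       // first part and the cons wrapper is never copied; the wrapper's map
       // word forwards to that first part instead. The shortcut is only taken
       // with IGNORE_MARKS; while incremental marking is on, the cons string is
       // evacuated like any other pointer object.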
2179
2180   template <ObjectContents object_contents>
2181   class ObjectEvacuationStrategy {
2182    public:
2183     template <int object_size>
2184     static inline void VisitSpecialized(Map* map, HeapObject** slot,
2185                                         HeapObject* object) {
2186       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2187                                                         object_size);
2188     }
2189
2190     static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
2191       int object_size = map->instance_size();
2192       EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
2193                                                         object_size);
2194     }
2195   };
2196
2197   static VisitorDispatchTable<ScavengingCallback> table_;
2198 };
2199
2200
2201 template <MarksHandling marks_handling,
2202           LoggingAndProfiling logging_and_profiling_mode>
2203 VisitorDispatchTable<ScavengingCallback>
2204     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2205
2206
2207 static void InitializeScavengingVisitorsTables() {
2208   ScavengingVisitor<TRANSFER_MARKS,
2209                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2210   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2211   ScavengingVisitor<TRANSFER_MARKS,
2212                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2213   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2214 }
2215
2216
2217 void Heap::SelectScavengingVisitorsTable() {
2218   bool logging_and_profiling =
2219       FLAG_verify_predictable || isolate()->logger()->is_logging() ||
2220       isolate()->cpu_profiler()->is_profiling() ||
2221       (isolate()->heap_profiler() != NULL &&
2222        isolate()->heap_profiler()->is_tracking_object_moves());
2223
2224   if (!incremental_marking()->IsMarking()) {
2225     if (!logging_and_profiling) {
2226       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2227           IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2228     } else {
2229       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2230           IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2231     }
2232   } else {
2233     if (!logging_and_profiling) {
2234       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2235           TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
2236     } else {
2237       scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
2238           TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
2239     }
2240
2241     if (incremental_marking()->IsCompacting()) {
2242       // When compacting, forbid short-circuiting of cons strings.
2243       // The scavenging code relies on the fact that a new-space object
2244       // can't be evacuated into an evacuation candidate, but
2245       // short-circuiting violates this assumption.
2246       scavenging_visitors_table_.Register(
2247           StaticVisitorBase::kVisitShortcutCandidate,
2248           scavenging_visitors_table_.GetVisitorById(
2249               StaticVisitorBase::kVisitConsString));
2250     }
2251   }
2252 }
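     // The four ScavengingVisitor instantiations above cover every combination
     // of {IGNORE_MARKS, TRANSFER_MARKS} x {logging/profiling off, on}. Marks
     // are transferred only while incremental marking is active, and the
     // logging variants additionally report object moves to the profilers.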
2253
2254
2255 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2256   SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
2257   MapWord first_word = object->map_word();
2258   SLOW_DCHECK(!first_word.IsForwardingAddress());
2259   Map* map = first_word.ToMap();
2260   map->GetHeap()->DoScavengeObject(map, p, object);
2261 }
2262
2263
2264 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
2265                                           int instance_size) {
2266   Object* result;
2267   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2268   if (!allocation.To(&result)) return allocation;
2269
2270   // Map::cast cannot be used due to uninitialized map field.
2271   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2272   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2273   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2274   reinterpret_cast<Map*>(result)->set_visitor_id(
2275       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2276   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2277   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2278   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2279   reinterpret_cast<Map*>(result)->set_bit_field(0);
2280   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2281   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2282                    Map::OwnsDescriptors::encode(true);
2283   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2284   return result;
2285 }
2286
2287
2288 AllocationResult Heap::AllocateMap(InstanceType instance_type,
2289                                    int instance_size,
2290                                    ElementsKind elements_kind) {
2291   HeapObject* result;
2292   AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2293   if (!allocation.To(&result)) return allocation;
2294
2295   result->set_map_no_write_barrier(meta_map());
2296   Map* map = Map::cast(result);
2297   map->set_instance_type(instance_type);
2298   map->set_visitor_id(
2299       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2300   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2301   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2302   map->set_instance_size(instance_size);
2303   map->set_inobject_properties(0);
2304   map->set_pre_allocated_property_fields(0);
2305   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2306   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2307                           SKIP_WRITE_BARRIER);
2308   map->init_back_pointer(undefined_value());
2309   map->set_unused_property_fields(0);
2310   map->set_instance_descriptors(empty_descriptor_array());
2311   map->set_bit_field(0);
2312   map->set_bit_field2(1 << Map::kIsExtensible);
2313   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2314                    Map::OwnsDescriptors::encode(true);
2315   map->set_bit_field3(bit_field3);
2316   map->set_elements_kind(elements_kind);
2317
2318   return map;
2319 }
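     // Unlike AllocatePartialMap above, AllocateMap fully initializes the map,
     // including prototype, constructor, descriptors, code cache and back
     // pointer; the partial maps created during bootstrapping get these fields
     // patched up later in CreateInitialMaps.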
2320
2321
2322 AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
2323                                             AllocationSpace space) {
2324   HeapObject* obj;
2325   {
2326     AllocationResult allocation = AllocateRaw(size, space, space);
2327     if (!allocation.To(&obj)) return allocation;
2328   }
2329 #ifdef DEBUG
2330   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
2331   DCHECK(chunk->owner()->identity() == space);
2332 #endif
2333   CreateFillerObjectAt(obj->address(), size);
2334   return obj;
2335 }
2336
2337
2338 const Heap::StringTypeTable Heap::string_type_table[] = {
2339 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
2340   { type, size, k##camel_name##MapRootIndex }             \
2341   ,
2342     STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2343 #undef STRING_TYPE_ELEMENT
2344 };
2345
2346
2347 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2348 #define CONSTANT_STRING_ELEMENT(name, contents) \
2349   { contents, k##name##RootIndex }              \
2350   ,
2351     INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2352 #undef CONSTANT_STRING_ELEMENT
2353 };
2354
2355
2356 const Heap::StructTable Heap::struct_table[] = {
2357 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
2358   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
2359   ,
2360     STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2361 #undef STRUCT_TABLE_ELEMENT
2362 };
2363
2364
2365 bool Heap::CreateInitialMaps() {
2366   HeapObject* obj;
2367   {
2368     AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
2369     if (!allocation.To(&obj)) return false;
2370   }
2371   // Map::cast cannot be used due to uninitialized map field.
2372   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2373   set_meta_map(new_meta_map);
2374   new_meta_map->set_map(new_meta_map);
2375
2376   {  // Partial map allocation
2377 #define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
2378   {                                                                          \
2379     Map* map;                                                                \
2380     if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
2381     set_##field_name##_map(map);                                             \
2382   }
2383
2384     ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
2385     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
2386     ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
2387     ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
2388                          constant_pool_array);
2389
2390 #undef ALLOCATE_PARTIAL_MAP
2391   }
2392
2393   // Allocate the empty array.
2394   {
2395     AllocationResult allocation = AllocateEmptyFixedArray();
2396     if (!allocation.To(&obj)) return false;
2397   }
2398   set_empty_fixed_array(FixedArray::cast(obj));
2399
2400   {
2401     AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
2402     if (!allocation.To(&obj)) return false;
2403   }
2404   set_null_value(Oddball::cast(obj));
2405   Oddball::cast(obj)->set_kind(Oddball::kNull);
2406
2407   {
2408     AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
2409     if (!allocation.To(&obj)) return false;
2410   }
2411   set_undefined_value(Oddball::cast(obj));
2412   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2413   DCHECK(!InNewSpace(undefined_value()));
2414
2415   // Set preliminary exception sentinel value before actually initializing it.
2416   set_exception(null_value());
2417
2418   // Allocate the empty descriptor array.
2419   {
2420     AllocationResult allocation = AllocateEmptyFixedArray();
2421     if (!allocation.To(&obj)) return false;
2422   }
2423   set_empty_descriptor_array(DescriptorArray::cast(obj));
2424
2425   // Allocate the constant pool array.
2426   {
2427     AllocationResult allocation = AllocateEmptyConstantPoolArray();
2428     if (!allocation.To(&obj)) return false;
2429   }
2430   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2431
2432   // Fix the instance_descriptors for the existing maps.
2433   meta_map()->set_code_cache(empty_fixed_array());
2434   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2435   meta_map()->init_back_pointer(undefined_value());
2436   meta_map()->set_instance_descriptors(empty_descriptor_array());
2437
2438   fixed_array_map()->set_code_cache(empty_fixed_array());
2439   fixed_array_map()->set_dependent_code(
2440       DependentCode::cast(empty_fixed_array()));
2441   fixed_array_map()->init_back_pointer(undefined_value());
2442   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2443
2444   undefined_map()->set_code_cache(empty_fixed_array());
2445   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2446   undefined_map()->init_back_pointer(undefined_value());
2447   undefined_map()->set_instance_descriptors(empty_descriptor_array());
2448
2449   null_map()->set_code_cache(empty_fixed_array());
2450   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2451   null_map()->init_back_pointer(undefined_value());
2452   null_map()->set_instance_descriptors(empty_descriptor_array());
2453
2454   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2455   constant_pool_array_map()->set_dependent_code(
2456       DependentCode::cast(empty_fixed_array()));
2457   constant_pool_array_map()->init_back_pointer(undefined_value());
2458   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2459
2460   // Fix prototype object for existing maps.
2461   meta_map()->set_prototype(null_value());
2462   meta_map()->set_constructor(null_value());
2463
2464   fixed_array_map()->set_prototype(null_value());
2465   fixed_array_map()->set_constructor(null_value());
2466
2467   undefined_map()->set_prototype(null_value());
2468   undefined_map()->set_constructor(null_value());
2469
2470   null_map()->set_prototype(null_value());
2471   null_map()->set_constructor(null_value());
2472
2473   constant_pool_array_map()->set_prototype(null_value());
2474   constant_pool_array_map()->set_constructor(null_value());
2475
2476   {  // Map allocation
2477 #define ALLOCATE_MAP(instance_type, size, field_name)               \
2478   {                                                                 \
2479     Map* map;                                                       \
2480     if (!AllocateMap((instance_type), size).To(&map)) return false; \
2481     set_##field_name##_map(map);                                    \
2482   }
2483
2484 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
2485   ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2486
2487     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2488     DCHECK(fixed_array_map() != fixed_cow_array_map());
2489
2490     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2491     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2492     ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
2493                  mutable_heap_number)
2494     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2495     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2496
2497     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
2498     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
2499     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
2500     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
2501     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
2502     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
2503     ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
2504
2505     for (unsigned i = 0; i < arraysize(string_type_table); i++) {
2506       const StringTypeTable& entry = string_type_table[i];
2507       {
2508         AllocationResult allocation = AllocateMap(entry.type, entry.size);
2509         if (!allocation.To(&obj)) return false;
2510       }
2511       // Mark cons string maps as unstable, because their objects can change
2512       // maps during GC.
2513       Map* map = Map::cast(obj);
2514       if (StringShape(entry.type).IsCons()) map->mark_unstable();
2515       roots_[entry.index] = map;
2516     }
2517
2518     ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2519     undetectable_string_map()->set_is_undetectable();
2520
2521     ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
2522     undetectable_one_byte_string_map()->set_is_undetectable();
2523
2524     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2525     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2526     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2527
2528 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
2529   ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
2530                external_##type##_array)
2531
2532     TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2533 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2534
2535 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
2536   ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
2537
2538     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2539 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2540
2541     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
2542
2543     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2544
2545     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2546     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2547     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2548     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2549
2550
2551     for (unsigned i = 0; i < arraysize(struct_table); i++) {
2552       const StructTable& entry = struct_table[i];
2553       Map* map;
2554       if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
2555       roots_[entry.index] = map;
2556     }
2557
2558     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2559     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
2560
2561     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2562     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2563     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2564     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2565     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2566     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2567
2568     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2569     native_context_map()->set_dictionary_map(true);
2570     native_context_map()->set_visitor_id(
2571         StaticVisitorBase::kVisitNativeContext);
2572
2573     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2574                  shared_function_info)
2575
2576     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
2577     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
2578     external_map()->set_is_extensible(false);
2579 #undef ALLOCATE_VARSIZE_MAP
2580 #undef ALLOCATE_MAP
2581   }
2582
2583   {  // Empty arrays
2584     {
2585       ByteArray* byte_array;
2586       if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
2587       set_empty_byte_array(byte_array);
2588     }
2589
2590 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
2591   {                                                                   \
2592     ExternalArray* obj;                                               \
2593     if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
2594       return false;                                                   \
2595     set_empty_external_##type##_array(obj);                           \
2596   }
2597
2598     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2599 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2600
2601 #define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
2602   {                                                                     \
2603     FixedTypedArrayBase* obj;                                           \
2604     if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
2605       return false;                                                     \
2606     set_empty_fixed_##type##_array(obj);                                \
2607   }
2608
2609     TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
2610 #undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
2611   }
2612   DCHECK(!InNewSpace(empty_fixed_array()));
2613   return true;
2614 }
2615
2616
2617 AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
2618                                           PretenureFlag pretenure) {
2619   // Statically ensure that it is safe to allocate heap numbers in paged
2620   // spaces.
2621   int size = HeapNumber::kSize;
2622   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2623
2624   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2625
2626   HeapObject* result;
2627   {
2628     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
2629     if (!allocation.To(&result)) return allocation;
2630   }
2631
2632   Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
2633   HeapObject::cast(result)->set_map_no_write_barrier(map);
2634   HeapNumber::cast(result)->set_value(value);
2635   return result;
2636 }
2637
2638
2639 #define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
2640   V(Float32x4, float32x4)                  \
2641   V(Float64x2, float64x2)                  \
2642   V(Int32x4, int32x4)
2643
2644
2645 #define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(TYPE, type)               \
2646 AllocationResult Heap::Allocate##TYPE(type##_value_t value,           \
2647                                       PretenureFlag pretenure) {      \
2648   STATIC_ASSERT(TYPE::kSize <= Page::kMaxRegularHeapObjectSize);      \
2649                                                                       \
2650   AllocationSpace space =                                             \
2651       SelectSpace(TYPE::kSize, OLD_DATA_SPACE, pretenure);            \
2652                                                                       \
2653   HeapObject* result;                                                 \
2654   { AllocationResult allocation =                                     \
2655         AllocateRaw(TYPE::kSize, space, OLD_DATA_SPACE);              \
2656     if (!allocation.To(&result)) return allocation;                   \
2657   }                                                                   \
2658                                                                       \
2659   result->set_map_no_write_barrier(                                   \
2660   isolate()->native_context()->type##_function()->initial_map());     \
2661   JSObject::cast(result)->set_properties(empty_fixed_array());        \
2662   JSObject::cast(result)->set_elements(empty_fixed_array());          \
2663                                                                       \
2664   HeapObject* storage;                                                \
2665   int storage_size =                                                  \
2666       FixedTypedArrayBase::kDataOffset + k##TYPE##Size;               \
2667   space = SelectSpace(storage_size, OLD_DATA_SPACE, pretenure);       \
2668   { AllocationResult allocation =                                     \
2669        AllocateRaw(storage_size, space, OLD_DATA_SPACE);              \
2670     if (!allocation.To(&storage)) return allocation;                  \
2671   }                                                                   \
2672                                                                       \
2673   storage->set_map(                                                   \
2674       *isolate()->factory()->fixed_##type##_array_map());             \
2675   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(storage); \
2676   elements->set_length(static_cast<int>(1));                          \
2677   memset(elements->DataPtr(), 0, elements->DataSize());               \
2678   Fixed##TYPE##Array::cast(storage)->set(0, value);                   \
2679   TYPE::cast(result)->set_value(storage);                             \
2680   return result;                                                      \
2681 }
2682
2683
2684 SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
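     // The expansion above defines Heap::AllocateFloat32x4,
     // Heap::AllocateFloat64x2 and Heap::AllocateInt32x4. Each allocates the
     // SIMD wrapper object from its type's initial map plus a one-element fixed
     // typed array as backing storage for the 128-bit value.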
2685
2686
2687 AllocationResult Heap::AllocateCell(Object* value) {
2688   int size = Cell::kSize;
2689   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2690
2691   HeapObject* result;
2692   {
2693     AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2694     if (!allocation.To(&result)) return allocation;
2695   }
2696   result->set_map_no_write_barrier(cell_map());
2697   Cell::cast(result)->set_value(value);
2698   return result;
2699 }
2700
2701
2702 AllocationResult Heap::AllocatePropertyCell() {
2703   int size = PropertyCell::kSize;
2704   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2705
2706   HeapObject* result;
2707   AllocationResult allocation =
2708       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2709   if (!allocation.To(&result)) return allocation;
2710
2711   result->set_map_no_write_barrier(global_property_cell_map());
2712   PropertyCell* cell = PropertyCell::cast(result);
2713   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2714                            SKIP_WRITE_BARRIER);
2715   cell->set_value(the_hole_value());
2716   cell->set_type(HeapType::None());
2717   return result;
2718 }
2719
2720
2721 void Heap::CreateApiObjects() {
2722   HandleScope scope(isolate());
2723   Factory* factory = isolate()->factory();
2724   Handle<Map> new_neander_map =
2725       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2726
2727   // Don't use Smi-only elements optimizations for objects with the neander
2728   // map. There are too many cases where element values are set directly,
2729   // without a bottleneck to trap the Smi-only -> fast elements transition, and
2730   // there appears to be no benefit in optimizing this case.
2731   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2732   set_neander_map(*new_neander_map);
2733
2734   Handle<JSObject> listeners = factory->NewNeanderObject();
2735   Handle<FixedArray> elements = factory->NewFixedArray(2);
2736   elements->set(0, Smi::FromInt(0));
2737   listeners->set_elements(*elements);
2738   set_message_listeners(*listeners);
2739 }
2740
2741
2742 void Heap::CreateJSEntryStub() {
2743   JSEntryStub stub(isolate(), StackFrame::ENTRY);
2744   set_js_entry_code(*stub.GetCode());
2745 }
2746
2747
2748 void Heap::CreateJSConstructEntryStub() {
2749   JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
2750   set_js_construct_entry_code(*stub.GetCode());
2751 }
2752
2753
2754 void Heap::CreateFixedStubs() {
2755   // Here we create roots for fixed stubs. They are needed at GC
2756   // for cooking and uncooking (check out frames.cc).
2757   // This eliminates the need for a dictionary lookup in the
2758   // stub cache for these stubs.
2759   HandleScope scope(isolate());
2760
2761   // Create stubs that should be there, so we don't unexpectedly have to
2762   // create them if we need them during the creation of another stub.
2763   // Stub creation mixes raw pointers and handles in an unsafe manner so
2764   // we cannot create stubs while we are creating stubs.
2765   CodeStub::GenerateStubsAheadOfTime(isolate());
2766
2767   // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
2768   // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
2769   // is created.
2770
2771   // gcc-4.4 has a problem generating correct code for the following snippet:
2772   // {  JSEntryStub stub;
2773   //    js_entry_code_ = *stub.GetCode();
2774   // }
2775   // {  JSConstructEntryStub stub;
2776   //    js_construct_entry_code_ = *stub.GetCode();
2777   // }
2778   // To work around the problem, make separate functions without inlining.
2779   Heap::CreateJSEntryStub();
2780   Heap::CreateJSConstructEntryStub();
2781 }
2782
2783
2784 void Heap::CreateInitialObjects() {
2785   HandleScope scope(isolate());
2786   Factory* factory = isolate()->factory();
2787
2788   // The -0 value must be set before NewNumber works.
2789   set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
2790   DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
2791
2792   set_nan_value(
2793       *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
2794   set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
2795
2796   // The hole has not been created yet, but we want to put something
2797   // predictable in the gaps in the string table, so let's make that Smi zero.
2798   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2799
2800   // Allocate initial string table.
2801   set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
2802
2803   // Finish initializing oddballs after creating the string table.
2804   Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
2805                       factory->nan_value(), Oddball::kUndefined);
2806
2807   // Initialize the null_value.
2808   Oddball::Initialize(isolate(), factory->null_value(), "null",
2809                       handle(Smi::FromInt(0), isolate()), Oddball::kNull);
2810
2811   set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
2812                                       handle(Smi::FromInt(1), isolate()),
2813                                       Oddball::kTrue));
2814
2815   set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
2816                                        handle(Smi::FromInt(0), isolate()),
2817                                        Oddball::kFalse));
2818
2819   set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
2820                                           handle(Smi::FromInt(-1), isolate()),
2821                                           Oddball::kTheHole));
2822
2823   set_uninitialized_value(*factory->NewOddball(
2824       factory->uninitialized_map(), "uninitialized",
2825       handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
2826
2827   set_arguments_marker(*factory->NewOddball(
2828       factory->arguments_marker_map(), "arguments_marker",
2829       handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
2830
2831   set_no_interceptor_result_sentinel(*factory->NewOddball(
2832       factory->no_interceptor_result_sentinel_map(),
2833       "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
2834       Oddball::kOther));
2835
2836   set_termination_exception(*factory->NewOddball(
2837       factory->termination_exception_map(), "termination_exception",
2838       handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
2839
2840   set_exception(*factory->NewOddball(factory->exception_map(), "exception",
2841                                      handle(Smi::FromInt(-5), isolate()),
2842                                      Oddball::kException));
2843
2844   for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
2845     Handle<String> str =
2846         factory->InternalizeUtf8String(constant_string_table[i].contents);
2847     roots_[constant_string_table[i].index] = *str;
2848   }
2849
2850   // Allocate the hidden string which is used to identify the hidden properties
2851   // in JSObjects. The hash code has a special value so that it will not match
2852   // the empty string when searching for the property. It cannot be part of the
2853   // loop above because it needs to be allocated manually with the special
2854   // hash code in place. The hash code for the hidden_string is zero to ensure
2855   // that it will always be at the first entry in property descriptors.
2856   hidden_string_ = *factory->NewOneByteInternalizedString(
2857       OneByteVector("", 0), String::kEmptyStringHash);
2858
2859   // Create the code_stubs dictionary. The initial size is set to avoid
2860   // expanding the dictionary during bootstrapping.
2861   set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
2862
2863   // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
2864   // is set to avoid expanding the dictionary during bootstrapping.
2865   set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
2866
2867   set_polymorphic_code_cache(PolymorphicCodeCache::cast(
2868       *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
2869
2870   set_instanceof_cache_function(Smi::FromInt(0));
2871   set_instanceof_cache_map(Smi::FromInt(0));
2872   set_instanceof_cache_answer(Smi::FromInt(0));
2873
2874   CreateFixedStubs();
2875
2876   // Allocate the dictionary of intrinsic function names.
2877   Handle<NameDictionary> intrinsic_names =
2878       NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
2879   Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
2880   set_intrinsic_function_names(*intrinsic_names);
2881
2882   set_number_string_cache(
2883       *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
2884
2885   // Allocate cache for single character one byte strings.
2886   set_single_character_string_cache(
2887       *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
2888
2889   // Allocate cache for string split and regexp-multiple.
2890   set_string_split_cache(*factory->NewFixedArray(
2891       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2892   set_regexp_multiple_cache(*factory->NewFixedArray(
2893       RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
2894
2895   // Allocate cache for external strings pointing to native source code.
2896   set_natives_source_cache(
2897       *factory->NewFixedArray(Natives::GetBuiltinsCount()));
2898
2899   set_undefined_cell(*factory->NewCell(factory->undefined_value()));
2900
2901   // The symbol registry is initialized lazily.
2902   set_symbol_registry(undefined_value());
2903
2904   // Allocate object to hold object observation state.
2905   set_observation_state(*factory->NewJSObjectFromMap(
2906       factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
2907
2908   // The microtask queue uses the empty fixed array as a sentinel for "empty".
2909   // The number of queued microtasks is stored in Isolate::pending_microtask_count().
2910   set_microtask_queue(empty_fixed_array());
2911
2912   set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2913   set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
2914   set_frozen_symbol(*factory->NewPrivateOwnSymbol());
2915   set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
2916   set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
2917   set_generic_symbol(*factory->NewPrivateOwnSymbol());
2918   set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
2919   set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
2920   set_observed_symbol(*factory->NewPrivateOwnSymbol());
2921   set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
2922   set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
2923   set_home_object_symbol(*factory->NewPrivateOwnSymbol());
2924
2925   Handle<SeededNumberDictionary> slow_element_dictionary =
2926       SeededNumberDictionary::New(isolate(), 0, TENURED);
2927   slow_element_dictionary->set_requires_slow_elements();
2928   set_empty_slow_element_dictionary(*slow_element_dictionary);
2929
2930   set_materialized_objects(*factory->NewFixedArray(0, TENURED));
2931
2932   // Handling of script id generation is in Factory::NewScript.
2933   set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
2934
2935   set_allocation_sites_scratchpad(
2936       *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
2937   InitializeAllocationSitesScratchpad();
2938
2939   // Initialize keyed lookup cache.
2940   isolate_->keyed_lookup_cache()->Clear();
2941
2942   // Initialize context slot cache.
2943   isolate_->context_slot_cache()->Clear();
2944
2945   // Initialize descriptor cache.
2946   isolate_->descriptor_lookup_cache()->Clear();
2947
2948   // Initialize compilation cache.
2949   isolate_->compilation_cache()->Clear();
2950 }
2951
2952
2953 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
2954   RootListIndex writable_roots[] = {
2955       kStoreBufferTopRootIndex,
2956       kStackLimitRootIndex,
2957       kNumberStringCacheRootIndex,
2958       kInstanceofCacheFunctionRootIndex,
2959       kInstanceofCacheMapRootIndex,
2960       kInstanceofCacheAnswerRootIndex,
2961       kCodeStubsRootIndex,
2962       kNonMonomorphicCacheRootIndex,
2963       kPolymorphicCodeCacheRootIndex,
2964       kLastScriptIdRootIndex,
2965       kEmptyScriptRootIndex,
2966       kRealStackLimitRootIndex,
2967       kArgumentsAdaptorDeoptPCOffsetRootIndex,
2968       kConstructStubDeoptPCOffsetRootIndex,
2969       kGetterStubDeoptPCOffsetRootIndex,
2970       kSetterStubDeoptPCOffsetRootIndex,
2971       kStringTableRootIndex,
2972   };
2973
2974   for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
2975     if (root_index == writable_roots[i]) return true;
2976   }
2977   return false;
2978 }
2979
2980
2981 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
2982   return !RootCanBeWrittenAfterInitialization(root_index) &&
2983          !InNewSpace(roots_array_start()[root_index]);
2984 }
2985
2986
2987 Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
2988                                    Object* key_pattern, ResultsCacheType type) {
2989   FixedArray* cache;
2990   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
2991   if (type == STRING_SPLIT_SUBSTRINGS) {
2992     DCHECK(key_pattern->IsString());
2993     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
2994     cache = heap->string_split_cache();
2995   } else {
2996     DCHECK(type == REGEXP_MULTIPLE_INDICES);
2997     DCHECK(key_pattern->IsFixedArray());
2998     cache = heap->regexp_multiple_cache();
2999   }
3000
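  // Each logical cache entry occupies kArrayEntriesPerCacheEntry consecutive
  // slots (key string, key pattern, cached results array). The lookup below
  // probes the primary entry derived from the string hash and, on a miss, the
  // one secondary entry immediately after it; anything else counts as a miss.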
3001   uint32_t hash = key_string->Hash();
3002   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3003                     ~(kArrayEntriesPerCacheEntry - 1));
3004   if (cache->get(index + kStringOffset) == key_string &&
3005       cache->get(index + kPatternOffset) == key_pattern) {
3006     return cache->get(index + kArrayOffset);
3007   }
3008   index =
3009       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3010   if (cache->get(index + kStringOffset) == key_string &&
3011       cache->get(index + kPatternOffset) == key_pattern) {
3012     return cache->get(index + kArrayOffset);
3013   }
3014   return Smi::FromInt(0);
3015 }
3016
3017
3018 void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
3019                                Handle<Object> key_pattern,
3020                                Handle<FixedArray> value_array,
3021                                ResultsCacheType type) {
3022   Factory* factory = isolate->factory();
3023   Handle<FixedArray> cache;
3024   if (!key_string->IsInternalizedString()) return;
3025   if (type == STRING_SPLIT_SUBSTRINGS) {
3026     DCHECK(key_pattern->IsString());
3027     if (!key_pattern->IsInternalizedString()) return;
3028     cache = factory->string_split_cache();
3029   } else {
3030     DCHECK(type == REGEXP_MULTIPLE_INDICES);
3031     DCHECK(key_pattern->IsFixedArray());
3032     cache = factory->regexp_multiple_cache();
3033   }
3034
3035   uint32_t hash = key_string->Hash();
3036   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3037                     ~(kArrayEntriesPerCacheEntry - 1));
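  // Insertion mirrors the two-entry probing in Lookup: prefer the primary
  // entry, fall back to the secondary one, and if both are occupied evict the
  // secondary entry and claim the primary slot for the new key/value.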
3038   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3039     cache->set(index + kStringOffset, *key_string);
3040     cache->set(index + kPatternOffset, *key_pattern);
3041     cache->set(index + kArrayOffset, *value_array);
3042   } else {
3043     uint32_t index2 =
3044         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3045     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3046       cache->set(index2 + kStringOffset, *key_string);
3047       cache->set(index2 + kPatternOffset, *key_pattern);
3048       cache->set(index2 + kArrayOffset, *value_array);
3049     } else {
3050       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3051       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3052       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3053       cache->set(index + kStringOffset, *key_string);
3054       cache->set(index + kPatternOffset, *key_pattern);
3055       cache->set(index + kArrayOffset, *value_array);
3056     }
3057   }
3058   // If the array is a reasonably short list of substrings, convert it into a
3059   // list of internalized strings.
3060   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3061     for (int i = 0; i < value_array->length(); i++) {
3062       Handle<String> str(String::cast(value_array->get(i)), isolate);
3063       Handle<String> internalized_str = factory->InternalizeString(str);
3064       value_array->set(i, *internalized_str);
3065     }
3066   }
3067   // Convert backing store to a copy-on-write array.
3068   value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
3069 }
3070
3071
3072 void RegExpResultsCache::Clear(FixedArray* cache) {
3073   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3074     cache->set(i, Smi::FromInt(0));
3075   }
3076 }
3077
3078
3079 int Heap::FullSizeNumberStringCacheLength() {
3080   // Compute the size of the number string cache based on the max new space size.
3081   // The number string cache has a minimum size based on twice the initial cache
3082   // size to ensure that it is bigger after being made 'full size'.
3083   int number_string_cache_size = max_semi_space_size_ / 512;
3084   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3085                                  Min(0x4000, number_string_cache_size));
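  // For example, assuming an 8 MB max semispace, 8 MB / 512 = 16384 entries,
  // which is exactly the 0x4000 cap, so the length returned below would be
  // 32768 array slots.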
3086   // There is a string and a number per entry so the length is twice the number
3087   // of entries.
3088   return number_string_cache_size * 2;
3089 }
3090
3091
3092 void Heap::FlushNumberStringCache() {
3093   // Flush the number to string cache.
3094   int len = number_string_cache()->length();
3095   for (int i = 0; i < len; i++) {
3096     number_string_cache()->set_undefined(i);
3097   }
3098 }
3099
3100
3101 void Heap::FlushAllocationSitesScratchpad() {
3102   for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3103     allocation_sites_scratchpad()->set_undefined(i);
3104   }
3105   allocation_sites_scratchpad_length_ = 0;
3106 }
3107
3108
3109 void Heap::InitializeAllocationSitesScratchpad() {
3110   DCHECK(allocation_sites_scratchpad()->length() ==
3111          kAllocationSiteScratchpadSize);
3112   for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3113     allocation_sites_scratchpad()->set_undefined(i);
3114   }
3115 }
3116
3117
3118 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
3119                                          ScratchpadSlotMode mode) {
3120   if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3121     // We cannot use the normal write-barrier because slots need to be
3122     // recorded with non-incremental marking as well. We have to explicitly
3123     // record the slot to take evacuation candidates into account.
3124     allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
3125                                        site, SKIP_WRITE_BARRIER);
3126     Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
3127         allocation_sites_scratchpad_length_);
3128
3129     if (mode == RECORD_SCRATCHPAD_SLOT) {
3130       // We need to allow slots buffer overflow here since the evacuation
3131       // candidates are not part of the global list of old space pages and
3132       // releasing an evacuation candidate due to a slots buffer overflow
3133       // results in lost pages.
3134       mark_compact_collector()->RecordSlot(slot, slot, *slot,
3135                                            SlotsBuffer::IGNORE_OVERFLOW);
3136     }
3137     allocation_sites_scratchpad_length_++;
3138   }
3139 }
3140
3141
3142 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3143   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3144 }
3145
3146
3147 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3148     ExternalArrayType array_type) {
3149   switch (array_type) {
3150 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3151   case kExternal##Type##Array:                                  \
3152     return kExternal##Type##ArrayMapRootIndex;
3153
3154     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3155 #undef ARRAY_TYPE_TO_ROOT_INDEX
3156
3157     default:
3158       UNREACHABLE();
3159       return kUndefinedValueRootIndex;
3160   }
3161 }
3162
3163
3164 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3165   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3166 }
3167
3168
3169 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3170     ExternalArrayType array_type) {
3171   switch (array_type) {
3172 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3173   case kExternal##Type##Array:                                  \
3174     return kFixed##Type##ArrayMapRootIndex;
3175
3176     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3177 #undef ARRAY_TYPE_TO_ROOT_INDEX
3178
3179     default:
3180       UNREACHABLE();
3181       return kUndefinedValueRootIndex;
3182   }
3183 }
3184
3185
3186 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3187     ElementsKind elementsKind) {
3188   switch (elementsKind) {
3189 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3190   case EXTERNAL_##TYPE##_ELEMENTS:                                \
3191     return kEmptyExternal##Type##ArrayRootIndex;
3192
3193     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3194 #undef ELEMENT_KIND_TO_ROOT_INDEX
3195
3196     default:
3197       UNREACHABLE();
3198       return kUndefinedValueRootIndex;
3199   }
3200 }
3201
3202
3203 Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
3204     ElementsKind elementsKind) {
3205   switch (elementsKind) {
3206 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
3207   case TYPE##_ELEMENTS:                                           \
3208     return kEmptyFixed##Type##ArrayRootIndex;
3209
3210     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3211 #undef ELEMENT_KIND_TO_ROOT_INDEX
3212     default:
3213       UNREACHABLE();
3214       return kUndefinedValueRootIndex;
3215   }
3216 }
3217
3218
3219 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3220   return ExternalArray::cast(
3221       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3222 }
3223
3224
3225 FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
3226   return FixedTypedArrayBase::cast(
3227       roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
3228 }
3229
3230
3231 AllocationResult Heap::AllocateForeign(Address address,
3232                                        PretenureFlag pretenure) {
3233   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3234   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3235   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3236   Foreign* result;
3237   AllocationResult allocation = Allocate(foreign_map(), space);
3238   if (!allocation.To(&result)) return allocation;
3239   result->set_foreign_address(address);
3240   return result;
3241 }
3242
3243
3244 AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3245   if (length < 0 || length > ByteArray::kMaxLength) {
3246     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
3247   }
3248   int size = ByteArray::SizeFor(length);
3249   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3250   HeapObject* result;
3251   {
3252     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3253     if (!allocation.To(&result)) return allocation;
3254   }
3255
3256   result->set_map_no_write_barrier(byte_array_map());
3257   ByteArray::cast(result)->set_length(length);
3258   return result;
3259 }
3260
3261
3262 void Heap::CreateFillerObjectAt(Address addr, int size) {
3263   if (size == 0) return;
3264   HeapObject* filler = HeapObject::FromAddress(addr);
3265   if (size == kPointerSize) {
3266     filler->set_map_no_write_barrier(one_pointer_filler_map());
3267   } else if (size == 2 * kPointerSize) {
3268     filler->set_map_no_write_barrier(two_pointer_filler_map());
3269   } else {
3270     filler->set_map_no_write_barrier(free_space_map());
3271     FreeSpace::cast(filler)->set_size(size);
3272   }
3273 }
3274
3275
3276 bool Heap::CanMoveObjectStart(HeapObject* object) {
3277   Address address = object->address();
3278   bool is_in_old_pointer_space = InOldPointerSpace(address);
3279   bool is_in_old_data_space = InOldDataSpace(address);
3280
3281   if (lo_space()->Contains(object)) return false;
3282
3283   Page* page = Page::FromAddress(address);
3284   // We can move the object start if any of the following holds:
3285   // (1) the object is not in old pointer or old data space,
3286   // (2) the page of the object was already swept, or
3287   // (3) the page has already been concurrently swept. This case is an
3288   // optimization for concurrent sweeping: the WasSwept predicate for
3289   // concurrently swept pages is only set after all pages have been swept.
3290   return (!is_in_old_pointer_space && !is_in_old_data_space) ||
3291          page->WasSwept() || page->SweepingCompleted();
3292 }
3293
3294
3295 void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
3296   if (incremental_marking()->IsMarking() &&
3297       Marking::IsBlack(Marking::MarkBitFrom(address))) {
3298     if (mode == FROM_GC) {
3299       MemoryChunk::IncrementLiveBytesFromGC(address, by);
3300     } else {
3301       MemoryChunk::IncrementLiveBytesFromMutator(address, by);
3302     }
3303   }
3304 }
3305
3306
3307 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
3308                                          int elements_to_trim) {
3309   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3310   const int bytes_to_trim = elements_to_trim * element_size;
3311   Map* map = object->map();
3312
3313   // For now this trick is only applied to objects in new and paged space.
3314   // In large object space the object's start must coincide with the chunk
3315   // start, and thus the trick is just not applicable.
3316   DCHECK(!lo_space()->Contains(object));
3317   DCHECK(object->map() != fixed_cow_array_map());
3318
3319   STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
3320   STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
3321   STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
3322
3323   const int len = object->length();
3324   DCHECK(elements_to_trim <= len);
3325
3326   // Calculate location of new array start.
3327   Address new_start = object->address() + bytes_to_trim;
3328
3329   // Technically in new space this write might be omitted (except for
3330   // debug mode, which iterates through the heap), but to play it safe
3331   // we still do it.
3332   CreateFillerObjectAt(object->address(), bytes_to_trim);
3333
3334   // Initialize header of the trimmed array. Since left trimming is only
3335   // performed on pages which are not concurrently swept creating a filler
3336   // object does not require synchronization.
3337   DCHECK(CanMoveObjectStart(object));
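  // The index arithmetic below converts the number of trimmed elements into
  // pointer-sized slots, so the map word and the new length land exactly at
  // the new object start and form a valid FixedArrayBase header.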
3338   Object** former_start = HeapObject::RawField(object, 0);
3339   int new_start_index = elements_to_trim * (element_size / kPointerSize);
3340   former_start[new_start_index] = map;
3341   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
3342   FixedArrayBase* new_object =
3343       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
3344
3345   // Maintain consistency of live bytes during incremental marking.
3346   marking()->TransferMark(object->address(), new_start);
3347   AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
3348
3349   // Notify the heap profiler of change in object layout.
3350   OnMoveEvent(new_object, object, new_object->Size());
3351   return new_object;
3352 }
3353
3354
3355 // Force instantiation of templatized method.
3356 template
3357 void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
3358 template
3359 void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
3360
3361
3362 template<Heap::InvocationMode mode>
3363 void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
3364   const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
3365   const int bytes_to_trim = elements_to_trim * element_size;
3366
3367   // For now this trick is only applied to objects in new and paged space.
3368   DCHECK(object->map() != fixed_cow_array_map());
3369
3370   const int len = object->length();
3371   DCHECK(elements_to_trim < len);
3372
3373   // Calculate location of new array end.
3374   Address new_end = object->address() + object->Size() - bytes_to_trim;
3375
3376   // Technically in new space this write might be omitted (except for
3377   // debug mode, which iterates through the heap), but to play it safe
3378   // we still do it.
3379   // We do not create a filler for objects in large object space.
3380   // TODO(hpayer): We should shrink the large object page if the size
3381   // of the object changed significantly.
3382   if (!lo_space()->Contains(object)) {
3383     CreateFillerObjectAt(new_end, bytes_to_trim);
3384   }
3385
3386   // Initialize header of the trimmed array. We are storing the new length
3387   // using release store after creating a filler for the left-over space to
3388   // avoid races with the sweeper thread.
3389   object->synchronized_set_length(len - elements_to_trim);
3390
3391   // Maintain consistency of live bytes during incremental marking.
3392   AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
3393
3394   // Notify the heap profiler of the change in object layout. The array may not
3395   // be moved during GC, but its size still has to be adjusted.
3396   HeapProfiler* profiler = isolate()->heap_profiler();
3397   if (profiler->is_tracking_allocations()) {
3398     profiler->UpdateObjectSizeEvent(object->address(), object->Size());
3399   }
3400 }
3401
3402
3403 AllocationResult Heap::AllocateExternalArray(int length,
3404                                              ExternalArrayType array_type,
3405                                              void* external_pointer,
3406                                              PretenureFlag pretenure) {
3407   int size = ExternalArray::kAlignedSize;
3408   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3409   HeapObject* result;
3410   {
3411     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3412     if (!allocation.To(&result)) return allocation;
3413   }
3414
3415   result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
3416   ExternalArray::cast(result)->set_length(length);
3417   ExternalArray::cast(result)->set_external_pointer(external_pointer);
3418   return result;
3419 }
3420
3421 static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
3422                                ElementsKind* element_kind) {
3423   switch (array_type) {
3424 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
3425   case kExternal##Type##Array:                          \
3426     *element_size = size;                               \
3427     *element_kind = TYPE##_ELEMENTS;                    \
3428     return;
3429
3430     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3431 #undef TYPED_ARRAY_CASE
3432
3433     default:
3434       *element_size = 0;               // Bogus
3435       *element_kind = UINT8_ELEMENTS;  // Bogus
3436       UNREACHABLE();
3437   }
3438 }
3439
3440
3441 AllocationResult Heap::AllocateFixedTypedArray(int length,
3442                                                ExternalArrayType array_type,
3443                                                PretenureFlag pretenure) {
3444   int element_size;
3445   ElementsKind elements_kind;
3446   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3447   int size = OBJECT_POINTER_ALIGN(length * element_size +
3448                                   FixedTypedArrayBase::kDataOffset);
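  // On 32-bit targets raw allocations are only pointer aligned, so the block
  // below reserves one extra word of slack that EnsureDoubleAligned (further
  // down) can turn into a filler to get the float64 payload 8-byte aligned.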
3449 #ifndef V8_HOST_ARCH_64_BIT
3450   if (array_type == kExternalFloat64Array) {
3451     size += kPointerSize;
3452   }
3453 #endif
3454   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3455
3456   HeapObject* object;
3457   AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3458   if (!allocation.To(&object)) return allocation;
3459
3460   if (array_type == kExternalFloat64Array) {
3461     object = EnsureDoubleAligned(this, object, size);
3462   }
3463
3464   object->set_map(MapForFixedTypedArray(array_type));
3465   FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
3466   elements->set_length(length);
3467   memset(elements->DataPtr(), 0, elements->DataSize());
3468   return elements;
3469 }
3470
3471
3472 AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
3473   DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
3474   AllocationResult allocation =
3475       AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
3476
3477   HeapObject* result;
3478   if (!allocation.To(&result)) return allocation;
3479
3480   if (immovable) {
3481     Address address = result->address();
3482     // Code objects which should stay at a fixed address are allocated either
3483     // in the first page of code space (objects on the first page of each space
3484     // are never moved) or in large object space.
3485     if (!code_space_->FirstPage()->Contains(address) &&
3486         MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
3487       // Discard the first code allocation, which was on a page where it could
3488       // be moved.
3489       CreateFillerObjectAt(result->address(), object_size);
3490       allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
3491       if (!allocation.To(&result)) return allocation;
3492       OnAllocationEvent(result, object_size);
3493     }
3494   }
3495
3496   result->set_map_no_write_barrier(code_map());
3497   Code* code = Code::cast(result);
3498   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3499          isolate_->code_range()->contains(code->address()));
3500   code->set_gc_metadata(Smi::FromInt(0));
3501   code->set_ic_age(global_ic_age_);
3502   return code;
3503 }
3504
3505
3506 AllocationResult Heap::CopyCode(Code* code) {
3507   AllocationResult allocation;
3508   HeapObject* new_constant_pool;
3509   if (FLAG_enable_ool_constant_pool &&
3510       code->constant_pool() != empty_constant_pool_array()) {
3511     // Copy the constant pool, since edits to the copied code may modify
3512     // the constant pool.
3513     allocation = CopyConstantPoolArray(code->constant_pool());
3514     if (!allocation.To(&new_constant_pool)) return allocation;
3515   } else {
3516     new_constant_pool = empty_constant_pool_array();
3517   }
3518
3519   HeapObject* result;
3520   // Allocate an object the same size as the code object.
3521   int obj_size = code->Size();
3522   allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3523   if (!allocation.To(&result)) return allocation;
3524
3525   // Copy code object.
3526   Address old_addr = code->address();
3527   Address new_addr = result->address();
3528   CopyBlock(new_addr, old_addr, obj_size);
3529   Code* new_code = Code::cast(result);
3530
3531   // Update the constant pool.
3532   new_code->set_constant_pool(new_constant_pool);
3533
3534   // Relocate the copy.
3535   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3536          isolate_->code_range()->contains(code->address()));
3537   new_code->Relocate(new_addr - old_addr);
3538   return new_code;
3539 }
3540
3541
3542 AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
3543   // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
3544   // do not risk leaving an uninitialized Code object (and breaking the heap).
3545   ByteArray* reloc_info_array;
3546   {
3547     AllocationResult allocation =
3548         AllocateByteArray(reloc_info.length(), TENURED);
3549     if (!allocation.To(&reloc_info_array)) return allocation;
3550   }
3551   HeapObject* new_constant_pool;
3552   if (FLAG_enable_ool_constant_pool &&
3553       code->constant_pool() != empty_constant_pool_array()) {
3554     // Copy the constant pool, since edits to the copied code may modify
3555     // the constant pool.
3556     AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
3557     if (!allocation.To(&new_constant_pool)) return allocation;
3558   } else {
3559     new_constant_pool = empty_constant_pool_array();
3560   }
3561
3562   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
3563
3564   int new_obj_size = Code::SizeFor(new_body_size);
3565
3566   Address old_addr = code->address();
3567
3568   size_t relocation_offset =
3569       static_cast<size_t>(code->instruction_end() - old_addr);
3570
3571   HeapObject* result;
3572   AllocationResult allocation =
3573       AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
3574   if (!allocation.To(&result)) return allocation;
3575
3576   // Copy code object.
3577   Address new_addr = result->address();
3578
3579   // Copy header and instructions.
3580   CopyBytes(new_addr, old_addr, relocation_offset);
3581
3582   Code* new_code = Code::cast(result);
3583   new_code->set_relocation_info(reloc_info_array);
3584
3585   // Update constant pool.
3586   new_code->set_constant_pool(new_constant_pool);
3587
3588   // Copy patched rinfo.
3589   CopyBytes(new_code->relocation_start(), reloc_info.start(),
3590             static_cast<size_t>(reloc_info.length()));
3591
3592   // Relocate the copy.
3593   DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
3594          isolate_->code_range()->contains(code->address()));
3595   new_code->Relocate(new_addr - old_addr);
3596
3597 #ifdef VERIFY_HEAP
3598   if (FLAG_verify_heap) code->ObjectVerify();
3599 #endif
3600   return new_code;
3601 }
3602
3603
3604 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
3605                                        AllocationSite* allocation_site) {
3606   memento->set_map_no_write_barrier(allocation_memento_map());
3607   DCHECK(allocation_site->map() == allocation_site_map());
3608   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
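  // The create count bumped below feeds the allocation-site pretenuring
  // heuristic: the collector later compares it with the number of mementos it
  // actually finds to estimate how many objects from this site survive.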
3609   if (FLAG_allocation_site_pretenuring) {
3610     allocation_site->IncrementMementoCreateCount();
3611   }
3612 }
3613
3614
3615 AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
3616                                 AllocationSite* allocation_site) {
3617   DCHECK(gc_state_ == NOT_IN_GC);
3618   DCHECK(map->instance_type() != MAP_TYPE);
3619   // If allocation failures are disallowed, we may allocate in a different
3620   // space when new space is full and the object is not a large object.
3621   AllocationSpace retry_space =
3622       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
3623   int size = map->instance_size();
3624   if (allocation_site != NULL) {
3625     size += AllocationMemento::kSize;
3626   }
3627   HeapObject* result;
3628   AllocationResult allocation = AllocateRaw(size, space, retry_space);
3629   if (!allocation.To(&result)) return allocation;
3630   // No need for write barrier since object is white and map is in old space.
3631   result->set_map_no_write_barrier(map);
3632   if (allocation_site != NULL) {
3633     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3634         reinterpret_cast<Address>(result) + map->instance_size());
3635     InitializeAllocationMemento(alloc_memento, allocation_site);
3636   }
3637   return result;
3638 }
3639
3640
3641 void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
3642                                      Map* map) {
3643   obj->set_properties(properties);
3644   obj->initialize_elements();
3645   // TODO(1240798): Initialize the object's body using valid initial values
3646   // according to the object's initial map.  For example, if the map's
3647   // instance type is JS_ARRAY_TYPE, the length field should be initialized
3648   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
3649   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
3650   // verification code has to cope with (temporarily) invalid objects.  See,
3651   // for example, JSArray::JSArrayVerify.
3652   Object* filler;
3653   // We cannot always fill with one_pointer_filler_map because objects
3654   // created from API functions expect their internal fields to be initialized
3655   // with undefined_value.
3656   // Pre-allocated fields need to be initialized with undefined_value as well
3657   // so that object accesses before the constructor completes (e.g. in the
3658   // debugger) will not cause a crash.
3659   if (map->constructor()->IsJSFunction() &&
3660       JSFunction::cast(map->constructor())
3661           ->IsInobjectSlackTrackingInProgress()) {
3662     // We might want to shrink the object later.
3663     DCHECK(obj->GetInternalFieldCount() == 0);
3664     filler = Heap::one_pointer_filler_map();
3665   } else {
3666     filler = Heap::undefined_value();
3667   }
3668   obj->InitializeBody(map, Heap::undefined_value(), filler);
3669 }
3670
3671
3672 AllocationResult Heap::AllocateJSObjectFromMap(
3673     Map* map, PretenureFlag pretenure, bool allocate_properties,
3674     AllocationSite* allocation_site) {
3675   // JSFunctions should be allocated using AllocateFunction to be
3676   // properly initialized.
3677   DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
3678
3679   // Both types of global objects should be allocated using
3680   // AllocateGlobalObject to be properly initialized.
3681   DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
3682   DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
3683
3684   // Allocate the backing storage for the properties.
3685   FixedArray* properties;
3686   if (allocate_properties) {
3687     int prop_size = map->InitialPropertiesLength();
3688     DCHECK(prop_size >= 0);
3689     {
3690       AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
3691       if (!allocation.To(&properties)) return allocation;
3692     }
3693   } else {
3694     properties = empty_fixed_array();
3695   }
3696
3697   // Allocate the JSObject.
3698   int size = map->instance_size();
3699   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
3700   JSObject* js_obj;
3701   AllocationResult allocation = Allocate(map, space, allocation_site);
3702   if (!allocation.To(&js_obj)) return allocation;
3703
3704   // Initialize the JSObject.
3705   InitializeJSObjectFromMap(js_obj, properties, map);
3706   DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
3707          js_obj->HasFixedTypedArrayElements());
3708   return js_obj;
3709 }
3710
3711
3712 AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
3713                                         PretenureFlag pretenure,
3714                                         AllocationSite* allocation_site) {
3715   DCHECK(constructor->has_initial_map());
3716
3717   // Allocate the object based on the constructor's initial map.
3718   AllocationResult allocation = AllocateJSObjectFromMap(
3719       constructor->initial_map(), pretenure, true, allocation_site);
3720 #ifdef DEBUG
3721   // Make sure result is NOT a global object if valid.
3722   HeapObject* obj;
3723   DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
3724 #endif
3725   return allocation;
3726 }
3727
3728
3729 AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
3730   // Never used to copy functions.  If functions need to be copied we
3731   // have to be careful to clear the literals array.
3732   SLOW_DCHECK(!source->IsJSFunction());
3733
3734   // Make the clone.
3735   Map* map = source->map();
3736   int object_size = map->instance_size();
3737   HeapObject* clone;
3738
3739   DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
3740
3741   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
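  // Default to issuing write barriers; the new-space path below downgrades
  // this to SKIP_WRITE_BARRIER, since stores into a new-space clone need no
  // remembered-set entries.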
3742
3743   // If we're forced to always allocate, we use the general allocation
3744   // functions which may leave us with an object in old space.
3745   if (always_allocate()) {
3746     {
3747       AllocationResult allocation =
3748           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
3749       if (!allocation.To(&clone)) return allocation;
3750     }
3751     Address clone_address = clone->address();
3752     CopyBlock(clone_address, source->address(), object_size);
3753     // Update write barrier for all fields that lie beyond the header.
3754     RecordWrites(clone_address, JSObject::kHeaderSize,
3755                  (object_size - JSObject::kHeaderSize) / kPointerSize);
3756   } else {
3757     wb_mode = SKIP_WRITE_BARRIER;
3758
3759     {
3760       int adjusted_object_size =
3761           site != NULL ? object_size + AllocationMemento::kSize : object_size;
3762       AllocationResult allocation =
3763           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
3764       if (!allocation.To(&clone)) return allocation;
3765     }
3766     SLOW_DCHECK(InNewSpace(clone));
3767     // Since we know the clone is allocated in new space, we can copy
3768     // the contents without worrying about updating the write barrier.
3769     CopyBlock(clone->address(), source->address(), object_size);
3770
3771     if (site != NULL) {
3772       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
3773           reinterpret_cast<Address>(clone) + object_size);
3774       InitializeAllocationMemento(alloc_memento, site);
3775     }
3776   }
3777
3778   SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
3779               source->GetElementsKind());
3780   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3781   FixedArray* properties = FixedArray::cast(source->properties());
3782   // Update elements if necessary.
3783   if (elements->length() > 0) {
3784     FixedArrayBase* elem;
3785     {
3786       AllocationResult allocation;
3787       if (elements->map() == fixed_cow_array_map()) {
3788         allocation = FixedArray::cast(elements);
3789       } else if (source->HasFastDoubleElements()) {
3790         allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
3791       } else {
3792         allocation = CopyFixedArray(FixedArray::cast(elements));
3793       }
3794       if (!allocation.To(&elem)) return allocation;
3795     }
3796     JSObject::cast(clone)->set_elements(elem, wb_mode);
3797   }
3798   // Update properties if necessary.
3799   if (properties->length() > 0) {
3800     FixedArray* prop;
3801     {
3802       AllocationResult allocation = CopyFixedArray(properties);
3803       if (!allocation.To(&prop)) return allocation;
3804     }
3805     JSObject::cast(clone)->set_properties(prop, wb_mode);
3806   }
3807   // Return the new clone.
3808   return clone;
3809 }
3810
3811
3812 static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
3813                                     int len) {
3814   // Only works for one byte strings.
3815   DCHECK(vector.length() == len);
3816   MemCopy(chars, vector.start(), len);
3817 }
3818
3819 static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
3820                                     int len) {
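  // Decode the UTF-8 input and write UTF-16 code units; characters outside
  // the BMP are emitted as surrogate pairs, and 'len' counts UTF-16 units, so
  // it must match the decoded length exactly (checked by the DCHECKs below).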
3821   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
3822   unsigned stream_length = vector.length();
3823   while (stream_length != 0) {
3824     unsigned consumed = 0;
3825     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
3826     DCHECK(c != unibrow::Utf8::kBadChar);
3827     DCHECK(consumed <= stream_length);
3828     stream_length -= consumed;
3829     stream += consumed;
3830     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
3831       len -= 2;
3832       if (len < 0) break;
3833       *chars++ = unibrow::Utf16::LeadSurrogate(c);
3834       *chars++ = unibrow::Utf16::TrailSurrogate(c);
3835     } else {
3836       len -= 1;
3837       if (len < 0) break;
3838       *chars++ = c;
3839     }
3840   }
3841   DCHECK(stream_length == 0);
3842   DCHECK(len == 0);
3843 }
3844
3845
3846 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
3847   DCHECK(s->length() == len);
3848   String::WriteToFlat(s, chars, 0, len);
3849 }
3850
3851
3852 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
3853   DCHECK(s->length() == len);
3854   String::WriteToFlat(s, chars, 0, len);
3855 }
3856
3857
3858 template <bool is_one_byte, typename T>
3859 AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
3860                                                       uint32_t hash_field) {
3861   DCHECK(chars >= 0);
3862   // Compute map and object size.
3863   int size;
3864   Map* map;
3865
3866   DCHECK_LE(0, chars);
3867   DCHECK_GE(String::kMaxLength, chars);
3868   if (is_one_byte) {
3869     map = one_byte_internalized_string_map();
3870     size = SeqOneByteString::SizeFor(chars);
3871   } else {
3872     map = internalized_string_map();
3873     size = SeqTwoByteString::SizeFor(chars);
3874   }
3875   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
3876
3877   // Allocate string.
3878   HeapObject* result;
3879   {
3880     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3881     if (!allocation.To(&result)) return allocation;
3882   }
3883
3884   result->set_map_no_write_barrier(map);
3885   // Set length and hash fields of the allocated string.
3886   String* answer = String::cast(result);
3887   answer->set_length(chars);
3888   answer->set_hash_field(hash_field);
3889
3890   DCHECK_EQ(size, answer->Size());
3891
3892   if (is_one_byte) {
3893     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
3894   } else {
3895     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
3896   }
3897   return answer;
3898 }
3899
3900
3901 // Need explicit instantiations.
3902 template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
3903                                                                      int,
3904                                                                      uint32_t);
3905 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
3906                                                                       int,
3907                                                                       uint32_t);
3908 template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
3909     Vector<const char>, int, uint32_t);
3910
3911
3912 AllocationResult Heap::AllocateRawOneByteString(int length,
3913                                                 PretenureFlag pretenure) {
3914   DCHECK_LE(0, length);
3915   DCHECK_GE(String::kMaxLength, length);
3916   int size = SeqOneByteString::SizeFor(length);
3917   DCHECK(size <= SeqOneByteString::kMaxSize);
3918   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3919
3920   HeapObject* result;
3921   {
3922     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3923     if (!allocation.To(&result)) return allocation;
3924   }
3925
3926   // Partially initialize the object.
3927   result->set_map_no_write_barrier(one_byte_string_map());
3928   String::cast(result)->set_length(length);
3929   String::cast(result)->set_hash_field(String::kEmptyHashField);
3930   DCHECK_EQ(size, HeapObject::cast(result)->Size());
3931
3932   return result;
3933 }
3934
3935
3936 AllocationResult Heap::AllocateRawTwoByteString(int length,
3937                                                 PretenureFlag pretenure) {
3938   DCHECK_LE(0, length);
3939   DCHECK_GE(String::kMaxLength, length);
3940   int size = SeqTwoByteString::SizeFor(length);
3941   DCHECK(size <= SeqTwoByteString::kMaxSize);
3942   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3943
3944   HeapObject* result;
3945   {
3946     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
3947     if (!allocation.To(&result)) return allocation;
3948   }
3949
3950   // Partially initialize the object.
3951   result->set_map_no_write_barrier(string_map());
3952   String::cast(result)->set_length(length);
3953   String::cast(result)->set_hash_field(String::kEmptyHashField);
3954   DCHECK_EQ(size, HeapObject::cast(result)->Size());
3955   return result;
3956 }
3957
3958
3959 AllocationResult Heap::AllocateEmptyFixedArray() {
3960   int size = FixedArray::SizeFor(0);
3961   HeapObject* result;
3962   {
3963     AllocationResult allocation =
3964         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
3965     if (!allocation.To(&result)) return allocation;
3966   }
3967   // Initialize the object.
3968   result->set_map_no_write_barrier(fixed_array_map());
3969   FixedArray::cast(result)->set_length(0);
3970   return result;
3971 }
3972
3973
3974 AllocationResult Heap::AllocateEmptyExternalArray(
3975     ExternalArrayType array_type) {
3976   return AllocateExternalArray(0, array_type, NULL, TENURED);
3977 }
3978
3979
3980 AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
3981   if (!InNewSpace(src)) {
3982     return src;
3983   }
3984
3985   int len = src->length();
3986   HeapObject* obj;
3987   {
3988     AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
3989     if (!allocation.To(&obj)) return allocation;
3990   }
3991   obj->set_map_no_write_barrier(fixed_array_map());
3992   FixedArray* result = FixedArray::cast(obj);
3993   result->set_length(len);
3994
3995   // Copy the content
3996   DisallowHeapAllocation no_gc;
3997   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
3998   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
3999
4000   // TODO(mvstanton): The map is set twice because of protection against calling
4001   // set() on a COW FixedArray. Issue v8:3221 was created to track this, and
4002   // we might then be able to remove this whole method.
4003   HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
4004   return result;
4005 }
4006
4007
4008 AllocationResult Heap::AllocateEmptyFixedTypedArray(
4009     ExternalArrayType array_type) {
4010   return AllocateFixedTypedArray(0, array_type, TENURED);
4011 }
4012
4013
4014 AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
4015   int len = src->length();
4016   HeapObject* obj;
4017   {
4018     AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
4019     if (!allocation.To(&obj)) return allocation;
4020   }
4021   if (InNewSpace(obj)) {
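  // Fast path: a copy that landed in new space can take a raw block copy of
  // the payload, because new-space objects never need write barriers.
  // Otherwise fall through and copy element by element with the barrier mode
  // appropriate for the destination.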
4022     obj->set_map_no_write_barrier(map);
4023     CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
4024               FixedArray::SizeFor(len) - kPointerSize);
4025     return obj;
4026   }
4027   obj->set_map_no_write_barrier(map);
4028   FixedArray* result = FixedArray::cast(obj);
4029   result->set_length(len);
4030
4031   // Copy the content
4032   DisallowHeapAllocation no_gc;
4033   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
4034   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
4035   return result;
4036 }
4037
4038
4039 AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
4040                                                    Map* map) {
4041   int len = src->length();
4042   HeapObject* obj;
4043   {
4044     AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
4045     if (!allocation.To(&obj)) return allocation;
4046   }
4047   obj->set_map_no_write_barrier(map);
4048   CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
4049             src->address() + FixedDoubleArray::kLengthOffset,
4050             FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
4051   return obj;
4052 }
4053
4054
4055 AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
4056                                                     Map* map) {
4057   HeapObject* obj;
4058   if (src->is_extended_layout()) {
4059     ConstantPoolArray::NumberOfEntries small(src,
4060                                              ConstantPoolArray::SMALL_SECTION);
4061     ConstantPoolArray::NumberOfEntries extended(
4062         src, ConstantPoolArray::EXTENDED_SECTION);
4063     AllocationResult allocation =
4064         AllocateExtendedConstantPoolArray(small, extended);
4065     if (!allocation.To(&obj)) return allocation;
4066   } else {
4067     ConstantPoolArray::NumberOfEntries small(src,
4068                                              ConstantPoolArray::SMALL_SECTION);
4069     AllocationResult allocation = AllocateConstantPoolArray(small);
4070     if (!allocation.To(&obj)) return allocation;
4071   }
4072   obj->set_map_no_write_barrier(map);
4073   CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
4074             src->address() + ConstantPoolArray::kFirstEntryOffset,
4075             src->size() - ConstantPoolArray::kFirstEntryOffset);
4076   return obj;
4077 }
4078
4079
4080 AllocationResult Heap::AllocateRawFixedArray(int length,
4081                                              PretenureFlag pretenure) {
4082   if (length < 0 || length > FixedArray::kMaxLength) {
4083     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4084   }
4085   int size = FixedArray::SizeFor(length);
4086   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4087
4088   return AllocateRaw(size, space, OLD_POINTER_SPACE);
4089 }
4090
4091
4092 AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
4093                                                     PretenureFlag pretenure,
4094                                                     Object* filler) {
4095   DCHECK(length >= 0);
4096   DCHECK(empty_fixed_array()->IsFixedArray());
4097   if (length == 0) return empty_fixed_array();
4098
4099   DCHECK(!InNewSpace(filler));
4100   HeapObject* result;
4101   {
4102     AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
4103     if (!allocation.To(&result)) return allocation;
4104   }
4105
4106   result->set_map_no_write_barrier(fixed_array_map());
4107   FixedArray* array = FixedArray::cast(result);
4108   array->set_length(length);
4109   MemsetPointer(array->data_start(), filler, length);
4110   return array;
4111 }
4112
4113
4114 AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
4115   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
4116 }
4117
4118
4119 AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
4120   if (length == 0) return empty_fixed_array();
4121
4122   HeapObject* obj;
4123   {
4124     AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
4125     if (!allocation.To(&obj)) return allocation;
4126   }
4127
4128   obj->set_map_no_write_barrier(fixed_array_map());
4129   FixedArray::cast(obj)->set_length(length);
4130   return obj;
4131 }
4132
4133
4134 AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
4135     int length, PretenureFlag pretenure) {
4136   if (length == 0) return empty_fixed_array();
4137
4138   HeapObject* elements;
4139   AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
4140   if (!allocation.To(&elements)) return allocation;
4141
4142   elements->set_map_no_write_barrier(fixed_double_array_map());
4143   FixedDoubleArray::cast(elements)->set_length(length);
4144   return elements;
4145 }
4146
4147
4148 AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
4149                                                    PretenureFlag pretenure) {
4150   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
4151     v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
4152   }
4153   int size = FixedDoubleArray::SizeFor(length);
4154 #ifndef V8_HOST_ARCH_64_BIT
4155   size += kPointerSize;
4156 #endif
4157   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4158
4159   HeapObject* object;
4160   {
4161     AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
4162     if (!allocation.To(&object)) return allocation;
4163   }
4164
4165   return EnsureDoubleAligned(this, object, size);
4166 }
4167
4168
4169 AllocationResult Heap::AllocateConstantPoolArray(
4170     const ConstantPoolArray::NumberOfEntries& small) {
4171   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4172   int size = ConstantPoolArray::SizeFor(small);
4173 #ifndef V8_HOST_ARCH_64_BIT
4174   size += kPointerSize;
4175 #endif
4176   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4177
4178   HeapObject* object;
4179   {
4180     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4181     if (!allocation.To(&object)) return allocation;
4182   }
4183   object = EnsureDoubleAligned(this, object, size);
4184   object->set_map_no_write_barrier(constant_pool_array_map());
4185
4186   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4187   constant_pool->Init(small);
4188   constant_pool->ClearPtrEntries(isolate());
4189   return constant_pool;
4190 }
4191
4192
4193 AllocationResult Heap::AllocateExtendedConstantPoolArray(
4194     const ConstantPoolArray::NumberOfEntries& small,
4195     const ConstantPoolArray::NumberOfEntries& extended) {
4196   CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
4197   CHECK(extended.are_in_range(0, kMaxInt));
4198   int size = ConstantPoolArray::SizeForExtended(small, extended);
4199 #ifndef V8_HOST_ARCH_64_BIT
4200   size += kPointerSize;
4201 #endif
4202   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4203
4204   HeapObject* object;
4205   {
4206     AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
4207     if (!allocation.To(&object)) return allocation;
4208   }
4209   object = EnsureDoubleAligned(this, object, size);
4210   object->set_map_no_write_barrier(constant_pool_array_map());
4211
4212   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
4213   constant_pool->InitExtended(small, extended);
4214   constant_pool->ClearPtrEntries(isolate());
4215   return constant_pool;
4216 }
4217
4218
4219 AllocationResult Heap::AllocateEmptyConstantPoolArray() {
4220   ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
4221   int size = ConstantPoolArray::SizeFor(small);
4222   HeapObject* result;
4223   {
4224     AllocationResult allocation =
4225         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4226     if (!allocation.To(&result)) return allocation;
4227   }
4228   result->set_map_no_write_barrier(constant_pool_array_map());
4229   ConstantPoolArray::cast(result)->Init(small);
4230   return result;
4231 }
4232
4233
4234 AllocationResult Heap::AllocateSymbol() {
4235   // Statically ensure that it is safe to allocate symbols in paged spaces.
4236   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
4237
4238   HeapObject* result;
4239   AllocationResult allocation =
4240       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
4241   if (!allocation.To(&result)) return allocation;
4242
4243   result->set_map_no_write_barrier(symbol_map());
4244
4245   // Generate a random hash value.
4246   int hash;
4247   int attempts = 0;
4248   do {
4249     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
4250     attempts++;
4251   } while (hash == 0 && attempts < 30);
4252   if (hash == 0) hash = 1;  // never return 0
4253
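  // The hash is stored in the upper bits of the hash field; the
  // kIsNotArrayIndexMask bit marks the field as holding a plain hash rather
  // than a cached array index.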
4254   Symbol::cast(result)
4255       ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
4256   Symbol::cast(result)->set_name(undefined_value());
4257   Symbol::cast(result)->set_flags(Smi::FromInt(0));
4258
4259   DCHECK(!Symbol::cast(result)->is_private());
4260   return result;
4261 }
4262
4263
4264 AllocationResult Heap::AllocateStruct(InstanceType type) {
4265   Map* map;
4266   switch (type) {
4267 #define MAKE_CASE(NAME, Name, name) \
4268   case NAME##_TYPE:                 \
4269     map = name##_map();             \
4270     break;
4271     STRUCT_LIST(MAKE_CASE)
4272 #undef MAKE_CASE
4273     default:
4274       UNREACHABLE();
4275       return exception();
4276   }
4277   int size = map->instance_size();
4278   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
4279   Struct* result;
4280   {
4281     AllocationResult allocation = Allocate(map, space);
4282     if (!allocation.To(&result)) return allocation;
4283   }
4284   result->InitializeBody(size);
4285   return result;
4286 }
4287
4288
4289 bool Heap::IsHeapIterable() {
4290   // TODO(hpayer): This function is not correct. Allocation folding in old
4291   // space breaks the iterability.
4292   return new_space_top_after_last_gc_ == new_space()->top();
4293 }
4294
4295
4296 void Heap::MakeHeapIterable() {
4297   DCHECK(AllowHeapAllocation::IsAllowed());
4298   if (!IsHeapIterable()) {
4299     CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
4300   }
4301   if (mark_compact_collector()->sweeping_in_progress()) {
4302     mark_compact_collector()->EnsureSweepingCompleted();
4303   }
4304   DCHECK(IsHeapIterable());
4305 }
4306
4307
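// Performs one incremental marking step of the given size and, once marking
// is complete, finalizes it with a full garbage collection. If no GC has
// happened since the last idle mark-compact, the mutator is assumed to be
// inactive, so the compilation cache is cleared and committed new space
// memory is given back to the OS.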
4308 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
4309   incremental_marking()->Step(step_size,
4310                               IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
4311
4312   if (incremental_marking()->IsComplete()) {
4313     bool uncommit = false;
4314     if (gc_count_at_last_idle_gc_ == gc_count_) {
4315       // No GC since the last full GC, the mutator is probably not active.
4316       isolate_->compilation_cache()->Clear();
4317       uncommit = true;
4318     }
4319     CollectAllGarbage(kReduceMemoryFootprintMask,
4320                       "idle notification: finalize incremental");
4321     gc_idle_time_handler_.NotifyIdleMarkCompact();
4322     gc_count_at_last_idle_gc_ = gc_count_;
4323     if (uncommit) {
4324       new_space_.Shrink();
4325       UncommitFromSpace();
4326     }
4327   }
4328 }
4329
4330
4331 bool Heap::WorthActivatingIncrementalMarking() {
4332   return incremental_marking()->IsStopped() &&
4333          incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
4334 }
4335
4336
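// Handles an idle-time notification from the embedder: builds a snapshot of
// the current heap state, asks the GCIdleTimeHandler which action (if any)
// fits into |idle_time_in_ms|, performs that action (incremental marking
// step, full GC, scavenge, or sweeping finalization), and records in counters
// how far the actual time over- or undershot the deadline.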
4337 bool Heap::IdleNotification(int idle_time_in_ms) {
4338   // If incremental marking is off, we do not perform idle notification.
4339   if (!FLAG_incremental_marking) return true;
4340   base::ElapsedTimer timer;
4341   timer.Start();
4342   isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
4343       idle_time_in_ms);
4344   HistogramTimerScope idle_notification_scope(
4345       isolate_->counters()->gc_idle_notification());
4346
4347   GCIdleTimeHandler::HeapState heap_state;
4348   heap_state.contexts_disposed = contexts_disposed_;
4349   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
4350   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
4351   // TODO(ulan): Start incremental marking only for large heaps.
4352   heap_state.can_start_incremental_marking =
4353       incremental_marking()->ShouldActivate();
4354   heap_state.sweeping_in_progress =
4355       mark_compact_collector()->sweeping_in_progress();
4356   heap_state.mark_compact_speed_in_bytes_per_ms =
4357       static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
4358   heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
4359       tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
4360   heap_state.scavenge_speed_in_bytes_per_ms =
4361       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
4362   heap_state.available_new_space_memory = new_space_.Available();
4363   heap_state.new_space_capacity = new_space_.Capacity();
4364   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
4365       static_cast<size_t>(
4366           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
4367
4368   GCIdleTimeAction action =
4369       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
4370
4371   bool result = false;
4372   switch (action.type) {
4373     case DONE:
4374       result = true;
4375       break;
4376     case DO_INCREMENTAL_MARKING:
4377       if (incremental_marking()->IsStopped()) {
4378         incremental_marking()->Start();
4379       }
4380       AdvanceIdleIncrementalMarking(action.parameter);
4381       break;
4382     case DO_FULL_GC: {
4383       HistogramTimerScope scope(isolate_->counters()->gc_context());
4384       const char* message = contexts_disposed_
4385                                 ? "idle notification: contexts disposed"
4386                                 : "idle notification: finalize idle round";
4387       CollectAllGarbage(kReduceMemoryFootprintMask, message);
4388       gc_idle_time_handler_.NotifyIdleMarkCompact();
4389       break;
4390     }
4391     case DO_SCAVENGE:
4392       CollectGarbage(NEW_SPACE, "idle notification: scavenge");
4393       break;
4394     case DO_FINALIZE_SWEEPING:
4395       mark_compact_collector()->EnsureSweepingCompleted();
4396       break;
4397     case DO_NOTHING:
4398       break;
4399   }
4400
4401   int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
4402   if (actual_time_ms <= idle_time_in_ms) {
4403     isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
4404         idle_time_in_ms - actual_time_ms);
4405   } else {
4406     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
4407         actual_time_ms - idle_time_in_ms);
4408   }
4409
4410   if (FLAG_trace_idle_notification) {
4411     PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
4412            idle_time_in_ms, actual_time_ms);
4413     action.Print();
4414     PrintF("]\n");
4415   }
4416
4417   contexts_disposed_ = 0;
4418   return result;
4419 }
4420
4421
4422 #ifdef DEBUG
4423
4424 void Heap::Print() {
4425   if (!HasBeenSetUp()) return;
4426   isolate()->PrintStack(stdout);
4427   AllSpaces spaces(this);
4428   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
4429     space->Print();
4430   }
4431 }
4432
4433
4434 void Heap::ReportCodeStatistics(const char* title) {
4435   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
4436   PagedSpace::ResetCodeStatistics(isolate());
4437   // We do not look for code in new space, map space, or old space.  If code
4438   // somehow ends up in those spaces, we would miss it here.
4439   code_space_->CollectCodeStatistics();
4440   lo_space_->CollectCodeStatistics();
4441   PagedSpace::ReportCodeStatistics(isolate());
4442 }
4443
4444
4445 // This function expects that NewSpace's allocated objects histogram is
4446 // populated (via a call to CollectStatistics or else as a side effect of a
4447 // just-completed scavenge collection).
4448 void Heap::ReportHeapStatistics(const char* title) {
4449   USE(title);
4450   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
4451          gc_count_);
4452   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4453          old_generation_allocation_limit_);
4454
4455   PrintF("\n");
4456   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
4457   isolate_->global_handles()->PrintStats();
4458   PrintF("\n");
4459
4460   PrintF("Heap statistics : ");
4461   isolate_->memory_allocator()->ReportStatistics();
4462   PrintF("To space : ");
4463   new_space_.ReportStatistics();
4464   PrintF("Old pointer space : ");
4465   old_pointer_space_->ReportStatistics();
4466   PrintF("Old data space : ");
4467   old_data_space_->ReportStatistics();
4468   PrintF("Code space : ");
4469   code_space_->ReportStatistics();
4470   PrintF("Map space : ");
4471   map_space_->ReportStatistics();
4472   PrintF("Cell space : ");
4473   cell_space_->ReportStatistics();
4474   PrintF("PropertyCell space : ");
4475   property_cell_space_->ReportStatistics();
4476   PrintF("Large object space : ");
4477   lo_space_->ReportStatistics();
4478   PrintF(">>>>>> ========================================= >>>>>>\n");
4479 }
4480
4481 #endif  // DEBUG
4482
4483 bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
4484
4485
4486 bool Heap::Contains(Address addr) {
4487   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4488   return HasBeenSetUp() &&
4489          (new_space_.ToSpaceContains(addr) ||
4490           old_pointer_space_->Contains(addr) ||
4491           old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
4492           map_space_->Contains(addr) || cell_space_->Contains(addr) ||
4493           property_cell_space_->Contains(addr) ||
4494           lo_space_->SlowContains(addr));
4495 }
4496
4497
4498 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
4499   return InSpace(value->address(), space);
4500 }
4501
4502
4503 bool Heap::InSpace(Address addr, AllocationSpace space) {
4504   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
4505   if (!HasBeenSetUp()) return false;
4506
4507   switch (space) {
4508     case NEW_SPACE:
4509       return new_space_.ToSpaceContains(addr);
4510     case OLD_POINTER_SPACE:
4511       return old_pointer_space_->Contains(addr);
4512     case OLD_DATA_SPACE:
4513       return old_data_space_->Contains(addr);
4514     case CODE_SPACE:
4515       return code_space_->Contains(addr);
4516     case MAP_SPACE:
4517       return map_space_->Contains(addr);
4518     case CELL_SPACE:
4519       return cell_space_->Contains(addr);
4520     case PROPERTY_CELL_SPACE:
4521       return property_cell_space_->Contains(addr);
4522     case LO_SPACE:
4523       return lo_space_->SlowContains(addr);
4524     case INVALID_SPACE:
4525       break;
4526   }
4527   UNREACHABLE();
4528   return false;
4529 }
4530
4531
4532 #ifdef VERIFY_HEAP
4533 void Heap::Verify() {
4534   CHECK(HasBeenSetUp());
4535   HandleScope scope(isolate());
4536
4537   store_buffer()->Verify();
4538
4539   if (mark_compact_collector()->sweeping_in_progress()) {
4540     // We have to wait here for the sweeper threads to have an iterable heap.
4541     mark_compact_collector()->EnsureSweepingCompleted();
4542   }
4543
4544   VerifyPointersVisitor visitor;
4545   IterateRoots(&visitor, VISIT_ONLY_STRONG);
4546
4547   VerifySmisVisitor smis_visitor;
4548   IterateSmiRoots(&smis_visitor);
4549
4550   new_space_.Verify();
4551
4552   old_pointer_space_->Verify(&visitor);
4553   map_space_->Verify(&visitor);
4554
4555   VerifyPointersVisitor no_dirty_regions_visitor;
4556   old_data_space_->Verify(&no_dirty_regions_visitor);
4557   code_space_->Verify(&no_dirty_regions_visitor);
4558   cell_space_->Verify(&no_dirty_regions_visitor);
4559   property_cell_space_->Verify(&no_dirty_regions_visitor);
4560
4561   lo_space_->Verify();
4562 }
4563 #endif
4564
4565
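// Overwrites every word of the from-space pages with kFromSpaceZapValue so
// that stale references into from-space stand out when debugging.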
4566 void Heap::ZapFromSpace() {
4567   NewSpacePageIterator it(new_space_.FromSpaceStart(),
4568                           new_space_.FromSpaceEnd());
4569   while (it.has_next()) {
4570     NewSpacePage* page = it.next();
4571     for (Address cursor = page->area_start(), limit = page->area_end();
4572          cursor < limit; cursor += kPointerSize) {
4573       Memory::Address_at(cursor) = kFromSpaceZapValue;
4574     }
4575   }
4576 }
4577
4578
4579 void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
4580                                              ObjectSlotCallback callback) {
4581   Address slot_address = start;
4582
4583   // We do not collect slots on new space objects during mutation, so we
4584   // have to scan for pointers to evacuation candidates when we promote
4585   // objects. But we should not record any slots in non-black objects:
4586   // a grey object's slots will be rescanned anyway, and a white object
4587   // might not survive until the end of the collection, so recording its
4588   // slots would violate the invariant.
4589   bool record_slots = false;
4590   if (incremental_marking()->IsCompacting()) {
4591     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4592     record_slots = Marking::IsBlack(mark_bit);
4593   }
4594
4595   while (slot_address < end) {
4596     Object** slot = reinterpret_cast<Object**>(slot_address);
4597     Object* object = *slot;
4598     // If the store buffer becomes overfull we mark pages as being exempt from
4599     // the store buffer.  These pages are scanned to find pointers that point
4600     // to the new space.  In that case we may hit newly promoted objects and
4601     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
4602     if (object->IsHeapObject()) {
4603       if (Heap::InFromSpace(object)) {
4604         callback(reinterpret_cast<HeapObject**>(slot),
4605                  HeapObject::cast(object));
4606         Object* new_object = *slot;
4607         if (InNewSpace(new_object)) {
4608           SLOW_DCHECK(Heap::InToSpace(new_object));
4609           SLOW_DCHECK(new_object->IsHeapObject());
4610           store_buffer_.EnterDirectlyIntoStoreBuffer(
4611               reinterpret_cast<Address>(slot));
4612         }
4613         SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4614       } else if (record_slots &&
4615                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4616         mark_compact_collector()->RecordSlot(slot, slot, object);
4617       }
4618     }
4619     slot_address += kPointerSize;
4620   }
4621 }
4622
4623
4624 #ifdef DEBUG
4625 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4626
4627
4628 bool IsAMapPointerAddress(Object** addr) {
4629   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4630   int mod = a % Map::kSize;
4631   return mod >= Map::kPointerFieldsBeginOffset &&
4632          mod < Map::kPointerFieldsEndOffset;
4633 }
4634
4635
4636 bool EverythingsAPointer(Object** addr) { return true; }
4637
4638
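// Walks the words in [current, limit), skipping free space and the current
// linear allocation area, and checks that every slot accepted by |filter|
// that holds a new-space pointer has a matching entry in the sorted store
// buffer; a missing entry trips the UNREACHABLE() below.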
4639 static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
4640                              Object**** store_buffer_position,
4641                              Object*** store_buffer_top,
4642                              CheckStoreBufferFilter filter,
4643                              Address special_garbage_start,
4644                              Address special_garbage_end) {
4645   Map* free_space_map = heap->free_space_map();
4646   for (; current < limit; current++) {
4647     Object* o = *current;
4648     Address current_address = reinterpret_cast<Address>(current);
4649     // Skip free space.
4650     if (o == free_space_map) {
4651       Address current_address = reinterpret_cast<Address>(current);
4652       FreeSpace* free_space =
4653           FreeSpace::cast(HeapObject::FromAddress(current_address));
4654       int skip = free_space->Size();
4655       DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
4656       DCHECK(skip > 0);
4657       current_address += skip - kPointerSize;
4658       current = reinterpret_cast<Object**>(current_address);
4659       continue;
4660     }
4661     // Skip the current linear allocation space between top and limit which is
4662     // unmarked with the free space map, but can contain junk.
4663     if (current_address == special_garbage_start &&
4664         special_garbage_end != special_garbage_start) {
4665       current_address = special_garbage_end - kPointerSize;
4666       current = reinterpret_cast<Object**>(current_address);
4667       continue;
4668     }
4669     if (!(*filter)(current)) continue;
4670     DCHECK(current_address < special_garbage_start ||
4671            current_address >= special_garbage_end);
4672     DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4673     // We have to check that the pointer does not point into new space
4674     // without trying to cast it to a heap object since the hash field of
4675     // a string can contain values like 1 and 3 which are tagged null
4676     // pointers.
4677     if (!heap->InNewSpace(o)) continue;
4678     while (**store_buffer_position < current &&
4679            *store_buffer_position < store_buffer_top) {
4680       (*store_buffer_position)++;
4681     }
4682     if (**store_buffer_position != current ||
4683         *store_buffer_position == store_buffer_top) {
4684       Object** obj_start = current;
4685       while (!(*obj_start)->IsMap()) obj_start--;
4686       UNREACHABLE();
4687     }
4688   }
4689 }
4690
4691
4692 // Check that the store buffer contains all intergenerational pointers by
4693 // scanning a page and ensuring that all pointers to young space are in the
4694 // store buffer.
4695 void Heap::OldPointerSpaceCheckStoreBuffer() {
4696   OldSpace* space = old_pointer_space();
4697   PageIterator pages(space);
4698
4699   store_buffer()->SortUniq();
4700
4701   while (pages.has_next()) {
4702     Page* page = pages.next();
4703     Object** current = reinterpret_cast<Object**>(page->area_start());
4704
4705     Address end = page->area_end();
4706
4707     Object*** store_buffer_position = store_buffer()->Start();
4708     Object*** store_buffer_top = store_buffer()->Top();
4709
4710     Object** limit = reinterpret_cast<Object**>(end);
4711     CheckStoreBuffer(this, current, limit, &store_buffer_position,
4712                      store_buffer_top, &EverythingsAPointer, space->top(),
4713                      space->limit());
4714   }
4715 }
4716
4717
4718 void Heap::MapSpaceCheckStoreBuffer() {
4719   MapSpace* space = map_space();
4720   PageIterator pages(space);
4721
4722   store_buffer()->SortUniq();
4723
4724   while (pages.has_next()) {
4725     Page* page = pages.next();
4726     Object** current = reinterpret_cast<Object**>(page->area_start());
4727
4728     Address end = page->area_end();
4729
4730     Object*** store_buffer_position = store_buffer()->Start();
4731     Object*** store_buffer_top = store_buffer()->Top();
4732
4733     Object** limit = reinterpret_cast<Object**>(end);
4734     CheckStoreBuffer(this, current, limit, &store_buffer_position,
4735                      store_buffer_top, &IsAMapPointerAddress, space->top(),
4736                      space->limit());
4737   }
4738 }
4739
4740
4741 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4742   LargeObjectIterator it(lo_space());
4743   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4744     // We only have code, sequential strings, or fixed arrays in large
4745     // object space, and only fixed arrays can possibly contain pointers to
4746     // the young generation.
4747     if (object->IsFixedArray()) {
4748       Object*** store_buffer_position = store_buffer()->Start();
4749       Object*** store_buffer_top = store_buffer()->Top();
4750       Object** current = reinterpret_cast<Object**>(object->address());
4751       Object** limit =
4752           reinterpret_cast<Object**>(object->address() + object->Size());
4753       CheckStoreBuffer(this, current, limit, &store_buffer_position,
4754                        store_buffer_top, &EverythingsAPointer, NULL, NULL);
4755     }
4756   }
4757 }
4758 #endif
4759
4760
4761 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4762   IterateStrongRoots(v, mode);
4763   IterateWeakRoots(v, mode);
4764 }
4765
4766
4767 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4768   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
4769   v->Synchronize(VisitorSynchronization::kStringTable);
4770   if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4771     // Scavenge collections have special processing for this.
4772     external_string_table_.Iterate(v);
4773   }
4774   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
4775 }
4776
4777
4778 void Heap::IterateSmiRoots(ObjectVisitor* v) {
4779   // Acquire execution access since we are going to read stack limit values.
4780   ExecutionAccess access(isolate());
4781   v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
4782   v->Synchronize(VisitorSynchronization::kSmiRootList);
4783 }
4784
4785
4786 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
4787   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
4788   v->Synchronize(VisitorSynchronization::kStrongRootList);
4789
4790   v->VisitPointer(bit_cast<Object**>(&hidden_string_));
4791   v->Synchronize(VisitorSynchronization::kInternalizedString);
4792
4793   isolate_->bootstrapper()->Iterate(v);
4794   v->Synchronize(VisitorSynchronization::kBootstrapper);
4795   isolate_->Iterate(v);
4796   v->Synchronize(VisitorSynchronization::kTop);
4797   Relocatable::Iterate(isolate_, v);
4798   v->Synchronize(VisitorSynchronization::kRelocatable);
4799
4800   if (isolate_->deoptimizer_data() != NULL) {
4801     isolate_->deoptimizer_data()->Iterate(v);
4802   }
4803   v->Synchronize(VisitorSynchronization::kDebug);
4804   isolate_->compilation_cache()->Iterate(v);
4805   v->Synchronize(VisitorSynchronization::kCompilationCache);
4806
4807   // Iterate over local handles in handle scopes.
4808   isolate_->handle_scope_implementer()->Iterate(v);
4809   isolate_->IterateDeferredHandles(v);
4810   v->Synchronize(VisitorSynchronization::kHandleScope);
4811
4812   // Iterate over the builtin code objects and code stubs in the
4813   // heap. Note that it is not necessary to iterate over code objects
4814   // on scavenge collections.
4815   if (mode != VISIT_ALL_IN_SCAVENGE) {
4816     isolate_->builtins()->IterateBuiltins(v);
4817   }
4818   v->Synchronize(VisitorSynchronization::kBuiltins);
4819
4820   // Iterate over global handles.
4821   switch (mode) {
4822     case VISIT_ONLY_STRONG:
4823       isolate_->global_handles()->IterateStrongRoots(v);
4824       break;
4825     case VISIT_ALL_IN_SCAVENGE:
4826       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
4827       break;
4828     case VISIT_ALL_IN_SWEEP_NEWSPACE:
4829     case VISIT_ALL:
4830       isolate_->global_handles()->IterateAllRoots(v);
4831       break;
4832   }
4833   v->Synchronize(VisitorSynchronization::kGlobalHandles);
4834
4835   // Iterate over eternal handles.
4836   if (mode == VISIT_ALL_IN_SCAVENGE) {
4837     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
4838   } else {
4839     isolate_->eternal_handles()->IterateAllRoots(v);
4840   }
4841   v->Synchronize(VisitorSynchronization::kEternalHandles);
4842
4843   // Iterate over pointers being held by inactive threads.
4844   isolate_->thread_manager()->Iterate(v);
4845   v->Synchronize(VisitorSynchronization::kThreadManager);
4846
4847   // Iterate over the pointers the Serialization/Deserialization code is
4848   // holding.
4849   // During garbage collection this keeps the partial snapshot cache alive.
4850   // During deserialization of the startup snapshot this creates the partial
4851   // snapshot cache and deserializes the objects it refers to.  During
4852   // serialization this does nothing, since the partial snapshot cache is
4853   // empty.  However the next thing we do is create the partial snapshot,
4854   // filling up the partial snapshot cache with objects it needs as we go.
4855   SerializerDeserializer::Iterate(isolate_, v);
4856   // We don't do a v->Synchronize call here, because in debug mode that will
4857   // output a flag to the snapshot.  However at this point the serializer and
4858   // deserializer are deliberately a little unsynchronized (see above) so the
4859   // checking of the sync flag in the snapshot would fail.
4860 }
4861
4862
4863 // TODO(1236194): Since the heap size is configurable on the command line
4864 // and through the API, we should gracefully handle the case that the heap
4865 // size is not big enough to fit all the initial objects.
4866 bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
4867                          int max_executable_size, size_t code_range_size) {
4868   if (HasBeenSetUp()) return false;
4869
4870   // Overwrite default configuration.
4871   if (max_semi_space_size > 0) {
4872     max_semi_space_size_ = max_semi_space_size * MB;
4873   }
4874   if (max_old_space_size > 0) {
4875     max_old_generation_size_ = max_old_space_size * MB;
4876   }
4877   if (max_executable_size > 0) {
4878     max_executable_size_ = max_executable_size * MB;
4879   }
4880
4881   // If max space size flags are specified overwrite the configuration.
4882   if (FLAG_max_semi_space_size > 0) {
4883     max_semi_space_size_ = FLAG_max_semi_space_size * MB;
4884   }
4885   if (FLAG_max_old_space_size > 0) {
4886     max_old_generation_size_ = FLAG_max_old_space_size * MB;
4887   }
4888   if (FLAG_max_executable_size > 0) {
4889     max_executable_size_ = FLAG_max_executable_size * MB;
4890   }
4891
4892   if (FLAG_stress_compaction) {
4893     // This will cause more frequent GCs when stressing.
4894     max_semi_space_size_ = Page::kPageSize;
4895   }
4896
4897   if (Snapshot::HaveASnapshotToStartFrom()) {
4898     // If we are using a snapshot we always reserve the default amount
4899     // of memory for each semispace because code in the snapshot has
4900     // write-barrier code that relies on the size and alignment of new
4901     // space.  We therefore cannot use a larger max semispace size
4902     // than the default reserved semispace size.
4903     if (max_semi_space_size_ > reserved_semispace_size_) {
4904       max_semi_space_size_ = reserved_semispace_size_;
4905       if (FLAG_trace_gc) {
4906         PrintPID("Max semi-space size cannot be more than %d kbytes\n",
4907                  reserved_semispace_size_ >> 10);
4908       }
4909     }
4910   } else {
4911     // If we are not using snapshots we reserve space for the actual
4912     // max semispace size.
4913     reserved_semispace_size_ = max_semi_space_size_;
4914   }
4915
4916   // The max executable size must be less than or equal to the max old
4917   // generation size.
4918   if (max_executable_size_ > max_old_generation_size_) {
4919     max_executable_size_ = max_old_generation_size_;
4920   }
4921
4922   // The new space size must be a power of two to support single-bit testing
4923   // for containment.
4924   max_semi_space_size_ =
4925       base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
4926   reserved_semispace_size_ =
4927       base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
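  // For example, a requested 6 MB semispace is rounded up to 8 MB here.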
4928
4929   if (FLAG_min_semi_space_size > 0) {
4930     int initial_semispace_size = FLAG_min_semi_space_size * MB;
4931     if (initial_semispace_size > max_semi_space_size_) {
4932       initial_semispace_size_ = max_semi_space_size_;
4933       if (FLAG_trace_gc) {
4934         PrintPID(
4935             "Min semi-space size cannot be more than the maximum "
4936             "semi-space size of %d MB\n",
4937             max_semi_space_size_ / MB);
4938       }
4939     } else {
4940       initial_semispace_size_ = initial_semispace_size;
4941     }
4942   }
4943
4944   initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
4945
4946   // The old generation is paged and needs at least one page for each space.
4947   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
4948   max_old_generation_size_ =
4949       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
4950           max_old_generation_size_);
4951
4952   // We rely on being able to allocate new arrays in paged spaces.
4953   DCHECK(Page::kMaxRegularHeapObjectSize >=
4954          (JSArray::kSize +
4955           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
4956           AllocationMemento::kSize));
4957
4958   code_range_size_ = code_range_size * MB;
4959
4960   configured_ = true;
4961   return true;
4962 }
4963
4964
4965 bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
4966
4967
4968 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
4969   *stats->start_marker = HeapStats::kStartMarker;
4970   *stats->end_marker = HeapStats::kEndMarker;
4971   *stats->new_space_size = new_space_.SizeAsInt();
4972   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
4973   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
4974   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
4975   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
4976   *stats->old_data_space_capacity = old_data_space_->Capacity();
4977   *stats->code_space_size = code_space_->SizeOfObjects();
4978   *stats->code_space_capacity = code_space_->Capacity();
4979   *stats->map_space_size = map_space_->SizeOfObjects();
4980   *stats->map_space_capacity = map_space_->Capacity();
4981   *stats->cell_space_size = cell_space_->SizeOfObjects();
4982   *stats->cell_space_capacity = cell_space_->Capacity();
4983   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
4984   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
4985   *stats->lo_space_size = lo_space_->Size();
4986   isolate_->global_handles()->RecordStats(stats);
4987   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
4988   *stats->memory_allocator_capacity =
4989       isolate()->memory_allocator()->Size() +
4990       isolate()->memory_allocator()->Available();
4991   *stats->os_error = base::OS::GetLastError();
4993   if (take_snapshot) {
4994     HeapIterator iterator(this);
4995     for (HeapObject* obj = iterator.next(); obj != NULL;
4996          obj = iterator.next()) {
4997       InstanceType type = obj->map()->instance_type();
4998       DCHECK(0 <= type && type <= LAST_TYPE);
4999       stats->objects_per_type[type]++;
5000       stats->size_per_type[type] += obj->Size();
5001     }
5002   }
5003 }
5004
5005
5006 intptr_t Heap::PromotedSpaceSizeOfObjects() {
5007   return old_pointer_space_->SizeOfObjects() +
5008          old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
5009          map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
5010          property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
5011 }
5012
5013
5014 int64_t Heap::PromotedExternalMemorySize() {
5015   if (amount_of_external_allocated_memory_ <=
5016       amount_of_external_allocated_memory_at_last_global_gc_)
5017     return 0;
5018   return amount_of_external_allocated_memory_ -
5019          amount_of_external_allocated_memory_at_last_global_gc_;
5020 }
5021
5022
5023 intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
5024                                             int freed_global_handles) {
5025   const int kMaxHandles = 1000;
5026   const int kMinHandles = 100;
5027   double min_factor = 1.1;
5028   double max_factor = 4;
5029   // We set the old generation growing factor to 2 to grow the heap slower on
5030   // memory-constrained devices.
5031   if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
5032     max_factor = 2;
5033   }
5034   // If there are many freed global handles, then the next full GC will
5035   // likely collect a lot of garbage. Choose the heap growing factor
5036   // depending on freed global handles.
5037   // TODO(ulan, hpayer): Take into account mutator utilization.
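  // Example: with max_factor = 4 and 550 freed handles, the interpolation
  // below yields factor = 4 - 450 * 2.9 / 900 = 2.55, so a 100 MB old
  // generation gets a limit of about 255 MB plus the new space capacity,
  // capped at the midpoint between the current and the maximum old
  // generation size.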
5038   double factor;
5039   if (freed_global_handles <= kMinHandles) {
5040     factor = max_factor;
5041   } else if (freed_global_handles >= kMaxHandles) {
5042     factor = min_factor;
5043   } else {
5044     // Compute factor using linear interpolation between points
5045     // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
5046     factor = max_factor -
5047              (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
5048                  (kMaxHandles - kMinHandles);
5049   }
5050
5051   if (FLAG_stress_compaction ||
5052       mark_compact_collector()->reduce_memory_footprint_) {
5053     factor = min_factor;
5054   }
5055
5056   intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
5057   limit = Max(limit, kMinimumOldGenerationAllocationLimit);
5058   limit += new_space_.Capacity();
5059   intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
5060   return Min(limit, halfway_to_the_max);
5061 }
5062
5063
5064 void Heap::EnableInlineAllocation() {
5065   if (!inline_allocation_disabled_) return;
5066   inline_allocation_disabled_ = false;
5067
5068   // Update inline allocation limit for new space.
5069   new_space()->UpdateInlineAllocationLimit(0);
5070 }
5071
5072
5073 void Heap::DisableInlineAllocation() {
5074   if (inline_allocation_disabled_) return;
5075   inline_allocation_disabled_ = true;
5076
5077   // Update inline allocation limit for new space.
5078   new_space()->UpdateInlineAllocationLimit(0);
5079
5080   // Update inline allocation limit for old spaces.
5081   PagedSpaces spaces(this);
5082   for (PagedSpace* space = spaces.next(); space != NULL;
5083        space = spaces.next()) {
5084     space->EmptyAllocationInfo();
5085   }
5086 }
5087
5088
5089 V8_DECLARE_ONCE(initialize_gc_once);
5090
5091 static void InitializeGCOnce() {
5092   InitializeScavengingVisitorsTables();
5093   NewSpaceScavenger::Initialize();
5094   MarkCompactCollector::Initialize();
5095 }
5096
5097
5098 bool Heap::SetUp() {
5099 #ifdef DEBUG
5100   allocation_timeout_ = FLAG_gc_interval;
5101 #endif
5102
5103   // Initialize heap spaces and initial maps and objects. Whenever something
5104   // goes wrong, just return false. The caller should check the results and
5105   // call Heap::TearDown() to release allocated memory.
5106   //
5107   // If the heap is not yet configured (e.g. through the API), configure it.
5108   // Configuration is based on the flags new-space-size (really the semispace
5109   // size) and old-space-size if set or the initial values of semispace_size_
5110   // and old_generation_size_ otherwise.
5111   if (!configured_) {
5112     if (!ConfigureHeapDefault()) return false;
5113   }
5114
5115   base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
5116
5117   MarkMapPointersAsEncoded(false);
5118
5119   // Set up memory allocator.
5120   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
5121     return false;
5122
5123   // Set up new space.
5124   if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
5125     return false;
5126   }
5127   new_space_top_after_last_gc_ = new_space()->top();
5128
5129   // Initialize old pointer space.
5130   old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
5131                                     OLD_POINTER_SPACE, NOT_EXECUTABLE);
5132   if (old_pointer_space_ == NULL) return false;
5133   if (!old_pointer_space_->SetUp()) return false;
5134
5135   // Initialize old data space.
5136   old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
5137                                  NOT_EXECUTABLE);
5138   if (old_data_space_ == NULL) return false;
5139   if (!old_data_space_->SetUp()) return false;
5140
5141   if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
5142
5143   // Initialize the code space, set its maximum capacity to the old
5144   // generation size. It needs executable memory.
5145   code_space_ =
5146       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5147   if (code_space_ == NULL) return false;
5148   if (!code_space_->SetUp()) return false;
5149
5150   // Initialize map space.
5151   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
5152   if (map_space_ == NULL) return false;
5153   if (!map_space_->SetUp()) return false;
5154
5155   // Initialize simple cell space.
5156   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5157   if (cell_space_ == NULL) return false;
5158   if (!cell_space_->SetUp()) return false;
5159
5160   // Initialize global property cell space.
5161   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
5162                                                PROPERTY_CELL_SPACE);
5163   if (property_cell_space_ == NULL) return false;
5164   if (!property_cell_space_->SetUp()) return false;
5165
5166   // The large object space may contain code or data.  We set the memory
5167   // to be non-executable here for safety, but this means we need to enable it
5168   // explicitly when allocating large code objects.
5169   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
5170   if (lo_space_ == NULL) return false;
5171   if (!lo_space_->SetUp()) return false;
5172
5173   // Set up the seed that is used to randomize the string hash function.
5174   DCHECK(hash_seed() == 0);
5175   if (FLAG_randomize_hashes) {
5176     if (FLAG_hash_seed == 0) {
5177       int rnd = isolate()->random_number_generator()->NextInt();
5178       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
5179     } else {
5180       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
5181     }
5182   }
5183
5184   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5185   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5186
5187   store_buffer()->SetUp();
5188
5189   mark_compact_collector()->SetUp();
5190
5191   return true;
5192 }
5193
5194
5195 bool Heap::CreateHeapObjects() {
5196   // Create initial maps.
5197   if (!CreateInitialMaps()) return false;
5198   CreateApiObjects();
5199
5200   // Create initial objects
5201   CreateInitialObjects();
5202   CHECK_EQ(0, gc_count_);
5203
5204   set_native_contexts_list(undefined_value());
5205   set_array_buffers_list(undefined_value());
5206   set_allocation_sites_list(undefined_value());
5207   weak_object_to_code_table_ = undefined_value();
5208   return true;
5209 }
5210
5211
5212 void Heap::SetStackLimits() {
5213   DCHECK(isolate_ != NULL);
5214   DCHECK(isolate_ == isolate());
5215   // On 64-bit machines, pointers are generally out of range of Smis.  We
5216   // write something that looks like an out-of-range Smi to the GC.
5217
5218   // Set up the special root array entries containing the stack limits.
5219   // These are actually addresses, but the tag makes the GC ignore it.
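  // Clearing the tag bits and or-ing in kSmiTag (which is zero) makes each
  // limit look like a Smi, so the GC never follows these entries as heap
  // pointers.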
5220   roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
5221       (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5222   roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
5223       (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5224 }
5225
5226
5227 void Heap::TearDown() {
5228 #ifdef VERIFY_HEAP
5229   if (FLAG_verify_heap) {
5230     Verify();
5231   }
5232 #endif
5233
5234   UpdateMaximumCommitted();
5235
5236   if (FLAG_print_cumulative_gc_stat) {
5237     PrintF("\n");
5238     PrintF("gc_count=%d ", gc_count_);
5239     PrintF("mark_sweep_count=%d ", ms_count_);
5240     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
5241     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
5242     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
5243     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
5244     PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
5245     PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
5246     PrintF("\n\n");
5247   }
5248
5249   if (FLAG_print_max_heap_committed) {
5250     PrintF("\n");
5251     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
5252            MaximumCommittedMemory());
5253     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
5254            new_space_.MaximumCommittedMemory());
5255     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
5256            old_data_space_->MaximumCommittedMemory());
5257     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5258            old_pointer_space_->MaximumCommittedMemory());
5259     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
5260            old_pointer_space_->MaximumCommittedMemory());
5261     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
5262            code_space_->MaximumCommittedMemory());
5263     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
5264            map_space_->MaximumCommittedMemory());
5265     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
5266            cell_space_->MaximumCommittedMemory());
5267     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
5268            property_cell_space_->MaximumCommittedMemory());
5269     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
5270            lo_space_->MaximumCommittedMemory());
5271     PrintF("\n\n");
5272   }
5273
5274   if (FLAG_verify_predictable) {
5275     PrintAlloctionsHash();
5276   }
5277
5278   TearDownArrayBuffers();
5279
5280   isolate_->global_handles()->TearDown();
5281
5282   external_string_table_.TearDown();
5283
5284   mark_compact_collector()->TearDown();
5285
5286   new_space_.TearDown();
5287
5288   if (old_pointer_space_ != NULL) {
5289     old_pointer_space_->TearDown();
5290     delete old_pointer_space_;
5291     old_pointer_space_ = NULL;
5292   }
5293
5294   if (old_data_space_ != NULL) {
5295     old_data_space_->TearDown();
5296     delete old_data_space_;
5297     old_data_space_ = NULL;
5298   }
5299
5300   if (code_space_ != NULL) {
5301     code_space_->TearDown();
5302     delete code_space_;
5303     code_space_ = NULL;
5304   }
5305
5306   if (map_space_ != NULL) {
5307     map_space_->TearDown();
5308     delete map_space_;
5309     map_space_ = NULL;
5310   }
5311
5312   if (cell_space_ != NULL) {
5313     cell_space_->TearDown();
5314     delete cell_space_;
5315     cell_space_ = NULL;
5316   }
5317
5318   if (property_cell_space_ != NULL) {
5319     property_cell_space_->TearDown();
5320     delete property_cell_space_;
5321     property_cell_space_ = NULL;
5322   }
5323
5324   if (lo_space_ != NULL) {
5325     lo_space_->TearDown();
5326     delete lo_space_;
5327     lo_space_ = NULL;
5328   }
5329
5330   store_buffer()->TearDown();
5331   incremental_marking()->TearDown();
5332
5333   isolate_->memory_allocator()->TearDown();
5334 }
5335
5336
5337 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
5338                                  GCType gc_type, bool pass_isolate) {
5339   DCHECK(callback != NULL);
5340   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
5341   DCHECK(!gc_prologue_callbacks_.Contains(pair));
5342   return gc_prologue_callbacks_.Add(pair);
5343 }
5344
5345
5346 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
5347   DCHECK(callback != NULL);
5348   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
5349     if (gc_prologue_callbacks_[i].callback == callback) {
5350       gc_prologue_callbacks_.Remove(i);
5351       return;
5352     }
5353   }
5354   UNREACHABLE();
5355 }
5356
5357
5358 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
5359                                  GCType gc_type, bool pass_isolate) {
5360   DCHECK(callback != NULL);
5361   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
5362   DCHECK(!gc_epilogue_callbacks_.Contains(pair));
5363   return gc_epilogue_callbacks_.Add(pair);
5364 }
5365
5366
5367 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
5368   DCHECK(callback != NULL);
5369   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
5370     if (gc_epilogue_callbacks_[i].callback == callback) {
5371       gc_epilogue_callbacks_.Remove(i);
5372       return;
5373     }
5374   }
5375   UNREACHABLE();
5376 }
5377
5378
5379 // TODO(ishell): Find a better place for this.
5380 void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
5381                                          Handle<DependentCode> dep) {
5382   DCHECK(!InNewSpace(*obj));
5383   DCHECK(!InNewSpace(*dep));
5384   // This handle scope keeps the table handle local to this function, which
5385   // allows us to safely skip write barriers in table update operations.
5386   HandleScope scope(isolate());
5387   Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
5388                               isolate());
5389   table = WeakHashTable::Put(table, obj, dep);
5390
5391   if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
5392     WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
5393   }
5394   set_weak_object_to_code_table(*table);
5395   DCHECK_EQ(*dep, table->Lookup(obj));
5396 }
5397
5398
5399 DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
5400   Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
5401   if (dep->IsDependentCode()) return DependentCode::cast(dep);
5402   return DependentCode::cast(empty_fixed_array());
5403 }
5404
5405
5406 void Heap::EnsureWeakObjectToCodeTable() {
5407   if (!weak_object_to_code_table()->IsHashTable()) {
5408     set_weak_object_to_code_table(
5409         *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
5410                             TENURED));
5411   }
5412 }
5413
5414
5415 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
5416   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
5417 }
5418
5419 #ifdef DEBUG
5420
5421 class PrintHandleVisitor : public ObjectVisitor {
5422  public:
5423   void VisitPointers(Object** start, Object** end) {
5424     for (Object** p = start; p < end; p++)
5425       PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
5426              reinterpret_cast<void*>(*p));
5427   }
5428 };
5429
5430
5431 void Heap::PrintHandles() {
5432   PrintF("Handles:\n");
5433   PrintHandleVisitor v;
5434   isolate_->handle_scope_implementer()->Iterate(&v);
5435 }
5436
5437 #endif
5438
5439
5440 Space* AllSpaces::next() {
5441   switch (counter_++) {
5442     case NEW_SPACE:
5443       return heap_->new_space();
5444     case OLD_POINTER_SPACE:
5445       return heap_->old_pointer_space();
5446     case OLD_DATA_SPACE:
5447       return heap_->old_data_space();
5448     case CODE_SPACE:
5449       return heap_->code_space();
5450     case MAP_SPACE:
5451       return heap_->map_space();
5452     case CELL_SPACE:
5453       return heap_->cell_space();
5454     case PROPERTY_CELL_SPACE:
5455       return heap_->property_cell_space();
5456     case LO_SPACE:
5457       return heap_->lo_space();
5458     default:
5459       return NULL;
5460   }
5461 }
5462
5463
5464 PagedSpace* PagedSpaces::next() {
5465   switch (counter_++) {
5466     case OLD_POINTER_SPACE:
5467       return heap_->old_pointer_space();
5468     case OLD_DATA_SPACE:
5469       return heap_->old_data_space();
5470     case CODE_SPACE:
5471       return heap_->code_space();
5472     case MAP_SPACE:
5473       return heap_->map_space();
5474     case CELL_SPACE:
5475       return heap_->cell_space();
5476     case PROPERTY_CELL_SPACE:
5477       return heap_->property_cell_space();
5478     default:
5479       return NULL;
5480   }
5481 }
5482
5483
5484 OldSpace* OldSpaces::next() {
5485   switch (counter_++) {
5486     case OLD_POINTER_SPACE:
5487       return heap_->old_pointer_space();
5488     case OLD_DATA_SPACE:
5489       return heap_->old_data_space();
5490     case CODE_SPACE:
5491       return heap_->code_space();
5492     default:
5493       return NULL;
5494   }
5495 }
5496
5497
5498 SpaceIterator::SpaceIterator(Heap* heap)
5499     : heap_(heap),
5500       current_space_(FIRST_SPACE),
5501       iterator_(NULL),
5502       size_func_(NULL) {}
5503
5504
5505 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
5506     : heap_(heap),
5507       current_space_(FIRST_SPACE),
5508       iterator_(NULL),
5509       size_func_(size_func) {}
5510
5511
5512 SpaceIterator::~SpaceIterator() {
5513   // Delete active iterator if any.
5514   delete iterator_;
5515 }
5516
5517
5518 bool SpaceIterator::has_next() {
5519   // Iterate until no more spaces.
5520   return current_space_ != LAST_SPACE;
5521 }
5522
5523
5524 ObjectIterator* SpaceIterator::next() {
5525   if (iterator_ != NULL) {
5526     delete iterator_;
5527     iterator_ = NULL;
5528     // Move to the next space
5529     current_space_++;
5530     if (current_space_ > LAST_SPACE) {
5531       return NULL;
5532     }
5533   }
5534
5535   // Return iterator for the new current space.
5536   return CreateIterator();
5537 }
5538
5539
5540 // Create an iterator for the space to iterate.
5541 ObjectIterator* SpaceIterator::CreateIterator() {
5542   DCHECK(iterator_ == NULL);
5543
5544   switch (current_space_) {
5545     case NEW_SPACE:
5546       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
5547       break;
5548     case OLD_POINTER_SPACE:
5549       iterator_ =
5550           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
5551       break;
5552     case OLD_DATA_SPACE:
5553       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
5554       break;
5555     case CODE_SPACE:
5556       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
5557       break;
5558     case MAP_SPACE:
5559       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
5560       break;
5561     case CELL_SPACE:
5562       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
5563       break;
5564     case PROPERTY_CELL_SPACE:
5565       iterator_ =
5566           new HeapObjectIterator(heap_->property_cell_space(), size_func_);
5567       break;
5568     case LO_SPACE:
5569       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
5570       break;
5571   }
5572
5573   // Return the newly allocated iterator.
5574   DCHECK(iterator_ != NULL);
5575   return iterator_;
5576 }
5577
5578
5579 class HeapObjectsFilter {
5580  public:
5581   virtual ~HeapObjectsFilter() {}
5582   virtual bool SkipObject(HeapObject* object) = 0;
5583 };
5584
5585
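// Filters out objects that are not reachable from the roots: the constructor
// marks everything reachable with a private marking visitor, SkipObject()
// rejects unmarked objects, and the destructor clears the mark bits again.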
5586 class UnreachableObjectsFilter : public HeapObjectsFilter {
5587  public:
5588   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
5589     MarkReachableObjects();
5590   }
5591
5592   ~UnreachableObjectsFilter() {
5593     heap_->mark_compact_collector()->ClearMarkbits();
5594   }
5595
5596   bool SkipObject(HeapObject* object) {
5597     MarkBit mark_bit = Marking::MarkBitFrom(object);
5598     return !mark_bit.Get();
5599   }
5600
5601  private:
5602   class MarkingVisitor : public ObjectVisitor {
5603    public:
5604     MarkingVisitor() : marking_stack_(10) {}
5605
5606     void VisitPointers(Object** start, Object** end) {
5607       for (Object** p = start; p < end; p++) {
5608         if (!(*p)->IsHeapObject()) continue;
5609         HeapObject* obj = HeapObject::cast(*p);
5610         MarkBit mark_bit = Marking::MarkBitFrom(obj);
5611         if (!mark_bit.Get()) {
5612           mark_bit.Set();
5613           marking_stack_.Add(obj);
5614         }
5615       }
5616     }
5617
5618     void TransitiveClosure() {
5619       while (!marking_stack_.is_empty()) {
5620         HeapObject* obj = marking_stack_.RemoveLast();
5621         obj->Iterate(this);
5622       }
5623     }
5624
5625    private:
5626     List<HeapObject*> marking_stack_;
5627   };
5628
5629   void MarkReachableObjects() {
5630     MarkingVisitor visitor;
5631     heap_->IterateRoots(&visitor, VISIT_ALL);
5632     visitor.TransitiveClosure();
5633   }
5634
5635   Heap* heap_;
5636   DisallowHeapAllocation no_allocation_;
5637 };
5638
5639
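// A minimal usage sketch (mirroring Heap::RecordStats above):
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL;
//        obj = iterator.next()) {
//     // inspect obj
//   }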
5640 HeapIterator::HeapIterator(Heap* heap)
5641     : make_heap_iterable_helper_(heap),
5642       no_heap_allocation_(),
5643       heap_(heap),
5644       filtering_(HeapIterator::kNoFiltering),
5645       filter_(NULL) {
5646   Init();
5647 }
5648
5649
5650 HeapIterator::HeapIterator(Heap* heap,
5651                            HeapIterator::HeapObjectsFiltering filtering)
5652     : make_heap_iterable_helper_(heap),
5653       no_heap_allocation_(),
5654       heap_(heap),
5655       filtering_(filtering),
5656       filter_(NULL) {
5657   Init();
5658 }
5659
5660
5661 HeapIterator::~HeapIterator() { Shutdown(); }
5662
5663
5664 void HeapIterator::Init() {
5665   // Start the iteration.
5666   space_iterator_ = new SpaceIterator(heap_);
5667   switch (filtering_) {
5668     case kFilterUnreachable:
5669       filter_ = new UnreachableObjectsFilter(heap_);
5670       break;
5671     default:
5672       break;
5673   }
5674   object_iterator_ = space_iterator_->next();
5675 }
5676
5677
5678 void HeapIterator::Shutdown() {
5679 #ifdef DEBUG
5680   // Assert that in filtering mode we have iterated through all
5681   // objects. Otherwise, heap will be left in an inconsistent state.
5682   if (filtering_ != kNoFiltering) {
5683     DCHECK(object_iterator_ == NULL);
5684   }
5685 #endif
5686   // Make sure the last iterator is deallocated.
5687   delete space_iterator_;
5688   space_iterator_ = NULL;
5689   object_iterator_ = NULL;
5690   delete filter_;
5691   filter_ = NULL;
5692 }
5693
5694
5695 HeapObject* HeapIterator::next() {
5696   if (filter_ == NULL) return NextObject();
5697
5698   HeapObject* obj = NextObject();
5699   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
5700   return obj;
5701 }
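// A hedged usage sketch of the iterator (hypothetical debug-only call site,
// not part of the original file): with kFilterUnreachable the loop visits only
// the objects the marking pass reached, printing a short summary of each.
// No allocation may happen while the iterator is live.
//
//   HeapIterator it(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     obj->ShortPrint();  // or any other side-effect-free inspection
//   }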
5702
5703
5704 HeapObject* HeapIterator::NextObject() {
5705   // No iterator means we are done.
5706   if (object_iterator_ == NULL) return NULL;
5707
5708   if (HeapObject* obj = object_iterator_->next_object()) {
5709     // If the current iterator has more objects we are fine.
5710     return obj;
5711   } else {
5712     // Go through the spaces looking for one that has objects.
5713     while (space_iterator_->has_next()) {
5714       object_iterator_ = space_iterator_->next();
5715       if (HeapObject* obj = object_iterator_->next_object()) {
5716         return obj;
5717       }
5718     }
5719   }
5720   // Done with the last space.
5721   object_iterator_ = NULL;
5722   return NULL;
5723 }
5724
5725
5726 void HeapIterator::reset() {
5727   // Restart the iterator.
5728   Shutdown();
5729   Init();
5730 }
5731
5732
5733 #ifdef DEBUG
5734
5735 Object* const PathTracer::kAnyGlobalObject = NULL;
5736
5737 class PathTracer::MarkVisitor : public ObjectVisitor {
5738  public:
5739   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5740   void VisitPointers(Object** start, Object** end) {
5741     // Scan all HeapObject pointers in [start, end)
5742     for (Object** p = start; !tracer_->found() && (p < end); p++) {
5743       if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
5744     }
5745   }
5746
5747  private:
5748   PathTracer* tracer_;
5749 };
5750
5751
5752 class PathTracer::UnmarkVisitor : public ObjectVisitor {
5753  public:
5754   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
5755   void VisitPointers(Object** start, Object** end) {
5756     // Scan all HeapObject pointers in [start, end)
5757     for (Object** p = start; p < end; p++) {
5758       if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
5759     }
5760   }
5761
5762  private:
5763   PathTracer* tracer_;
5764 };
5765
5766
5767 void PathTracer::VisitPointers(Object** start, Object** end) {
5768   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
5769   // Visit all HeapObject pointers in [start, end)
5770   for (Object** p = start; !done && (p < end); p++) {
5771     if ((*p)->IsHeapObject()) {
5772       TracePathFrom(p);
5773       done = ((what_to_find_ == FIND_FIRST) && found_target_);
5774     }
5775   }
5776 }
5777
5778
5779 void PathTracer::Reset() {
5780   found_target_ = false;
5781   object_stack_.Clear();
5782 }
5783
5784
5785 void PathTracer::TracePathFrom(Object** root) {
5786   DCHECK((search_target_ == kAnyGlobalObject) ||
5787          search_target_->IsHeapObject());
5788   found_target_in_trace_ = false;
5789   Reset();
5790
5791   MarkVisitor mark_visitor(this);
5792   MarkRecursively(root, &mark_visitor);
5793
5794   UnmarkVisitor unmark_visitor(this);
5795   UnmarkRecursively(root, &unmark_visitor);
5796
5797   ProcessResults();
5798 }
5799
5800
5801 static bool SafeIsNativeContext(HeapObject* obj) {
5802   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
5803 }
5804
5805
5806 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5807   if (!(*p)->IsHeapObject()) return;
5808
5809   HeapObject* obj = HeapObject::cast(*p);
5810
5811   MapWord map_word = obj->map_word();
5812   if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
5813
5814   if (found_target_in_trace_) return;  // stop if target found
5815   object_stack_.Add(obj);
5816   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5817       (obj == search_target_)) {
5818     found_target_in_trace_ = true;
5819     found_target_ = true;
5820     return;
5821   }
5822
5823   bool is_native_context = SafeIsNativeContext(obj);
5824
5825   // not visited yet
5826   Map* map = Map::cast(map_word.ToMap());
5827
5828   MapWord marked_map_word =
5829       MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
5830   obj->set_map_word(marked_map_word);
5831
5832   // Scan the object body.
5833   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
5834     // This is specialized to scan Contexts properly.
5835     Object** start =
5836         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
5837     Object** end =
5838         reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
5839                                    Context::FIRST_WEAK_SLOT * kPointerSize);
5840     mark_visitor->VisitPointers(start, end);
5841   } else {
5842     obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
5843   }
5844
5845   // Scan the map after the body because the body is a lot more interesting
5846   // when doing leak detection.
5847   MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
5848
5849   if (!found_target_in_trace_) {  // don't pop if found the target
5850     object_stack_.RemoveLast();
5851   }
5852 }
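// MarkRecursively records "visited" by adding kMarkTag to the raw map word, so
// the stored map no longer looks like a heap object pointer; UnmarkRecursively
// later subtracts the tag, restoring the original word.  A tiny worked sketch
// of the reversible tagging (hypothetical values; assumes heap pointers carry
// a low tag bit as in V8's Smi/HeapObject tagging):
//
//   const uintptr_t kTag = 2;            // assumed value of kMarkTag
//   uintptr_t map_word = 0x12345671;     // pretend tagged map pointer
//   uintptr_t marked = map_word + kTag;  // 0x12345673: no longer a valid
//                                        // heap object pointer pattern
//   uintptr_t restored = marked - kTag;  // 0x12345671: original recovered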
5853
5854
5855 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
5856   if (!(*p)->IsHeapObject()) return;
5857
5858   HeapObject* obj = HeapObject::cast(*p);
5859
5860   MapWord map_word = obj->map_word();
5861   if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
5862
5863   MapWord unmarked_map_word =
5864       MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
5865   obj->set_map_word(unmarked_map_word);
5866
5867   Map* map = Map::cast(unmarked_map_word.ToMap());
5868
5869   UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
5870
5871   obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
5872 }
5873
5874
5875 void PathTracer::ProcessResults() {
5876   if (found_target_) {
5877     OFStream os(stdout);
5878     os << "=====================================\n"
5879        << "====        Path to object       ====\n"
5880        << "=====================================\n\n";
5881
5882     DCHECK(!object_stack_.is_empty());
5883     for (int i = 0; i < object_stack_.length(); i++) {
5884       if (i > 0) os << "\n     |\n     |\n     V\n\n";
5885       object_stack_[i]->Print(os);
5886     }
5887     os << "=====================================\n";
5888   }
5889 }
5890
5891
5892 // Triggers a depth-first traversal of reachable objects from one
5893 // given root object and finds a path to a specific heap object and
5894 // prints it.
5895 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
5896   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5897   tracer.VisitPointer(&root);
5898 }
5899
5900
5901 // Triggers a depth-first traversal of reachable objects from roots
5902 // and finds a path to a specific heap object and prints it.
5903 void Heap::TracePathToObject(Object* target) {
5904   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
5905   IterateRoots(&tracer, VISIT_ONLY_STRONG);
5906 }
5907
5908
5909 // Triggers a depth-first traversal of reachable objects from roots
5910 // and finds a path to any global object and prints it. Useful for
5911 // determining the source for leaks of global objects.
5912 void Heap::TracePathToGlobal() {
5913   PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
5914                     VISIT_ALL);
5915   IterateRoots(&tracer, VISIT_ONLY_STRONG);
5916 }
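// A hedged usage sketch (hypothetical call site, not part of the original
// file): these helpers are meant to be invoked from debug code or a debugger
// while chasing leaks.
//
//   heap->TracePathToObject(suspect);  // print every root-to-suspect path
//   heap->TracePathToGlobal();         // or: why is any global still alive?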
5917 #endif
5918
5919
5920 void Heap::UpdateCumulativeGCStatistics(double duration,
5921                                         double spent_in_mutator,
5922                                         double marking_time) {
5923   if (FLAG_print_cumulative_gc_stat) {
5924     total_gc_time_ms_ += duration;
5925     max_gc_pause_ = Max(max_gc_pause_, duration);
5926     max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
5927     min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
5928   } else if (FLAG_trace_gc_verbose) {
5929     total_gc_time_ms_ += duration;
5930   }
5931
5932   marking_time_ += marking_time;
5933 }
5934
5935
5936 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
5937   DisallowHeapAllocation no_gc;
5938   // Uses only lower 32 bits if pointers are larger.
5939   uintptr_t addr_hash =
5940       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
5941   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
5942 }
5943
5944
5945 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
5946   DisallowHeapAllocation no_gc;
5947   int index = (Hash(map, name) & kHashMask);
5948   for (int i = 0; i < kEntriesPerBucket; i++) {
5949     Key& key = keys_[index + i];
5950     if ((key.map == *map) && key.name->Equals(*name)) {
5951       return field_offsets_[index + i];
5952     }
5953   }
5954   return kNotFound;
5955 }
5956
5957
5958 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
5959                               int field_offset) {
5960   DisallowHeapAllocation no_gc;
5961   if (!name->IsUniqueName()) {
5962     if (!StringTable::InternalizeStringIfExists(
5963              name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
5964       return;
5965     }
5966   }
5967   // This cache is cleared only between mark compact passes, so we expect the
5968   // cache to only contain old space names.
5969   DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
5970
5971   int index = (Hash(map, name) & kHashMask);
5972   // After a GC there will be free slots, so we use them in order (this may
5973   // help to get the most frequently used one in position 0).
5974   for (int i = 0; i < kEntriesPerBucket; i++) {
5975     Key& key = keys_[index];
5976     Object* free_entry_indicator = NULL;
5977     if (key.map == free_entry_indicator) {
5978       key.map = *map;
5979       key.name = *name;
5980       field_offsets_[index + i] = field_offset;
5981       return;
5982     }
5983   }
5984   // No free entry was found in this bucket, so we shift all entries down
5985   // by one and put the new entry at position zero.
5986   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
5987     Key& key = keys_[index + i];
5988     Key& key2 = keys_[index + i - 1];
5989     key = key2;
5990     field_offsets_[index + i] = field_offsets_[index + i - 1];
5991   }
5992
5993   // Write the new first entry.
5994   Key& key = keys_[index];
5995   key.map = *map;
5996   key.name = *name;
5997   field_offsets_[index] = field_offset;
5998 }
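// The cache is organized as fixed-size buckets of kEntriesPerBucket entries:
// an update either fills a free slot or shifts the bucket down by one and
// writes the new entry at position zero, keeping recent entries at the front.
// A minimal generic sketch of that bucket update (hypothetical names and
// types, not the actual cache):
//
//   template <typename K, typename V, int kPerBucket>
//   void BucketInsert(K* keys, V* values, int bucket_start,
//                     K key, V value, K free_marker) {
//     if (keys[bucket_start] == free_marker) {    // reuse the free slot
//       keys[bucket_start] = key;
//       values[bucket_start] = value;
//       return;
//     }
//     for (int i = kPerBucket - 1; i > 0; i--) {  // shift entries down by one
//       keys[bucket_start + i] = keys[bucket_start + i - 1];
//       values[bucket_start + i] = values[bucket_start + i - 1];
//     }
//     keys[bucket_start] = key;                   // newest entry goes in front
//     values[bucket_start] = value;
//   }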
5999
6000
6001 void KeyedLookupCache::Clear() {
6002   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
6003 }
6004
6005
6006 void DescriptorLookupCache::Clear() {
6007   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
6008 }
6009
6010
6011 void ExternalStringTable::CleanUp() {
6012   int last = 0;
6013   for (int i = 0; i < new_space_strings_.length(); ++i) {
6014     if (new_space_strings_[i] == heap_->the_hole_value()) {
6015       continue;
6016     }
6017     DCHECK(new_space_strings_[i]->IsExternalString());
6018     if (heap_->InNewSpace(new_space_strings_[i])) {
6019       new_space_strings_[last++] = new_space_strings_[i];
6020     } else {
6021       old_space_strings_.Add(new_space_strings_[i]);
6022     }
6023   }
6024   new_space_strings_.Rewind(last);
6025   new_space_strings_.Trim();
6026
6027   last = 0;
6028   for (int i = 0; i < old_space_strings_.length(); ++i) {
6029     if (old_space_strings_[i] == heap_->the_hole_value()) {
6030       continue;
6031     }
6032     DCHECK(old_space_strings_[i]->IsExternalString());
6033     DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
6034     old_space_strings_[last++] = old_space_strings_[i];
6035   }
6036   old_space_strings_.Rewind(last);
6037   old_space_strings_.Trim();
6038 #ifdef VERIFY_HEAP
6039   if (FLAG_verify_heap) {
6040     Verify();
6041   }
6042 #endif
6043 }
6044
6045
6046 void ExternalStringTable::TearDown() {
6047   for (int i = 0; i < new_space_strings_.length(); ++i) {
6048     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
6049   }
6050   new_space_strings_.Free();
6051   for (int i = 0; i < old_space_strings_.length(); ++i) {
6052     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
6053   }
6054   old_space_strings_.Free();
6055 }
6056
6057
6058 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6059   chunk->set_next_chunk(chunks_queued_for_free_);
6060   chunks_queued_for_free_ = chunk;
6061 }
6062
6063
6064 void Heap::FreeQueuedChunks() {
6065   if (chunks_queued_for_free_ == NULL) return;
6066   MemoryChunk* next;
6067   MemoryChunk* chunk;
6068   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6069     next = chunk->next_chunk();
6070     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6071
6072     if (chunk->owner()->identity() == LO_SPACE) {
6073       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6074       // If FromAnyPointerAddress encounters a slot that belongs to a large
6075       // chunk queued for deletion, it will fail to find the chunk because
6076       // it tries to search the list of pages owned by the large object
6077       // space, and queued chunks were detached from that list.  To work
6078       // around this we split the large chunk into normal kPageSize-aligned
6079       // pieces and initialize the size, owner and flags fields of each piece.
6080       // If FromAnyPointerAddress encounters a slot that belongs to one of
6081       // these smaller pieces, it will treat it as a slot on a normal Page.
6082       Address chunk_end = chunk->address() + chunk->size();
6083       MemoryChunk* inner =
6084           MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
6085       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
6086       while (inner <= inner_last) {
6087         // The size of a large chunk is always a multiple of
6088         // OS::AllocateAlignment(), so there is always enough space
6089         // for a fake MemoryChunk header.
6090         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
6091         // Guard against overflow.
6092         if (area_end < inner->address()) area_end = chunk_end;
6093         inner->SetArea(inner->address(), area_end);
6094         inner->set_size(Page::kPageSize);
6095         inner->set_owner(lo_space());
6096         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6097         inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
6098       }
6099     }
6100   }
6101   isolate_->heap()->store_buffer()->Compact();
6102   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6103   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6104     next = chunk->next_chunk();
6105     isolate_->memory_allocator()->Free(chunk);
6106   }
6107   chunks_queued_for_free_ = NULL;
6108 }
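// The splitting loop above walks the large chunk in Page::kPageSize steps and
// stamps a fake header at every page boundary after the first.  The address
// arithmetic, as a hedged stand-alone sketch (hypothetical page size and
// callback, not V8 types):
//
//   const uintptr_t kPage = 1 << 20;  // assumed page size
//
//   void ForEachTrailingPiece(uintptr_t start, uintptr_t end,
//                             void (*visit)(uintptr_t from, uintptr_t to)) {
//     for (uintptr_t p = start + kPage; p < end; p += kPage) {
//       uintptr_t piece_end = (p + kPage < end) ? p + kPage : end;
//       visit(p, piece_end);  // one fake MemoryChunk-sized piece
//     }
//   }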
6109
6110
6111 void Heap::RememberUnmappedPage(Address page, bool compacted) {
6112   uintptr_t p = reinterpret_cast<uintptr_t>(page);
6113   // Tag the page pointer to make it findable in the dump file.
6114   if (compacted) {
6115     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
6116   } else {
6117     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
6118   }
6119   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
6120       reinterpret_cast<Address>(p);
6121   remembered_unmapped_pages_index_++;
6122   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
6123 }
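// Only the low offset bits are XORed (the mask is Page::kPageSize - 1), so the
// page-aligned base survives in the dump and the original value can be
// recovered by XORing with the same tag.  A tiny worked sketch (hypothetical
// page size and address):
//
//   const uintptr_t kPageSize = 1 << 20;
//   uintptr_t page = 0x3f500000;  // page-aligned address
//   uintptr_t tagged = page ^ (0xc1ead & (kPageSize - 1));  // == 0x3f5c1ead
//   // (tagged & ~(kPageSize - 1)) == page, and
//   // (tagged ^ (0xc1ead & (kPageSize - 1))) == page.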
6124
6125
6126 void Heap::ClearObjectStats(bool clear_last_time_stats) {
6127   memset(object_counts_, 0, sizeof(object_counts_));
6128   memset(object_sizes_, 0, sizeof(object_sizes_));
6129   if (clear_last_time_stats) {
6130     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
6131     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
6132   }
6133 }
6134
6135
6136 static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
6137
6138
6139 void Heap::CheckpointObjectStats() {
6140   base::LockGuard<base::Mutex> lock_guard(
6141       checkpoint_object_stats_mutex.Pointer());
6142   Counters* counters = isolate()->counters();
6143 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
6144   counters->count_of_##name()->Increment(                \
6145       static_cast<int>(object_counts_[name]));           \
6146   counters->count_of_##name()->Decrement(                \
6147       static_cast<int>(object_counts_last_time_[name])); \
6148   counters->size_of_##name()->Increment(                 \
6149       static_cast<int>(object_sizes_[name]));            \
6150   counters->size_of_##name()->Decrement(                 \
6151       static_cast<int>(object_sizes_last_time_[name]));
6152   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6153 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6154   int index;
6155 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6156   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
6157   counters->count_of_CODE_TYPE_##name()->Increment(       \
6158       static_cast<int>(object_counts_[index]));           \
6159   counters->count_of_CODE_TYPE_##name()->Decrement(       \
6160       static_cast<int>(object_counts_last_time_[index])); \
6161   counters->size_of_CODE_TYPE_##name()->Increment(        \
6162       static_cast<int>(object_sizes_[index]));            \
6163   counters->size_of_CODE_TYPE_##name()->Decrement(        \
6164       static_cast<int>(object_sizes_last_time_[index]));
6165   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6166 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6167 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
6168   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
6169   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
6170       static_cast<int>(object_counts_[index]));           \
6171   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
6172       static_cast<int>(object_counts_last_time_[index])); \
6173   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
6174       static_cast<int>(object_sizes_[index]));            \
6175   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
6176       static_cast<int>(object_sizes_last_time_[index]));
6177   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
6178 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6179 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
6180   index =                                                                     \
6181       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
6182   counters->count_of_CODE_AGE_##name()->Increment(                            \
6183       static_cast<int>(object_counts_[index]));                               \
6184   counters->count_of_CODE_AGE_##name()->Decrement(                            \
6185       static_cast<int>(object_counts_last_time_[index]));                     \
6186   counters->size_of_CODE_AGE_##name()->Increment(                             \
6187       static_cast<int>(object_sizes_[index]));                                \
6188   counters->size_of_CODE_AGE_##name()->Decrement(                             \
6189       static_cast<int>(object_sizes_last_time_[index]));
6190   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
6191 #undef ADJUST_LAST_TIME_OBJECT_COUNT
6192
6193   MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
6194   MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
6195   ClearObjectStats();
6196 }
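// The macros above implement a delta checkpoint: each counter is incremented
// by the current tally and decremented by the previous snapshot, after which
// the snapshot is refreshed and the working tallies are cleared.  A minimal
// sketch of the same pattern for one counter (hypothetical names, not the
// Counters API):
//
//   struct StatCounter { int value; };
//
//   void Checkpoint(StatCounter* counter, size_t* current, size_t* last_time) {
//     counter->value += static_cast<int>(*current);    // add the new tally
//     counter->value -= static_cast<int>(*last_time);  // drop the old tally
//     *last_time = *current;                           // refresh the snapshot
//     *current = 0;                                    // start the next period
//   }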
6197 }
6198 }  // namespace v8::internal