1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "isolate-inl.h"
42 #include "mark-compact.h"
43 #include "natives.h"
44 #include "objects-visiting.h"
45 #include "objects-visiting-inl.h"
46 #include "once.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
49 #include "snapshot.h"
50 #include "store-buffer.h"
51 #include "utils/random-number-generator.h"
52 #include "v8conversions.h"
53 #include "v8threads.h"
54 #include "v8utils.h"
55 #include "vm-state-inl.h"
56 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "arm/regexp-macro-assembler-arm.h"
59 #endif
60 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
61 #include "regexp-macro-assembler.h"
62 #include "mips/regexp-macro-assembler-mips.h"
63 #endif
64
65 namespace v8 {
66 namespace internal {
67
68
69 Heap::Heap()
70     : isolate_(NULL),
71       code_range_size_(kIs64BitArch ? 512 * MB : 0),
72 // semispace_size_ should be a power of 2 and old_generation_size_ should be
73 // a multiple of Page::kPageSize.
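// Note (explanatory addition): the (kPointerSize / 4) factor below scales the
// default limits with the pointer width. kPointerSize is 4 on 32-bit targets
// and 8 on 64-bit targets, so the 64-bit defaults are twice the 32-bit ones.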
74       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
75       max_semispace_size_(8 * (kPointerSize / 4)  * MB),
76       initial_semispace_size_(Page::kPageSize),
77       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
78       max_executable_size_(256ul * (kPointerSize / 4) * MB),
79 // Variables set based on semispace_size_ and old_generation_size_ in
80 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
81 // Will be 4 * reserved_semispace_size_ to ensure that young
82 // generation can be aligned to its size.
83       maximum_committed_(0),
84       survived_since_last_expansion_(0),
85       sweep_generation_(0),
86       always_allocate_scope_depth_(0),
87       linear_allocation_scope_depth_(0),
88       contexts_disposed_(0),
89       global_ic_age_(0),
90       flush_monomorphic_ics_(false),
91       scan_on_scavenge_pages_(0),
92       new_space_(this),
93       old_pointer_space_(NULL),
94       old_data_space_(NULL),
95       code_space_(NULL),
96       map_space_(NULL),
97       cell_space_(NULL),
98       property_cell_space_(NULL),
99       lo_space_(NULL),
100       gc_state_(NOT_IN_GC),
101       gc_post_processing_depth_(0),
102       ms_count_(0),
103       gc_count_(0),
104       remembered_unmapped_pages_index_(0),
105       unflattened_strings_length_(0),
106 #ifdef DEBUG
107       allocation_timeout_(0),
108       disallow_allocation_failure_(false),
109 #endif  // DEBUG
110       new_space_high_promotion_mode_active_(false),
111       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
112       size_of_old_gen_at_last_old_space_gc_(0),
113       external_allocation_limit_(0),
114       amount_of_external_allocated_memory_(0),
115       amount_of_external_allocated_memory_at_last_global_gc_(0),
116       old_gen_exhausted_(false),
117       inline_allocation_disabled_(false),
118       store_buffer_rebuilder_(store_buffer()),
119       hidden_string_(NULL),
120       gc_safe_size_of_old_object_(NULL),
121       total_regexp_code_generated_(0),
122       tracer_(NULL),
123       young_survivors_after_last_gc_(0),
124       high_survival_rate_period_length_(0),
125       low_survival_rate_period_length_(0),
126       survival_rate_(0),
127       previous_survival_rate_trend_(Heap::STABLE),
128       survival_rate_trend_(Heap::STABLE),
129       max_gc_pause_(0.0),
130       total_gc_time_ms_(0.0),
131       max_alive_after_gc_(0),
132       min_in_mutator_(kMaxInt),
133       alive_after_last_gc_(0),
134       last_gc_end_timestamp_(0.0),
135       marking_time_(0.0),
136       sweeping_time_(0.0),
137       mark_compact_collector_(this),
138       store_buffer_(this),
139       marking_(this),
140       incremental_marking_(this),
141       number_idle_notifications_(0),
142       last_idle_notification_gc_count_(0),
143       last_idle_notification_gc_count_init_(false),
144       mark_sweeps_since_idle_round_started_(0),
145       gc_count_at_last_idle_gc_(0),
146       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
147       full_codegen_bytes_generated_(0),
148       crankshaft_codegen_bytes_generated_(0),
149       gcs_since_last_deopt_(0),
150 #ifdef VERIFY_HEAP
151       no_weak_object_verification_scope_depth_(0),
152 #endif
153       allocation_sites_scratchpad_length_(0),
154       promotion_queue_(this),
155       configured_(false),
156       external_string_table_(this),
157       chunks_queued_for_free_(NULL),
158       relocation_mutex_(NULL) {
159   // Allow build-time customization of the max semispace size. Building
160   // V8 with snapshots and a non-default max semispace size is much
161   // easier if you can define it as part of the build environment.
162 #if defined(V8_MAX_SEMISPACE_SIZE)
163   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
164 #endif
165
166   // Ensure old_generation_size_ is a multiple of kPageSize.
167   ASSERT(MB >= Page::kPageSize);
168
169   intptr_t max_virtual = OS::MaxVirtualMemory();
170
171   if (max_virtual > 0) {
172     if (code_range_size_ > 0) {
173       // Reserve no more than 1/8 of the memory for the code range.
174       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
175     }
176   }
177
178   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
179   native_contexts_list_ = NULL;
180   array_buffers_list_ = Smi::FromInt(0);
181   allocation_sites_list_ = Smi::FromInt(0);
182   // Put a dummy entry in the remembered pages so we can find the list in
183   // the minidump even if there are no real unmapped pages.
184   RememberUnmappedPage(NULL, false);
185
186   ClearObjectStats(true);
187 }
188
189
190 intptr_t Heap::Capacity() {
191   if (!HasBeenSetUp()) return 0;
192
193   return new_space_.Capacity() +
194       old_pointer_space_->Capacity() +
195       old_data_space_->Capacity() +
196       code_space_->Capacity() +
197       map_space_->Capacity() +
198       cell_space_->Capacity() +
199       property_cell_space_->Capacity();
200 }
201
202
203 intptr_t Heap::CommittedMemory() {
204   if (!HasBeenSetUp()) return 0;
205
206   return new_space_.CommittedMemory() +
207       old_pointer_space_->CommittedMemory() +
208       old_data_space_->CommittedMemory() +
209       code_space_->CommittedMemory() +
210       map_space_->CommittedMemory() +
211       cell_space_->CommittedMemory() +
212       property_cell_space_->CommittedMemory() +
213       lo_space_->Size();
214 }
215
216
217 size_t Heap::CommittedPhysicalMemory() {
218   if (!HasBeenSetUp()) return 0;
219
220   return new_space_.CommittedPhysicalMemory() +
221       old_pointer_space_->CommittedPhysicalMemory() +
222       old_data_space_->CommittedPhysicalMemory() +
223       code_space_->CommittedPhysicalMemory() +
224       map_space_->CommittedPhysicalMemory() +
225       cell_space_->CommittedPhysicalMemory() +
226       property_cell_space_->CommittedPhysicalMemory() +
227       lo_space_->CommittedPhysicalMemory();
228 }
229
230
231 intptr_t Heap::CommittedMemoryExecutable() {
232   if (!HasBeenSetUp()) return 0;
233
234   return isolate()->memory_allocator()->SizeExecutable();
235 }
236
237
238 void Heap::UpdateMaximumCommitted() {
239   if (!HasBeenSetUp()) return;
240
241   intptr_t current_committed_memory = CommittedMemory();
242   if (current_committed_memory > maximum_committed_) {
243     maximum_committed_ = current_committed_memory;
244   }
245 }
246
247
248 intptr_t Heap::Available() {
249   if (!HasBeenSetUp()) return 0;
250
251   return new_space_.Available() +
252       old_pointer_space_->Available() +
253       old_data_space_->Available() +
254       code_space_->Available() +
255       map_space_->Available() +
256       cell_space_->Available() +
257       property_cell_space_->Available();
258 }
259
260
261 bool Heap::HasBeenSetUp() {
262   return old_pointer_space_ != NULL &&
263          old_data_space_ != NULL &&
264          code_space_ != NULL &&
265          map_space_ != NULL &&
266          cell_space_ != NULL &&
267          property_cell_space_ != NULL &&
268          lo_space_ != NULL;
269 }
270
271
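// Returns the size of an old-space object in a way that is safe to use during
// GC: an intrusively marked object reports its size through IntrusiveMarking,
// everything else through its map.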
272 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
273   if (IntrusiveMarking::IsMarked(object)) {
274     return IntrusiveMarking::SizeOfMarkedObject(object);
275   }
276   return object->SizeFromMap(object->map());
277 }
278
279
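// Picks the collector for a failed allocation in |space|: any old-space
// request, the relevant flags, a reached promotion limit, old-generation
// exhaustion, or too little room to guarantee a successful scavenge all force
// a MARK_COMPACTOR; otherwise a SCAVENGER is used and *reason stays NULL.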
280 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
281                                               const char** reason) {
282   // Is global GC requested?
283   if (space != NEW_SPACE) {
284     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
285     *reason = "GC in old space requested";
286     return MARK_COMPACTOR;
287   }
288
289   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
290     *reason = "GC in old space forced by flags";
291     return MARK_COMPACTOR;
292   }
293
294   // Is enough data promoted to justify a global GC?
295   if (OldGenerationAllocationLimitReached()) {
296     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
297     *reason = "promotion limit reached";
298     return MARK_COMPACTOR;
299   }
300
301   // Have allocation in OLD and LO failed?
302   if (old_gen_exhausted_) {
303     isolate_->counters()->
304         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
305     *reason = "old generations exhausted";
306     return MARK_COMPACTOR;
307   }
308
309   // Is there enough space left in OLD to guarantee that a scavenge can
310   // succeed?
311   //
312   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
313   // for object promotion. It counts only the bytes that the memory
314   // allocator has not yet allocated from the OS and assigned to any space,
315   // and does not count available bytes already in the old space or code
316   // space.  Undercounting is safe---we may get an unrequested full GC when
317   // a scavenge would have succeeded.
318   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
319     isolate_->counters()->
320         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
321     *reason = "scavenge might not succeed";
322     return MARK_COMPACTOR;
323   }
324
325   // Default
326   *reason = NULL;
327   return SCAVENGER;
328 }
329
330
331 // TODO(1238405): Combine the infrastructure for --heap-stats and
332 // --log-gc to avoid the complicated preprocessor and flag testing.
333 void Heap::ReportStatisticsBeforeGC() {
334   // Heap::ReportHeapStatistics will also log NewSpace statistics when
335   // --log-gc is set.  The following logic is used to avoid double
336   // logging.
337 #ifdef DEBUG
338   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
339   if (FLAG_heap_stats) {
340     ReportHeapStatistics("Before GC");
341   } else if (FLAG_log_gc) {
342     new_space_.ReportStatistics();
343   }
344   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
345 #else
346   if (FLAG_log_gc) {
347     new_space_.CollectStatistics();
348     new_space_.ReportStatistics();
349     new_space_.ClearHistograms();
350   }
351 #endif  // DEBUG
352 }
353
354
355 void Heap::PrintShortHeapStatistics() {
356   if (!FLAG_trace_gc_verbose) return;
357   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB\n",
359            isolate_->memory_allocator()->Size() / KB,
360            isolate_->memory_allocator()->Available() / KB);
361   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
362                ", available: %6" V8_PTR_PREFIX "d KB"
363                ", committed: %6" V8_PTR_PREFIX "d KB\n",
364            new_space_.Size() / KB,
365            new_space_.Available() / KB,
366            new_space_.CommittedMemory() / KB);
367   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
368                ", available: %6" V8_PTR_PREFIX "d KB"
369                ", committed: %6" V8_PTR_PREFIX "d KB\n",
370            old_pointer_space_->SizeOfObjects() / KB,
371            old_pointer_space_->Available() / KB,
372            old_pointer_space_->CommittedMemory() / KB);
373   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
374                ", available: %6" V8_PTR_PREFIX "d KB"
375                ", committed: %6" V8_PTR_PREFIX "d KB\n",
376            old_data_space_->SizeOfObjects() / KB,
377            old_data_space_->Available() / KB,
378            old_data_space_->CommittedMemory() / KB);
379   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
380                ", available: %6" V8_PTR_PREFIX "d KB"
381                ", committed: %6" V8_PTR_PREFIX "d KB\n",
382            code_space_->SizeOfObjects() / KB,
383            code_space_->Available() / KB,
384            code_space_->CommittedMemory() / KB);
385   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
386                ", available: %6" V8_PTR_PREFIX "d KB"
387                ", committed: %6" V8_PTR_PREFIX "d KB\n",
388            map_space_->SizeOfObjects() / KB,
389            map_space_->Available() / KB,
390            map_space_->CommittedMemory() / KB);
391   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
392                ", available: %6" V8_PTR_PREFIX "d KB"
393                ", committed: %6" V8_PTR_PREFIX "d KB\n",
394            cell_space_->SizeOfObjects() / KB,
395            cell_space_->Available() / KB,
396            cell_space_->CommittedMemory() / KB);
397   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
398                ", available: %6" V8_PTR_PREFIX "d KB"
399                ", committed: %6" V8_PTR_PREFIX "d KB\n",
400            property_cell_space_->SizeOfObjects() / KB,
401            property_cell_space_->Available() / KB,
402            property_cell_space_->CommittedMemory() / KB);
403   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
404                ", available: %6" V8_PTR_PREFIX "d KB"
405                ", committed: %6" V8_PTR_PREFIX "d KB\n",
406            lo_space_->SizeOfObjects() / KB,
407            lo_space_->Available() / KB,
408            lo_space_->CommittedMemory() / KB);
409   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
410                ", available: %6" V8_PTR_PREFIX "d KB"
411                ", committed: %6" V8_PTR_PREFIX "d KB\n",
412            this->SizeOfObjects() / KB,
413            this->Available() / KB,
414            this->CommittedMemory() / KB);
415   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
416            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
417   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
418 }
419
420
421 // TODO(1238405): Combine the infrastructure for --heap-stats and
422 // --log-gc to avoid the complicated preprocessor and flag testing.
423 void Heap::ReportStatisticsAfterGC() {
424   // As before the GC, we use some complicated logic to ensure that
425   // NewSpace statistics are logged exactly once when --log-gc is turned on.
426 #if defined(DEBUG)
427   if (FLAG_heap_stats) {
428     new_space_.CollectStatistics();
429     ReportHeapStatistics("After GC");
430   } else if (FLAG_log_gc) {
431     new_space_.ReportStatistics();
432   }
433 #else
434   if (FLAG_log_gc) new_space_.ReportStatistics();
435 #endif  // DEBUG
436 }
437
438
439 void Heap::GarbageCollectionPrologue() {
440   {  AllowHeapAllocation for_the_first_part_of_prologue;
441     ClearJSFunctionResultCaches();
442     gc_count_++;
443     unflattened_strings_length_ = 0;
444
445     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
446       mark_compact_collector()->EnableCodeFlushing(true);
447     }
448
449 #ifdef VERIFY_HEAP
450     if (FLAG_verify_heap) {
451       Verify();
452     }
453 #endif
454   }
455
456   UpdateMaximumCommitted();
457
458 #ifdef DEBUG
459   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
460
461   if (FLAG_gc_verbose) Print();
462
463   ReportStatisticsBeforeGC();
464 #endif  // DEBUG
465
466   store_buffer()->GCPrologue();
467
468   if (isolate()->concurrent_osr_enabled()) {
469     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
470   }
471 }
472
473
474 intptr_t Heap::SizeOfObjects() {
475   intptr_t total = 0;
476   AllSpaces spaces(this);
477   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
478     total += space->SizeOfObjects();
479   }
480   return total;
481 }
482
483
484 void Heap::ClearAllICsByKind(Code::Kind kind) {
485   HeapObjectIterator it(code_space());
486
487   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
488     Code* code = Code::cast(object);
489     Code::Kind current_kind = code->kind();
490     if (current_kind == Code::FUNCTION ||
491         current_kind == Code::OPTIMIZED_FUNCTION) {
492       code->ClearInlineCaches(kind);
493     }
494   }
495 }
496
497
498 void Heap::RepairFreeListsAfterBoot() {
499   PagedSpaces spaces(this);
500   for (PagedSpace* space = spaces.next();
501        space != NULL;
502        space = spaces.next()) {
503     space->RepairFreeListsAfterBoot();
504   }
505 }
506
507
508 void Heap::ProcessPretenuringFeedback() {
509   if (FLAG_allocation_site_pretenuring) {
510     int tenure_decisions = 0;
511     int dont_tenure_decisions = 0;
512     int allocation_mementos_found = 0;
513     int allocation_sites = 0;
514     int active_allocation_sites = 0;
515
516     // If the scratchpad overflowed, we have to iterate over the allocation
517     // sites list.
518     bool use_scratchpad =
519         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize;
520
521     int i = 0;
522     Object* list_element = allocation_sites_list();
523     bool trigger_deoptimization = false;
524     while (use_scratchpad ?
525               i < allocation_sites_scratchpad_length_ :
526               list_element->IsAllocationSite()) {
527       AllocationSite* site = use_scratchpad ?
528           AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
529           AllocationSite::cast(list_element);
530       allocation_mementos_found += site->memento_found_count();
531       if (site->memento_found_count() > 0) {
532         active_allocation_sites++;
533       }
534       if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
535       if (site->GetPretenureMode() == TENURED) {
536         tenure_decisions++;
537       } else {
538         dont_tenure_decisions++;
539       }
540       allocation_sites++;
541       if (use_scratchpad) {
542         i++;
543       } else {
544         list_element = site->weak_next();
545       }
546     }
547
548     if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
549
550     FlushAllocationSitesScratchpad();
551
552     if (FLAG_trace_pretenuring_statistics &&
553         (allocation_mementos_found > 0 ||
554          tenure_decisions > 0 ||
555          dont_tenure_decisions > 0)) {
556       PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
557              "#mementos, #tenure decisions, #donttenure decisions) "
558              "(%s, %d, %d, %d, %d, %d)\n",
559              use_scratchpad ? "use scratchpad" : "use list",
560              allocation_sites,
561              active_allocation_sites,
562              allocation_mementos_found,
563              tenure_decisions,
564              dont_tenure_decisions);
565     }
566   }
567 }
568
569
570 void Heap::GarbageCollectionEpilogue() {
571   store_buffer()->GCEpilogue();
572
573   // In release mode, we only zap the from space under heap verification.
574   if (Heap::ShouldZapGarbage()) {
575     ZapFromSpace();
576   }
577
578 #ifdef VERIFY_HEAP
579   if (FLAG_verify_heap) {
580     Verify();
581   }
582 #endif
583
584   AllowHeapAllocation for_the_rest_of_the_epilogue;
585
586 #ifdef DEBUG
587   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
588   if (FLAG_print_handles) PrintHandles();
589   if (FLAG_gc_verbose) Print();
590   if (FLAG_code_stats) ReportCodeStatistics("After GC");
591 #endif
592   if (FLAG_deopt_every_n_garbage_collections > 0) {
593     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
594       Deoptimizer::DeoptimizeAll(isolate());
595       gcs_since_last_deopt_ = 0;
596     }
597   }
598
599   UpdateMaximumCommitted();
600
601   isolate_->counters()->alive_after_last_gc()->Set(
602       static_cast<int>(SizeOfObjects()));
603
604   isolate_->counters()->string_table_capacity()->Set(
605       string_table()->Capacity());
606   isolate_->counters()->number_of_symbols()->Set(
607       string_table()->NumberOfElements());
608
609   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
610     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
611         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
612             (crankshaft_codegen_bytes_generated_
613             + full_codegen_bytes_generated_)));
614   }
615
616   if (CommittedMemory() > 0) {
617     isolate_->counters()->external_fragmentation_total()->AddSample(
618         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
619
620     isolate_->counters()->heap_fraction_new_space()->
621         AddSample(static_cast<int>(
622             (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
623     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
624         static_cast<int>(
625             (old_pointer_space()->CommittedMemory() * 100.0) /
626             CommittedMemory()));
627     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
628         static_cast<int>(
629             (old_data_space()->CommittedMemory() * 100.0) /
630             CommittedMemory()));
631     isolate_->counters()->heap_fraction_code_space()->
632         AddSample(static_cast<int>(
633             (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
634     isolate_->counters()->heap_fraction_map_space()->AddSample(
635         static_cast<int>(
636             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
637     isolate_->counters()->heap_fraction_cell_space()->AddSample(
638         static_cast<int>(
639             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
640     isolate_->counters()->heap_fraction_property_cell_space()->
641         AddSample(static_cast<int>(
642             (property_cell_space()->CommittedMemory() * 100.0) /
643             CommittedMemory()));
644     isolate_->counters()->heap_fraction_lo_space()->
645         AddSample(static_cast<int>(
646             (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
647
648     isolate_->counters()->heap_sample_total_committed()->AddSample(
649         static_cast<int>(CommittedMemory() / KB));
650     isolate_->counters()->heap_sample_total_used()->AddSample(
651         static_cast<int>(SizeOfObjects() / KB));
652     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
653         static_cast<int>(map_space()->CommittedMemory() / KB));
654     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
655         static_cast<int>(cell_space()->CommittedMemory() / KB));
656     isolate_->counters()->
657         heap_sample_property_cell_space_committed()->
658             AddSample(static_cast<int>(
659                 property_cell_space()->CommittedMemory() / KB));
660     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
661         static_cast<int>(code_space()->CommittedMemory() / KB));
662
663     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
664         static_cast<int>(MaximumCommittedMemory() / KB));
665   }
666
667 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
668   isolate_->counters()->space##_bytes_available()->Set(                        \
669       static_cast<int>(space()->Available()));                                 \
670   isolate_->counters()->space##_bytes_committed()->Set(                        \
671       static_cast<int>(space()->CommittedMemory()));                           \
672   isolate_->counters()->space##_bytes_used()->Set(                             \
673       static_cast<int>(space()->SizeOfObjects()));
674 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
675   if (space()->CommittedMemory() > 0) {                                        \
676     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
677         static_cast<int>(100 -                                                 \
678             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
679   }
680 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
681   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
682   UPDATE_FRAGMENTATION_FOR_SPACE(space)
683
684   UPDATE_COUNTERS_FOR_SPACE(new_space)
685   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
686   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
687   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
688   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
689   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
690   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
691   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
692 #undef UPDATE_COUNTERS_FOR_SPACE
693 #undef UPDATE_FRAGMENTATION_FOR_SPACE
694 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
695
696 #if defined(DEBUG)
697   ReportStatisticsAfterGC();
698 #endif  // DEBUG
699 #ifdef ENABLE_DEBUGGER_SUPPORT
700   isolate_->debug()->AfterGarbageCollection();
701 #endif  // ENABLE_DEBUGGER_SUPPORT
702 }
703
704
705 void Heap::CollectAllGarbage(int flags,
706                              const char* gc_reason,
707                              const v8::GCCallbackFlags gc_callback_flags) {
708   // Since we are ignoring the return value, the exact choice of space does
709   // not matter, so long as we do not specify NEW_SPACE, which would not
710   // cause a full GC.
711   mark_compact_collector_.SetFlags(flags);
712   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
713   mark_compact_collector_.SetFlags(kNoGCFlags);
714 }
715
716
717 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
718   // Since we are ignoring the return value, the exact choice of space does
719   // not matter, so long as we do not specify NEW_SPACE, which would not
720   // cause a full GC.
721   // Major GC would invoke weak handle callbacks on weakly reachable
722   // handles, but won't collect weakly reachable objects until next
723   // major GC.  Therefore, if we collect aggressively and a weak handle callback
724   // has been invoked, we rerun a major GC to release objects which become
725   // garbage.
726   // Note: as weak callbacks can execute arbitrary code, we cannot
727   // hope that eventually there will be no weak callback invocations.
728   // Therefore stop recollecting after several attempts.
729   if (isolate()->concurrent_recompilation_enabled()) {
730     // The optimizing compiler may be unnecessarily holding on to memory.
731     DisallowHeapAllocation no_recursive_gc;
732     isolate()->optimizing_compiler_thread()->Flush();
733   }
734   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
735                                      kReduceMemoryFootprintMask);
736   isolate_->compilation_cache()->Clear();
737   const int kMaxNumberOfAttempts = 7;
738   const int kMinNumberOfAttempts = 2;
739   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
740     if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
741         attempt + 1 >= kMinNumberOfAttempts) {
742       break;
743     }
744   }
745   mark_compact_collector()->SetFlags(kNoGCFlags);
746   new_space_.Shrink();
747   UncommitFromSpace();
748   incremental_marking()->UncommitMarkingDeque();
749 }
750
751
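// Runs a collection with the given collector, downgrading a requested
// mark-compact to a scavenge when incremental marking is in progress but not
// yet ready to finish. May start incremental marking for the next cycle and
// returns whether the next GC is likely to collect more garbage.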
752 bool Heap::CollectGarbage(GarbageCollector collector,
753                           const char* gc_reason,
754                           const char* collector_reason,
755                           const v8::GCCallbackFlags gc_callback_flags) {
756   // The VM is in the GC state until exiting this function.
757   VMState<GC> state(isolate_);
758
759 #ifdef DEBUG
760   // Reset the allocation timeout to the GC interval, but make sure to
761   // allow at least a few allocations after a collection. The reason
762   // for this is that we have a lot of allocation sequences and we
763   // assume that a garbage collection will allow the subsequent
764   // allocation attempts to go through.
765   allocation_timeout_ = Max(6, FLAG_gc_interval);
766 #endif
767
768   // There may be an allocation memento behind every object in new space.
769   // If we evacuate a new space that is not full, or if we are on the last
770   // page of the new space, then there may be uninitialized memory behind the top
771   // pointer of the new space page. We store a filler object there to
772   // identify the unused space.
773   Address from_top = new_space_.top();
774   Address from_limit = new_space_.limit();
775   if (from_top < from_limit) {
776     int remaining_in_page = static_cast<int>(from_limit - from_top);
777     CreateFillerObjectAt(from_top, remaining_in_page);
778   }
779
780   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
781     if (FLAG_trace_incremental_marking) {
782       PrintF("[IncrementalMarking] Scavenge during marking.\n");
783     }
784   }
785
786   if (collector == MARK_COMPACTOR &&
787       !mark_compact_collector()->abort_incremental_marking() &&
788       !incremental_marking()->IsStopped() &&
789       !incremental_marking()->should_hurry() &&
790       FLAG_incremental_marking_steps) {
791     // Make progress in incremental marking.
792     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
793     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
794                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
795     if (!incremental_marking()->IsComplete()) {
796       if (FLAG_trace_incremental_marking) {
797         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
798       }
799       collector = SCAVENGER;
800       collector_reason = "incremental marking delaying mark-sweep";
801     }
802   }
803
804   bool next_gc_likely_to_collect_more = false;
805
806   { GCTracer tracer(this, gc_reason, collector_reason);
807     ASSERT(AllowHeapAllocation::IsAllowed());
808     DisallowHeapAllocation no_allocation_during_gc;
809     GarbageCollectionPrologue();
810     // The GC count was incremented in the prologue.  Tell the tracer about
811     // it.
812     tracer.set_gc_count(gc_count_);
813
814     // Tell the tracer which collector we've selected.
815     tracer.set_collector(collector);
816
817     {
818       HistogramTimerScope histogram_timer_scope(
819           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
820                                    : isolate_->counters()->gc_compactor());
821       next_gc_likely_to_collect_more =
822           PerformGarbageCollection(collector, &tracer, gc_callback_flags);
823     }
824
825     GarbageCollectionEpilogue();
826   }
827
828   // Start incremental marking for the next cycle. The heap snapshot
829   // generator needs incremental marking to stay off after it aborted.
830   if (!mark_compact_collector()->abort_incremental_marking() &&
831       incremental_marking()->IsStopped() &&
832       incremental_marking()->WorthActivating() &&
833       NextGCIsLikelyToBeFull()) {
834     incremental_marking()->Start();
835   }
836
837   return next_gc_likely_to_collect_more;
838 }
839
840
841 int Heap::NotifyContextDisposed() {
842   if (isolate()->concurrent_recompilation_enabled()) {
843     // Flush the queued recompilation tasks.
844     isolate()->optimizing_compiler_thread()->Flush();
845   }
846   flush_monomorphic_ics_ = true;
847   AgeInlineCaches();
848   return ++contexts_disposed_;
849 }
850
851
852 void Heap::PerformScavenge() {
853   GCTracer tracer(this, NULL, NULL);
854   if (incremental_marking()->IsStopped()) {
855     PerformGarbageCollection(SCAVENGER, &tracer);
856   } else {
857     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
858   }
859 }
860
861
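// Moves |len| elements of |array| from |src_index| to |dst_index|, recording
// write-barrier entries for new-space values that end up in an old-space
// array and notifying the incremental marker about the rewritten slots.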
862 void Heap::MoveElements(FixedArray* array,
863                         int dst_index,
864                         int src_index,
865                         int len) {
866   if (len == 0) return;
867
868   ASSERT(array->map() != fixed_cow_array_map());
869   Object** dst_objects = array->data_start() + dst_index;
870   OS::MemMove(dst_objects,
871               array->data_start() + src_index,
872               len * kPointerSize);
873   if (!InNewSpace(array)) {
874     for (int i = 0; i < len; i++) {
875       // TODO(hpayer): check store buffer for entries
876       if (InNewSpace(dst_objects[i])) {
877         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
878       }
879     }
880   }
881   incremental_marking()->RecordWrites(array);
882 }
883
884
885 #ifdef VERIFY_HEAP
886 // Helper class for verifying the string table.
887 class StringTableVerifier : public ObjectVisitor {
888  public:
889   void VisitPointers(Object** start, Object** end) {
890     // Visit all HeapObject pointers in [start, end).
891     for (Object** p = start; p < end; p++) {
892       if ((*p)->IsHeapObject()) {
893         // Check that the string is actually internalized.
894         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
895               (*p)->IsInternalizedString());
896       }
897     }
898   }
899 };
900
901
902 static void VerifyStringTable(Heap* heap) {
903   StringTableVerifier verifier;
904   heap->string_table()->IterateElements(&verifier);
905 }
906 #endif  // VERIFY_HEAP
907
908
909 static bool AbortIncrementalMarkingAndCollectGarbage(
910     Heap* heap,
911     AllocationSpace space,
912     const char* gc_reason = NULL) {
913   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
914   bool result = heap->CollectGarbage(space, gc_reason);
915   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
916   return result;
917 }
918
919
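// Tries to carve out sizes[space] bytes in each space (used before
// deserializing), retrying with garbage collections for up to kThreshold
// rounds. Each successful reservation is marked with a free list node and its
// address is stored in locations_out; running out of attempts is fatal.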
920 void Heap::ReserveSpace(int *sizes, Address *locations_out) {
921   bool gc_performed = true;
922   int counter = 0;
923   static const int kThreshold = 20;
924   while (gc_performed && counter++ < kThreshold) {
925     gc_performed = false;
926     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
927     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
928       if (sizes[space] != 0) {
929         MaybeObject* allocation;
930         if (space == NEW_SPACE) {
931           allocation = new_space()->AllocateRaw(sizes[space]);
932         } else {
933           allocation = paged_space(space)->AllocateRaw(sizes[space]);
934         }
935         FreeListNode* node;
936         if (!allocation->To<FreeListNode>(&node)) {
937           if (space == NEW_SPACE) {
938             Heap::CollectGarbage(NEW_SPACE,
939                                  "failed to reserve space in the new space");
940           } else {
941             AbortIncrementalMarkingAndCollectGarbage(
942                 this,
943                 static_cast<AllocationSpace>(space),
944                 "failed to reserve space in paged space");
945           }
946           gc_performed = true;
947           break;
948         } else {
949           // Mark with a free list node, in case we have a GC before
950           // deserializing.
951           node->set_size(this, sizes[space]);
952           locations_out[space] = node->address();
953         }
954       }
955     }
956   }
957
958   if (gc_performed) {
959     // Failed to reserve the space after several attempts.
960     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
961   }
962 }
963
964
965 void Heap::EnsureFromSpaceIsCommitted() {
966   if (new_space_.CommitFromSpaceIfNeeded()) return;
967
968   // Committing memory to from space failed.
969   // Memory is exhausted and we will die.
970   V8::FatalProcessOutOfMemory("Committing semi space failed.");
971 }
972
973
974 void Heap::ClearJSFunctionResultCaches() {
975   if (isolate_->bootstrapper()->IsActive()) return;
976
977   Object* context = native_contexts_list_;
978   while (!context->IsUndefined()) {
979     // Get the caches for this context. GC can happen when the context
980     // is not fully initialized, so the caches can be undefined.
981     Object* caches_or_undefined =
982         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
983     if (!caches_or_undefined->IsUndefined()) {
984       FixedArray* caches = FixedArray::cast(caches_or_undefined);
985       // Clear the caches:
986       int length = caches->length();
987       for (int i = 0; i < length; i++) {
988         JSFunctionResultCache::cast(caches->get(i))->Clear();
989       }
990     }
991     // Get the next context:
992     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
993   }
994 }
995
996
997 void Heap::ClearNormalizedMapCaches() {
998   if (isolate_->bootstrapper()->IsActive() &&
999       !incremental_marking()->IsMarking()) {
1000     return;
1001   }
1002
1003   Object* context = native_contexts_list_;
1004   while (!context->IsUndefined()) {
1005     // GC can happen when the context is not fully initialized,
1006     // so the cache can be undefined.
1007     Object* cache =
1008         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1009     if (!cache->IsUndefined()) {
1010       NormalizedMapCache::cast(cache)->Clear();
1011     }
1012     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1013   }
1014 }
1015
1016
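// Recomputes the young-generation survival rate as a percentage of the
// new-space size at the start of the GC, tracks how long it has stayed high
// or low, and classifies the trend as INCREASING, DECREASING or STABLE when
// the rate moves by more than kYoungSurvivalRateAllowedDeviation.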
1017 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
1018   if (start_new_space_size == 0) return;
1019
1020   double survival_rate =
1021       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
1022       start_new_space_size;
1023
1024   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1025     high_survival_rate_period_length_++;
1026   } else {
1027     high_survival_rate_period_length_ = 0;
1028   }
1029
1030   if (survival_rate < kYoungSurvivalRateLowThreshold) {
1031     low_survival_rate_period_length_++;
1032   } else {
1033     low_survival_rate_period_length_ = 0;
1034   }
1035
1036   double survival_rate_diff = survival_rate_ - survival_rate;
1037
1038   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
1039     set_survival_rate_trend(DECREASING);
1040   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
1041     set_survival_rate_trend(INCREASING);
1042   } else {
1043     set_survival_rate_trend(STABLE);
1044   }
1045
1046   survival_rate_ = survival_rate;
1047 }
1048
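// Runs one collection with the already selected collector: prologue
// callbacks, the mark-compact or scavenge itself, survival-rate and
// allocation-limit bookkeeping, weak-handle post-processing and epilogue
// callbacks. Returns whether the next GC is likely to collect more garbage.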
1049 bool Heap::PerformGarbageCollection(
1050     GarbageCollector collector,
1051     GCTracer* tracer,
1052     const v8::GCCallbackFlags gc_callback_flags) {
1053   bool next_gc_likely_to_collect_more = false;
1054
1055   if (collector != SCAVENGER) {
1056     PROFILE(isolate_, CodeMovingGCEvent());
1057   }
1058
1059 #ifdef VERIFY_HEAP
1060   if (FLAG_verify_heap) {
1061     VerifyStringTable(this);
1062   }
1063 #endif
1064
1065   GCType gc_type =
1066       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1067
1068   {
1069     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1070     VMState<EXTERNAL> state(isolate_);
1071     HandleScope handle_scope(isolate_);
1072     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1073   }
1074
1075   EnsureFromSpaceIsCommitted();
1076
1077   int start_new_space_size = Heap::new_space()->SizeAsInt();
1078
1079   if (IsHighSurvivalRate()) {
1080     // We speed up the incremental marker if it is running so that it
1081     // does not fall behind the rate of promotion, which would cause a
1082     // constantly growing old space.
1083     incremental_marking()->NotifyOfHighPromotionRate();
1084   }
1085
1086   if (collector == MARK_COMPACTOR) {
1087     // Perform mark-sweep with optional compaction.
1088     MarkCompact(tracer);
1089     sweep_generation_++;
1090
1091     UpdateSurvivalRateTrend(start_new_space_size);
1092
1093     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
1094
1095     old_generation_allocation_limit_ =
1096         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1097
1098     old_gen_exhausted_ = false;
1099   } else {
1100     tracer_ = tracer;
1101     Scavenge();
1102     tracer_ = NULL;
1103
1104     UpdateSurvivalRateTrend(start_new_space_size);
1105   }
1106
1107   if (!new_space_high_promotion_mode_active_ &&
1108       new_space_.Capacity() == new_space_.MaximumCapacity() &&
1109       IsStableOrIncreasingSurvivalTrend() &&
1110       IsHighSurvivalRate()) {
1111     // Stable high survival rates even though young generation is at
1112     // maximum capacity indicates that most objects will be promoted.
1113     // To decrease scavenger pauses and final mark-sweep pauses, we
1114     // have to limit maximal capacity of the young generation.
1115     SetNewSpaceHighPromotionModeActive(true);
1116     if (FLAG_trace_gc) {
1117       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1118                new_space_.InitialCapacity() / MB);
1119     }
1120     // The high promotion mode is our indicator to turn on pretenuring. We have
1121     // to deoptimize all optimized code in global pretenuring mode and all
1122     // code which should be tenured in local pretenuring mode.
1123     if (FLAG_pretenuring) {
1124       if (!FLAG_allocation_site_pretenuring) {
1125         isolate_->stack_guard()->FullDeopt();
1126       }
1127     }
1128   } else if (new_space_high_promotion_mode_active_ &&
1129       IsStableOrDecreasingSurvivalTrend() &&
1130       IsLowSurvivalRate()) {
1131     // Decreasing low survival rates might indicate that the above high
1132     // promotion mode is over and we should allow the young generation
1133     // to grow again.
1134     SetNewSpaceHighPromotionModeActive(false);
1135     if (FLAG_trace_gc) {
1136       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1137                new_space_.MaximumCapacity() / MB);
1138     }
1139     // Trigger deoptimization here to turn off global pretenuring as soon as
1140     // possible.
1141     if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
1142       isolate_->stack_guard()->FullDeopt();
1143     }
1144   }
1145
1146   if (new_space_high_promotion_mode_active_ &&
1147       new_space_.Capacity() > new_space_.InitialCapacity()) {
1148     new_space_.Shrink();
1149   }
1150
1151   isolate_->counters()->objs_since_last_young()->Set(0);
1152
1153   // Callbacks that fire after this point might trigger nested GCs and
1154   // restart incremental marking, so the assertion can't be moved down.
1155   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1156
1157   gc_post_processing_depth_++;
1158   { AllowHeapAllocation allow_allocation;
1159     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1160     next_gc_likely_to_collect_more =
1161         isolate_->global_handles()->PostGarbageCollectionProcessing(
1162             collector, tracer);
1163   }
1164   gc_post_processing_depth_--;
1165
1166   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1167
1168   // Update relocatables.
1169   Relocatable::PostGarbageCollectionProcessing(isolate_);
1170
1171   if (collector == MARK_COMPACTOR) {
1172     // Register the amount of external allocated memory.
1173     amount_of_external_allocated_memory_at_last_global_gc_ =
1174         amount_of_external_allocated_memory_;
1175   }
1176
1177   {
1178     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1179     VMState<EXTERNAL> state(isolate_);
1180     HandleScope handle_scope(isolate_);
1181     CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1182   }
1183
1184 #ifdef VERIFY_HEAP
1185   if (FLAG_verify_heap) {
1186     VerifyStringTable(this);
1187   }
1188 #endif
1189
1190   return next_gc_likely_to_collect_more;
1191 }
1192
1193
1194 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1195   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1196     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1197       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1198         v8::GCPrologueCallback callback =
1199             reinterpret_cast<v8::GCPrologueCallback>(
1200                 gc_prologue_callbacks_[i].callback);
1201         callback(gc_type, flags);
1202       } else {
1203         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1204         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1205       }
1206     }
1207   }
1208 }
1209
1210
1211 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1212                                    GCCallbackFlags gc_callback_flags) {
1213   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1214     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1215       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1216         v8::GCPrologueCallback callback =
1217             reinterpret_cast<v8::GCPrologueCallback>(
1218                 gc_epilogue_callbacks_[i].callback);
1219         callback(gc_type, gc_callback_flags);
1220       } else {
1221         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1222         gc_epilogue_callbacks_[i].callback(
1223             isolate, gc_type, gc_callback_flags);
1224       }
1225     }
1226   }
1227 }
1228
1229
1230 void Heap::MarkCompact(GCTracer* tracer) {
1231   gc_state_ = MARK_COMPACT;
1232   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1233
1234   uint64_t size_of_objects_before_gc = SizeOfObjects();
1235
1236   mark_compact_collector_.Prepare(tracer);
1237
1238   ms_count_++;
1239   tracer->set_full_gc_count(ms_count_);
1240
1241   MarkCompactPrologue();
1242
1243   mark_compact_collector_.CollectGarbage();
1244
1245   LOG(isolate_, ResourceEvent("markcompact", "end"));
1246
1247   gc_state_ = NOT_IN_GC;
1248
1249   isolate_->counters()->objs_since_last_full()->Set(0);
1250
1251   flush_monomorphic_ics_ = false;
1252
1253   if (FLAG_allocation_site_pretenuring) {
1254     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1255   }
1256 }
1257
1258
1259 void Heap::MarkCompactPrologue() {
1260   // At any old GC clear the keyed lookup cache to enable collection of unused
1261   // maps.
1262   isolate_->keyed_lookup_cache()->Clear();
1263   isolate_->context_slot_cache()->Clear();
1264   isolate_->descriptor_lookup_cache()->Clear();
1265   RegExpResultsCache::Clear(string_split_cache());
1266   RegExpResultsCache::Clear(regexp_multiple_cache());
1267
1268   isolate_->compilation_cache()->MarkCompactPrologue();
1269
1270   CompletelyClearInstanceofCache();
1271
1272   FlushNumberStringCache();
1273   if (FLAG_cleanup_code_caches_at_gc) {
1274     polymorphic_code_cache()->set_cache(undefined_value());
1275   }
1276
1277   ClearNormalizedMapCaches();
1278 }
1279
1280
1281 // Helper class for copying HeapObjects
1282 class ScavengeVisitor: public ObjectVisitor {
1283  public:
1284   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1285
1286   void VisitPointer(Object** p) { ScavengePointer(p); }
1287
1288   void VisitPointers(Object** start, Object** end) {
1289     // Copy all HeapObject pointers in [start, end)
1290     for (Object** p = start; p < end; p++) ScavengePointer(p);
1291   }
1292
1293  private:
1294   void ScavengePointer(Object** p) {
1295     Object* object = *p;
1296     if (!heap_->InNewSpace(object)) return;
1297     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1298                          reinterpret_cast<HeapObject*>(object));
1299   }
1300
1301   Heap* heap_;
1302 };
1303
1304
1305 #ifdef VERIFY_HEAP
1306 // Visitor class to verify pointers in code or data space do not point into
1307 // new space.
1308 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1309  public:
1310   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1311   void VisitPointers(Object** start, Object**end) {
1312     for (Object** current = start; current < end; current++) {
1313       if ((*current)->IsHeapObject()) {
1314         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1315       }
1316     }
1317   }
1318
1319  private:
1320   Heap* heap_;
1321 };
1322
1323
1324 static void VerifyNonPointerSpacePointers(Heap* heap) {
1325   // Verify that there are no pointers to new space in spaces where we
1326   // do not expect them.
1327   VerifyNonPointerSpacePointersVisitor v(heap);
1328   HeapObjectIterator code_it(heap->code_space());
1329   for (HeapObject* object = code_it.Next();
1330        object != NULL; object = code_it.Next())
1331     object->Iterate(&v);
1332
1333   // The old data space is normally swept conservatively, so the iterator
1334   // does not work on it and we normally skip the next bit.
1335   if (!heap->old_data_space()->was_swept_conservatively()) {
1336     HeapObjectIterator data_it(heap->old_data_space());
1337     for (HeapObject* object = data_it.Next();
1338          object != NULL; object = data_it.Next())
1339       object->Iterate(&v);
1340   }
1341 }
1342 #endif  // VERIFY_HEAP
1343
1344
1345 void Heap::CheckNewSpaceExpansionCriteria() {
1346   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1347       survived_since_last_expansion_ > new_space_.Capacity() &&
1348       !new_space_high_promotion_mode_active_) {
1349     // Grow the size of new space if there is room to grow, enough data
1350     // has survived scavenge since the last expansion and we are not in
1351     // high promotion mode.
1352     new_space_.Grow();
1353     survived_since_last_expansion_ = 0;
1354   }
1355 }
1356
1357
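// True for new-space objects that have not been evacuated yet, i.e. whose
// map word does not carry a forwarding address.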
1358 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1359   return heap->InNewSpace(*p) &&
1360       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1361 }
1362
1363
1364 void Heap::ScavengeStoreBufferCallback(
1365     Heap* heap,
1366     MemoryChunk* page,
1367     StoreBufferEvent event) {
1368   heap->store_buffer_rebuilder_.Callback(page, event);
1369 }
1370
1371
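// Rebuilds store buffer contents while old-to-new pointers are being scanned
// during a scavenge: a page whose entries would eat too large a share of the
// remaining store buffer, or that overflows it outright, is switched to
// scan-on-scavenge mode and its freshly added entries are discarded.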
1372 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1373   if (event == kStoreBufferStartScanningPagesEvent) {
1374     start_of_current_page_ = NULL;
1375     current_page_ = NULL;
1376   } else if (event == kStoreBufferScanningPageEvent) {
1377     if (current_page_ != NULL) {
1378       // If this page already overflowed the store buffer during this iteration,
1379       if (current_page_->scan_on_scavenge()) {
1380         // then wipe out the entries that have been added for it.
1381         store_buffer_->SetTop(start_of_current_page_);
1382       } else if (store_buffer_->Top() - start_of_current_page_ >=
1383                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1384         // Did we find too many pointers in the previous page?  The heuristic is
1385         // that no page can take more than 1/5 of the remaining slots in the store
1386         // buffer.
1387         current_page_->set_scan_on_scavenge(true);
1388         store_buffer_->SetTop(start_of_current_page_);
1389       } else {
1390         // In this case the page we scanned took a reasonable number of slots in
1391         // the store buffer.  It has now been rehabilitated and is no longer
1392         // marked scan_on_scavenge.
1393         ASSERT(!current_page_->scan_on_scavenge());
1394       }
1395     }
1396     start_of_current_page_ = store_buffer_->Top();
1397     current_page_ = page;
1398   } else if (event == kStoreBufferFullEvent) {
1399     // The current page overflowed the store buffer again.  Wipe out its entries
1400     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1401     // several times while scanning.
1402     if (current_page_ == NULL) {
1403       // Store Buffer overflowed while scanning promoted objects.  These are not
1404       // in any particular page, though they are likely to be clustered by the
1405       // allocation routines.
1406       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1407     } else {
1408       // Store Buffer overflowed while scanning a particular old space page for
1409       // pointers to new space.
1410       ASSERT(current_page_ == page);
1411       ASSERT(page != NULL);
1412       current_page_->set_scan_on_scavenge(true);
1413       ASSERT(start_of_current_page_ != store_buffer_->Top());
1414       store_buffer_->SetTop(start_of_current_page_);
1415     }
1416   } else {
1417     UNREACHABLE();
1418   }
1419 }
1420
1421
1422 void PromotionQueue::Initialize() {
1423   // Assumes that a NewSpacePage fits a whole number of promotion queue
1424   // entries (where each is a pair of intptr_t).  This allows us to simplify
1425   // the test for when to switch pages.
1426   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1427          == 0);
1428   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1429   front_ = rear_ =
1430       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1431   emergency_stack_ = NULL;
1432   guard_ = false;
1433 }
1434
1435
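// Copies the promotion queue entries that live on the current to-space page
// onto a freshly allocated emergency stack and moves rear_ past them, so the
// page itself is no longer needed for queue storage.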
1436 void PromotionQueue::RelocateQueueHead() {
1437   ASSERT(emergency_stack_ == NULL);
1438
1439   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1440   intptr_t* head_start = rear_;
1441   intptr_t* head_end =
1442       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1443
1444   int entries_count =
1445       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1446
1447   emergency_stack_ = new List<Entry>(2 * entries_count);
1448
1449   while (head_start != head_end) {
1450     int size = static_cast<int>(*(head_start++));
1451     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1452     emergency_stack_->Add(Entry(obj, size));
1453   }
1454   rear_ = head_end;
1455 }
1456
1457
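// Decides which weakly referenced objects survive a scavenge: objects outside
// from-space are kept, evacuated objects are replaced by their forwarding
// address, and anything still sitting in from-space is dropped (NULL).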
1458 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1459  public:
1460   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1461
1462   virtual Object* RetainAs(Object* object) {
1463     if (!heap_->InFromSpace(object)) {
1464       return object;
1465     }
1466
1467     MapWord map_word = HeapObject::cast(object)->map_word();
1468     if (map_word.IsForwardingAddress()) {
1469       return map_word.ToForwardingAddress();
1470     }
1471     return NULL;
1472   }
1473
1474  private:
1475   Heap* heap_;
1476 };
1477
1478
1479 void Heap::Scavenge() {
1480   RelocationLock relocation_lock(this);
1481
1482 #ifdef VERIFY_HEAP
1483   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1484 #endif
1485
1486   gc_state_ = SCAVENGE;
1487
1488   // Implements Cheney's copying algorithm.
1489   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1490
1491   // Clear descriptor cache.
1492   isolate_->descriptor_lookup_cache()->Clear();
1493
1494   // Used for updating survived_since_last_expansion_ at function end.
1495   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1496
1497   CheckNewSpaceExpansionCriteria();
1498
1499   SelectScavengingVisitorsTable();
1500
1501   incremental_marking()->PrepareForScavenge();
1502
1503   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1504   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1505
1506   // Flip the semispaces.  After flipping, to space is empty, from space has
1507   // live objects.
1508   new_space_.Flip();
1509   new_space_.ResetAllocationInfo();
1510
1511   // We need to sweep newly copied objects which can be either in the
1512   // to space or promoted to the old generation.  For to-space
1513   // objects, we treat the bottom of the to space as a queue.  Newly
1514   // copied and unswept objects lie between a 'front' mark and the
1515   // allocation pointer.
1516   //
1517   // Promoted objects can go into various old-generation spaces, and
1518   // can be allocated internally in the spaces (from the free list).
1519   // We treat the top of the to space as a queue of addresses of
1520   // promoted objects.  The addresses of newly promoted and unswept
1521   // objects lie between a 'front' mark and a 'rear' mark that is
1522   // updated as a side effect of promoting an object.
1523   //
1524   // There is guaranteed to be enough room at the top of the to space
1525   // for the addresses of promoted objects: every object promoted
1526   // frees up its size in bytes from the top of the new space, and
1527   // objects are at least one pointer in size.
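       //
       // Roughly, to space then looks like this (addresses grow to the right):
       //
       //   | processed copies | unprocessed copies | free | promotion queue |
       //   ^                  ^                    ^      ^                 ^
       //   ToSpaceStart       new_space_front      top    rear_         front_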
1528   Address new_space_front = new_space_.ToSpaceStart();
1529   promotion_queue_.Initialize();
1530
1531 #ifdef DEBUG
1532   store_buffer()->Clean();
1533 #endif
1534
1535   ScavengeVisitor scavenge_visitor(this);
1536   // Copy roots.
1537   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1538
1539   // Copy objects reachable from the old generation.
1540   {
1541     StoreBufferRebuildScope scope(this,
1542                                   store_buffer(),
1543                                   &ScavengeStoreBufferCallback);
1544     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1545   }
1546
1547   // Copy objects reachable from simple cells by scavenging cell values
1548   // directly.
1549   HeapObjectIterator cell_iterator(cell_space_);
1550   for (HeapObject* heap_object = cell_iterator.Next();
1551        heap_object != NULL;
1552        heap_object = cell_iterator.Next()) {
1553     if (heap_object->IsCell()) {
1554       Cell* cell = Cell::cast(heap_object);
1555       Address value_address = cell->ValueAddress();
1556       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1557     }
1558   }
1559
1560   // Copy objects reachable from global property cells by scavenging global
1561   // property cell values directly.
1562   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1563   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1564        heap_object != NULL;
1565        heap_object = js_global_property_cell_iterator.Next()) {
1566     if (heap_object->IsPropertyCell()) {
1567       PropertyCell* cell = PropertyCell::cast(heap_object);
1568       Address value_address = cell->ValueAddress();
1569       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1570       Address type_address = cell->TypeAddress();
1571       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1572     }
1573   }
1574
1575   // Copy objects reachable from the code flushing candidates list.
1576   MarkCompactCollector* collector = mark_compact_collector();
1577   if (collector->is_code_flushing_enabled()) {
1578     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1579   }
1580
1581   // Scavenge objects reachable from the native contexts list directly.
1582   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1583
1584   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1585
1586   while (isolate()->global_handles()->IterateObjectGroups(
1587       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1588     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1589   }
1590   isolate()->global_handles()->RemoveObjectGroups();
1591   isolate()->global_handles()->RemoveImplicitRefGroups();
1592
1593   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1594       &IsUnscavengedHeapObject);
1595   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1596       &scavenge_visitor);
1597   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1598
1599   UpdateNewSpaceReferencesInExternalStringTable(
1600       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1601
1602   promotion_queue_.Destroy();
1603
1604   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1605
1606   ScavengeWeakObjectRetainer weak_object_retainer(this);
1607   ProcessWeakReferences(&weak_object_retainer);
1608
1609   ASSERT(new_space_front == new_space_.top());
1610
1611   // Set age mark.
1612   new_space_.set_age_mark(new_space_.top());
1613
1614   new_space_.LowerInlineAllocationLimit(
1615       new_space_.inline_allocation_limit_step());
1616
1617   // Update how much has survived scavenge.
1618   IncrementYoungSurvivorsCounter(static_cast<int>(
1619       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1620
1621   ProcessPretenuringFeedback();
1622
1623   LOG(isolate_, ResourceEvent("scavenge", "end"));
1624
1625   gc_state_ = NOT_IN_GC;
1626
1627   scavenges_since_last_idle_round_++;
1628 }
1629
1630
1631 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1632                                                                 Object** p) {
1633   MapWord first_word = HeapObject::cast(*p)->map_word();
1634
1635   if (!first_word.IsForwardingAddress()) {
1636     // An unreachable external string can be finalized.
1637     heap->FinalizeExternalString(String::cast(*p));
1638     return NULL;
1639   }
1640
1641   // String is still reachable.
1642   return String::cast(first_word.ToForwardingAddress());
1643 }
1644
1645
1646 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1647     ExternalStringTableUpdaterCallback updater_func) {
1648 #ifdef VERIFY_HEAP
1649   if (FLAG_verify_heap) {
1650     external_string_table_.Verify();
1651   }
1652 #endif
1653
1654   if (external_string_table_.new_space_strings_.is_empty()) return;
1655
1656   Object** start = &external_string_table_.new_space_strings_[0];
1657   Object** end = start + external_string_table_.new_space_strings_.length();
1658   Object** last = start;
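       // 'last' is the write cursor used to compact the list of surviving
       // new-space strings in place; promoted strings are moved to the
       // old-space string list instead.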
1659
1660   for (Object** p = start; p < end; ++p) {
1661     ASSERT(InFromSpace(*p));
1662     String* target = updater_func(this, p);
1663
1664     if (target == NULL) continue;
1665
1666     ASSERT(target->IsExternalString());
1667
1668     if (InNewSpace(target)) {
1669       // String is still in new space.  Update the table entry.
1670       *last = target;
1671       ++last;
1672     } else {
1673       // String got promoted.  Move it to the old string list.
1674       external_string_table_.AddOldString(target);
1675     }
1676   }
1677
1678   ASSERT(last <= end);
1679   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1680 }
1681
1682
1683 void Heap::UpdateReferencesInExternalStringTable(
1684     ExternalStringTableUpdaterCallback updater_func) {
1685
1686   // Update old space string references.
1687   if (external_string_table_.old_space_strings_.length() > 0) {
1688     Object** start = &external_string_table_.old_space_strings_[0];
1689     Object** end = start + external_string_table_.old_space_strings_.length();
1690     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1691   }
1692
1693   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1694 }
1695
1696
1697 template <class T>
1698 struct WeakListVisitor;
1699
1700
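     // Visits a weak list threaded through objects of type T (the link
     // accessors are supplied by WeakListVisitor<T>), drops the elements that
     // the retainer does not keep alive, relinks the survivors and returns the
     // new list head (or undefined if the list becomes empty).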
1701 template <class T>
1702 static Object* VisitWeakList(Heap* heap,
1703                              Object* list,
1704                              WeakObjectRetainer* retainer,
1705                              bool record_slots) {
1706   Object* undefined = heap->undefined_value();
1707   Object* head = undefined;
1708   T* tail = NULL;
1709   MarkCompactCollector* collector = heap->mark_compact_collector();
1710   while (list != undefined) {
1711     // Check whether to keep the candidate in the list.
1712     T* candidate = reinterpret_cast<T*>(list);
1713     Object* retained = retainer->RetainAs(list);
1714     if (retained != NULL) {
1715       if (head == undefined) {
1716         // First element in the list.
1717         head = retained;
1718       } else {
1719         // Subsequent elements in the list.
1720         ASSERT(tail != NULL);
1721         WeakListVisitor<T>::SetWeakNext(tail, retained);
1722         if (record_slots) {
1723           Object** next_slot =
1724             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1725           collector->RecordSlot(next_slot, next_slot, retained);
1726         }
1727       }
1728       // Retained object is new tail.
1729       ASSERT(!retained->IsUndefined());
1730       candidate = reinterpret_cast<T*>(retained);
1731       tail = candidate;
1732
1733
1734       // tail is a live object, visit it.
1735       WeakListVisitor<T>::VisitLiveObject(
1736           heap, tail, retainer, record_slots);
1737     } else {
1738       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1739     }
1740
1741     // Move to next element in the list.
1742     list = WeakListVisitor<T>::WeakNext(candidate);
1743   }
1744
1745   // Terminate the list if there are one or more elements.
1746   if (tail != NULL) {
1747     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1748   }
1749   return head;
1750 }
1751
1752
1753 template<>
1754 struct WeakListVisitor<JSFunction> {
1755   static void SetWeakNext(JSFunction* function, Object* next) {
1756     function->set_next_function_link(next);
1757   }
1758
1759   static Object* WeakNext(JSFunction* function) {
1760     return function->next_function_link();
1761   }
1762
1763   static int WeakNextOffset() {
1764     return JSFunction::kNextFunctionLinkOffset;
1765   }
1766
1767   static void VisitLiveObject(Heap*, JSFunction*,
1768                               WeakObjectRetainer*, bool) {
1769   }
1770
1771   static void VisitPhantomObject(Heap*, JSFunction*) {
1772   }
1773 };
1774
1775
1776 template<>
1777 struct WeakListVisitor<Code> {
1778   static void SetWeakNext(Code* code, Object* next) {
1779     code->set_next_code_link(next);
1780   }
1781
1782   static Object* WeakNext(Code* code) {
1783     return code->next_code_link();
1784   }
1785
1786   static int WeakNextOffset() {
1787     return Code::kNextCodeLinkOffset;
1788   }
1789
1790   static void VisitLiveObject(Heap*, Code*,
1791                               WeakObjectRetainer*, bool) {
1792   }
1793
1794   static void VisitPhantomObject(Heap*, Code*) {
1795   }
1796 };
1797
1798
1799 template<>
1800 struct WeakListVisitor<Context> {
1801   static void SetWeakNext(Context* context, Object* next) {
1802     context->set(Context::NEXT_CONTEXT_LINK,
1803                  next,
1804                  UPDATE_WRITE_BARRIER);
1805   }
1806
1807   static Object* WeakNext(Context* context) {
1808     return context->get(Context::NEXT_CONTEXT_LINK);
1809   }
1810
1811   static void VisitLiveObject(Heap* heap,
1812                               Context* context,
1813                               WeakObjectRetainer* retainer,
1814                               bool record_slots) {
1815     // Process the three weak lists linked off the context.
1816     DoWeakList<JSFunction>(heap, context, retainer, record_slots,
1817         Context::OPTIMIZED_FUNCTIONS_LIST);
1818     DoWeakList<Code>(heap, context, retainer, record_slots,
1819         Context::OPTIMIZED_CODE_LIST);
1820     DoWeakList<Code>(heap, context, retainer, record_slots,
1821         Context::DEOPTIMIZED_CODE_LIST);
1822   }
1823
1824   template<class T>
1825   static void DoWeakList(Heap* heap,
1826                          Context* context,
1827                          WeakObjectRetainer* retainer,
1828                          bool record_slots,
1829                          int index) {
1830     // Visit the weak list, removing dead intermediate elements.
1831     Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
1832         record_slots);
1833
1834     // Update the list head.
1835     context->set(index, list_head, UPDATE_WRITE_BARRIER);
1836
1837     if (record_slots) {
1838       // Record the updated slot if necessary.
1839       Object** head_slot = HeapObject::RawField(
1840           context, FixedArray::SizeFor(index));
1841       heap->mark_compact_collector()->RecordSlot(
1842           head_slot, head_slot, list_head);
1843     }
1844   }
1845
1846   static void VisitPhantomObject(Heap*, Context*) {
1847   }
1848
1849   static int WeakNextOffset() {
1850     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1851   }
1852 };
1853
1854
1855 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1856   // We don't record weak slots during marking or scavenges.
1857   // Instead we do it once when we complete the mark-compact cycle.
1858   // Note that the write barrier has no effect if we are already in the middle
1859   // of a compacting mark-sweep cycle, so we have to record slots manually.
1860   bool record_slots =
1861       gc_state() == MARK_COMPACT &&
1862       mark_compact_collector()->is_compacting();
1863   ProcessArrayBuffers(retainer, record_slots);
1864   ProcessNativeContexts(retainer, record_slots);
1865   // TODO(mvstanton): AllocationSites only need to be processed during
1866   // MARK_COMPACT, as they live in old space. Verify and address.
1867   ProcessAllocationSites(retainer, record_slots);
1868 }
1869
1870 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1871                                  bool record_slots) {
1872   Object* head =
1873       VisitWeakList<Context>(
1874           this, native_contexts_list(), retainer, record_slots);
1875   // Update the head of the list of contexts.
1876   native_contexts_list_ = head;
1877 }
1878
1879
1880 template<>
1881 struct WeakListVisitor<JSArrayBufferView> {
1882   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1883     obj->set_weak_next(next);
1884   }
1885
1886   static Object* WeakNext(JSArrayBufferView* obj) {
1887     return obj->weak_next();
1888   }
1889
1890   static void VisitLiveObject(Heap*,
1891                               JSArrayBufferView* obj,
1892                               WeakObjectRetainer* retainer,
1893                               bool record_slots) {}
1894
1895   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1896
1897   static int WeakNextOffset() {
1898     return JSArrayBufferView::kWeakNextOffset;
1899   }
1900 };
1901
1902
1903 template<>
1904 struct WeakListVisitor<JSArrayBuffer> {
1905   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1906     obj->set_weak_next(next);
1907   }
1908
1909   static Object* WeakNext(JSArrayBuffer* obj) {
1910     return obj->weak_next();
1911   }
1912
1913   static void VisitLiveObject(Heap* heap,
1914                               JSArrayBuffer* array_buffer,
1915                               WeakObjectRetainer* retainer,
1916                               bool record_slots) {
1917     Object* typed_array_obj =
1918         VisitWeakList<JSArrayBufferView>(
1919             heap,
1920             array_buffer->weak_first_view(),
1921             retainer, record_slots);
1922     array_buffer->set_weak_first_view(typed_array_obj);
1923     if (typed_array_obj != heap->undefined_value() && record_slots) {
1924       Object** slot = HeapObject::RawField(
1925           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1926       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1927     }
1928   }
1929
1930   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1931     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1932   }
1933
1934   static int WeakNextOffset() {
1935     return JSArrayBuffer::kWeakNextOffset;
1936   }
1937 };
1938
1939
1940 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1941                                bool record_slots) {
1942   Object* array_buffer_obj =
1943       VisitWeakList<JSArrayBuffer>(this,
1944                                    array_buffers_list(),
1945                                    retainer, record_slots);
1946   set_array_buffers_list(array_buffer_obj);
1947 }
1948
1949
1950 void Heap::TearDownArrayBuffers() {
1951   Object* undefined = undefined_value();
1952   for (Object* o = array_buffers_list(); o != undefined;) {
1953     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1954     Runtime::FreeArrayBuffer(isolate(), buffer);
1955     o = buffer->weak_next();
1956   }
1957   array_buffers_list_ = undefined;
1958 }
1959
1960
1961 template<>
1962 struct WeakListVisitor<AllocationSite> {
1963   static void SetWeakNext(AllocationSite* obj, Object* next) {
1964     obj->set_weak_next(next);
1965   }
1966
1967   static Object* WeakNext(AllocationSite* obj) {
1968     return obj->weak_next();
1969   }
1970
1971   static void VisitLiveObject(Heap* heap,
1972                               AllocationSite* site,
1973                               WeakObjectRetainer* retainer,
1974                               bool record_slots) {}
1975
1976   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1977
1978   static int WeakNextOffset() {
1979     return AllocationSite::kWeakNextOffset;
1980   }
1981 };
1982
1983
1984 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1985                                   bool record_slots) {
1986   Object* allocation_site_obj =
1987       VisitWeakList<AllocationSite>(this,
1988                                     allocation_sites_list(),
1989                                     retainer, record_slots);
1990   set_allocation_sites_list(allocation_site_obj);
1991 }
1992
1993
1994 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1995   DisallowHeapAllocation no_allocation_scope;
1996   Object* cur = allocation_sites_list();
1997   bool marked = false;
1998   while (cur->IsAllocationSite()) {
1999     AllocationSite* casted = AllocationSite::cast(cur);
2000     if (casted->GetPretenureMode() == flag) {
2001       casted->ResetPretenureDecision();
2002       bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
2003           isolate_,
2004           DependentCode::kAllocationSiteTenuringChangedGroup);
2005       if (got_marked) marked = true;
2006     }
2007     cur = casted->weak_next();
2008   }
2009   if (marked) isolate_->stack_guard()->DeoptMarkedCode();
2010 }
2011
2012
2013 void Heap::EvaluateOldSpaceLocalPretenuring(
2014     uint64_t size_of_objects_before_gc) {
2015   uint64_t size_of_objects_after_gc = SizeOfObjects();
2016   double old_generation_survival_rate =
2017       (static_cast<double>(size_of_objects_after_gc) * 100) /
2018           static_cast<double>(size_of_objects_before_gc);
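       // For example, if 100 MB of objects were live before this GC and 60 MB
       // remain afterwards, old_generation_survival_rate is 60.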
2019
2020   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2021     // Too many objects died in the old generation; pretenuring at the wrong
2022     // allocation sites may be the cause. We have to deoptimize all dependent
2023     // code registered in the allocation sites to re-evaluate our pretenuring
2024     // decisions.
2025     ResetAllAllocationSitesDependentCode(TENURED);
2026     if (FLAG_trace_pretenuring) {
2027       PrintF("Deopt all allocation sites dependent code due to low survival "
2028              "rate in the old generation %f\n", old_generation_survival_rate);
2029     }
2030   }
2031 }
2032
2033
2034 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2035   DisallowHeapAllocation no_allocation;
2036   // All external strings are listed in the external string table.
2037
2038   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
2039    public:
2040     explicit ExternalStringTableVisitorAdapter(
2041         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
2042     virtual void VisitPointers(Object** start, Object** end) {
2043       for (Object** p = start; p < end; p++) {
2044         ASSERT((*p)->IsExternalString());
2045         visitor_->VisitExternalString(Utils::ToLocal(
2046             Handle<String>(String::cast(*p))));
2047       }
2048     }
2049    private:
2050     v8::ExternalResourceVisitor* visitor_;
2051   } external_string_table_visitor(visitor);
2052
2053   external_string_table_.Iterate(&external_string_table_visitor);
2054 }
2055
2056
2057 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
2058  public:
2059   static inline void VisitPointer(Heap* heap, Object** p) {
2060     Object* object = *p;
2061     if (!heap->InNewSpace(object)) return;
2062     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
2063                          reinterpret_cast<HeapObject*>(object));
2064   }
2065 };
2066
2067
2068 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
2069                          Address new_space_front) {
2070   do {
2071     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
2072     // The addresses new_space_front and new_space_.top() define a
2073     // queue of unprocessed copied objects.  Process them until the
2074     // queue is empty.
2075     while (new_space_front != new_space_.top()) {
2076       if (!NewSpacePage::IsAtEnd(new_space_front)) {
2077         HeapObject* object = HeapObject::FromAddress(new_space_front);
2078         new_space_front +=
2079           NewSpaceScavenger::IterateBody(object->map(), object);
2080       } else {
2081         new_space_front =
2082             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
2083       }
2084     }
2085
2086     // Promote and process all the to-be-promoted objects.
2087     {
2088       StoreBufferRebuildScope scope(this,
2089                                     store_buffer(),
2090                                     &ScavengeStoreBufferCallback);
2091       while (!promotion_queue()->is_empty()) {
2092         HeapObject* target;
2093         int size;
2094         promotion_queue()->remove(&target, &size);
2095
2096         // A promoted object might already have been partially visited
2097         // during old space pointer iteration. Thus we search specifically
2098         // for pointers into the from semispace instead of looking for
2099         // pointers to new space.
2100         ASSERT(!target->IsMap());
2101         IterateAndMarkPointersToFromSpace(target->address(),
2102                                           target->address() + size,
2103                                           &ScavengeObject);
2104       }
2105     }
2106
2107     // Take another spin if there are now unswept objects in new space
2108     // (there are currently no more unswept promoted objects).
2109   } while (new_space_front != new_space_.top());
2110
2111   return new_space_front;
2112 }
2113
2114
2115 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2116 STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2117
2118
2119 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
2120                                               HeapObject* object,
2121                                               int size));
2122
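     // Callers that need double alignment over-allocate by one pointer-sized
     // word (see EvacuateObject). If the raw allocation address is misaligned,
     // the object is shifted up by one word and a one-word filler is written
     // at the original address; otherwise the spare word at the end becomes
     // the filler.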
2123 static HeapObject* EnsureDoubleAligned(Heap* heap,
2124                                        HeapObject* object,
2125                                        int size) {
2126   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
2127     heap->CreateFillerObjectAt(object->address(), kPointerSize);
2128     return HeapObject::FromAddress(object->address() + kPointerSize);
2129   } else {
2130     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
2131                                kPointerSize);
2132     return object;
2133   }
2134 }
2135
2136
2137 enum LoggingAndProfiling {
2138   LOGGING_AND_PROFILING_ENABLED,
2139   LOGGING_AND_PROFILING_DISABLED
2140 };
2141
2142
2143 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
2144
2145
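     // The scavenging visitor is specialized along two axes: whether mark bits
     // have to be transferred to copied objects (i.e. incremental marking is
     // active) and whether logging or profiling is enabled. The four resulting
     // dispatch tables are built in InitializeScavengingVisitorsTables() and
     // one of them is picked per scavenge by Heap::SelectScavengingVisitorsTable().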
2146 template<MarksHandling marks_handling,
2147          LoggingAndProfiling logging_and_profiling_mode>
2148 class ScavengingVisitor : public StaticVisitorBase {
2149  public:
2150   static void Initialize() {
2151     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
2152     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2153     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2154     table_.Register(kVisitByteArray, &EvacuateByteArray);
2155     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2156     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2157     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
2158     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
2159
2160     table_.Register(kVisitNativeContext,
2161                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2162                         template VisitSpecialized<Context::kSize>);
2163
2164     table_.Register(kVisitConsString,
2165                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2166                         template VisitSpecialized<ConsString::kSize>);
2167
2168     table_.Register(kVisitSlicedString,
2169                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2170                         template VisitSpecialized<SlicedString::kSize>);
2171
2172     table_.Register(kVisitSymbol,
2173                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2174                         template VisitSpecialized<Symbol::kSize>);
2175
2176     table_.Register(kVisitSharedFunctionInfo,
2177                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2178                         template VisitSpecialized<SharedFunctionInfo::kSize>);
2179
2180     table_.Register(kVisitJSWeakMap,
2181                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2182                     Visit);
2183
2184     table_.Register(kVisitJSWeakSet,
2185                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2186                     Visit);
2187
2188     table_.Register(kVisitJSArrayBuffer,
2189                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2190                     Visit);
2191
2192     table_.Register(kVisitJSTypedArray,
2193                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2194                     Visit);
2195
2196     table_.Register(kVisitJSDataView,
2197                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2198                     Visit);
2199
2200     table_.Register(kVisitJSRegExp,
2201                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2202                     Visit);
2203
2204     if (marks_handling == IGNORE_MARKS) {
2205       table_.Register(kVisitJSFunction,
2206                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
2207                           template VisitSpecialized<JSFunction::kSize>);
2208     } else {
2209       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2210     }
2211
2212     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2213                                    kVisitDataObject,
2214                                    kVisitDataObjectGeneric>();
2215
2216     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2217                                    kVisitJSObject,
2218                                    kVisitJSObjectGeneric>();
2219
2220     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2221                                    kVisitStruct,
2222                                    kVisitStructGeneric>();
2223   }
2224
2225   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2226     return &table_;
2227   }
2228
2229  private:
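       // DATA_OBJECT instances contain no pointers the scavenger needs to
       // visit and are promoted into old data space; POINTER_OBJECT instances
       // may contain pointers, are promoted into old pointer space, and are
       // pushed onto the promotion queue so that their fields get scavenged.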
2230   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2231
2232   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2233     bool should_record = false;
2234 #ifdef DEBUG
2235     should_record = FLAG_heap_stats;
2236 #endif
2237     should_record = should_record || FLAG_log_gc;
2238     if (should_record) {
2239       if (heap->new_space()->Contains(obj)) {
2240         heap->new_space()->RecordAllocation(obj);
2241       } else {
2242         heap->new_space()->RecordPromotion(obj);
2243       }
2244     }
2245   }
2246
2247   // Helper function used by EvacuateObject to copy a source object to an
2248   // allocated target object and update the forwarding pointer in the
2249   // source object.
2250   INLINE(static void MigrateObject(Heap* heap,
2251                                    HeapObject* source,
2252                                    HeapObject* target,
2253                                    int size)) {
2254     // Copy the content of source to target.
2255     heap->CopyBlock(target->address(), source->address(), size);
2256
2257     // Set the forwarding address.
2258     source->set_map_word(MapWord::FromForwardingAddress(target));
2259
2260     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2261       // Update NewSpace stats if necessary.
2262       RecordCopiedObject(heap, target);
2263       Isolate* isolate = heap->isolate();
2264       HeapProfiler* heap_profiler = isolate->heap_profiler();
2265       if (heap_profiler->is_tracking_object_moves()) {
2266         heap_profiler->ObjectMoveEvent(source->address(), target->address(),
2267                                        size);
2268       }
2269       if (isolate->logger()->is_logging_code_events() ||
2270           isolate->cpu_profiler()->is_profiling()) {
2271         if (target->IsSharedFunctionInfo()) {
2272           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2273               source->address(), target->address()));
2274         }
2275       }
2276     }
2277
2278     if (marks_handling == TRANSFER_MARKS) {
2279       if (Marking::TransferColor(source, target)) {
2280         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2281       }
2282     }
2283   }
2284
2285
2286   template<ObjectContents object_contents, int alignment>
2287   static inline void EvacuateObject(Map* map,
2288                                     HeapObject** slot,
2289                                     HeapObject* object,
2290                                     int object_size) {
2291     SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2292     SLOW_ASSERT(object->Size() == object_size);
2293
2294     int allocation_size = object_size;
2295     if (alignment != kObjectAlignment) {
2296       ASSERT(alignment == kDoubleAlignment);
2297       allocation_size += kPointerSize;
2298     }
2299
2300     Heap* heap = map->GetHeap();
2301     if (heap->ShouldBePromoted(object->address(), object_size)) {
2302       MaybeObject* maybe_result;
2303
2304       if (object_contents == DATA_OBJECT) {
2305         ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2306         maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2307       } else {
2308         ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2309         maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2310       }
2311
2312       Object* result = NULL;  // Initialization to please compiler.
2313       if (maybe_result->ToObject(&result)) {
2314         HeapObject* target = HeapObject::cast(result);
2315
2316         if (alignment != kObjectAlignment) {
2317           target = EnsureDoubleAligned(heap, target, allocation_size);
2318         }
2319
2320         // Order is important: slot might be inside of the target if target
2321         // was allocated over a dead object and slot comes from the store
2322         // buffer.
2323         *slot = target;
2324         MigrateObject(heap, object, target, object_size);
2325
2326         if (object_contents == POINTER_OBJECT) {
2327           if (map->instance_type() == JS_FUNCTION_TYPE) {
2328             heap->promotion_queue()->insert(
2329                 target, JSFunction::kNonWeakFieldsEndOffset);
2330           } else {
2331             heap->promotion_queue()->insert(target, object_size);
2332           }
2333         }
2334
2335         heap->tracer()->increment_promoted_objects_size(object_size);
2336         return;
2337       }
2338     }
2339     ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2340     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
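         // The allocation above may have moved the new-space top closer to the
         // promotion queue entries stored at the end of to space; let the queue
         // know so it can protect its entries (see PromotionQueue::SetNewLimit
         // and RelocateQueueHead).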
2341     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2342     Object* result = allocation->ToObjectUnchecked();
2343     HeapObject* target = HeapObject::cast(result);
2344
2345     if (alignment != kObjectAlignment) {
2346       target = EnsureDoubleAligned(heap, target, allocation_size);
2347     }
2348
2349     // Order is important: slot might be inside of the target if target
2350     // was allocated over a dead object and slot comes from the store
2351     // buffer.
2352     *slot = target;
2353     MigrateObject(heap, object, target, object_size);
2354     return;
2355   }
2356
2357
2358   static inline void EvacuateJSFunction(Map* map,
2359                                         HeapObject** slot,
2360                                         HeapObject* object) {
2361     ObjectEvacuationStrategy<POINTER_OBJECT>::
2362         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2363
2364     HeapObject* target = *slot;
2365     MarkBit mark_bit = Marking::MarkBitFrom(target);
2366     if (Marking::IsBlack(mark_bit)) {
2367       // This object is black and it might not be rescanned by the marker.
2368       // We should explicitly record the code entry slot for compaction
2369       // because promotion queue processing (IterateAndMarkPointersToFromSpace)
2370       // will miss it, as it is not HeapObject-tagged.
2371       Address code_entry_slot =
2372           target->address() + JSFunction::kCodeEntryOffset;
2373       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2374       map->GetHeap()->mark_compact_collector()->
2375           RecordCodeEntrySlot(code_entry_slot, code);
2376     }
2377   }
2378
2379
2380   static inline void EvacuateFixedArray(Map* map,
2381                                         HeapObject** slot,
2382                                         HeapObject* object) {
2383     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2384     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2385         map, slot, object, object_size);
2386   }
2387
2388
2389   static inline void EvacuateFixedDoubleArray(Map* map,
2390                                               HeapObject** slot,
2391                                               HeapObject* object) {
2392     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2393     int object_size = FixedDoubleArray::SizeFor(length);
2394     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2395         map, slot, object, object_size);
2396   }
2397
2398
2399   static inline void EvacuateFixedTypedArray(Map* map,
2400                                              HeapObject** slot,
2401                                              HeapObject* object) {
2402     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2403     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2404         map, slot, object, object_size);
2405   }
2406
2407
2408   static inline void EvacuateFixedFloat64Array(Map* map,
2409                                                HeapObject** slot,
2410                                                HeapObject* object) {
2411     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2412     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2413         map, slot, object, object_size);
2414   }
2415
2416
2417   static inline void EvacuateByteArray(Map* map,
2418                                        HeapObject** slot,
2419                                        HeapObject* object) {
2420     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2421     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2422         map, slot, object, object_size);
2423   }
2424
2425
2426   static inline void EvacuateSeqOneByteString(Map* map,
2427                                             HeapObject** slot,
2428                                             HeapObject* object) {
2429     int object_size = SeqOneByteString::cast(object)->
2430         SeqOneByteStringSize(map->instance_type());
2431     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2432         map, slot, object, object_size);
2433   }
2434
2435
2436   static inline void EvacuateSeqTwoByteString(Map* map,
2437                                               HeapObject** slot,
2438                                               HeapObject* object) {
2439     int object_size = SeqTwoByteString::cast(object)->
2440         SeqTwoByteStringSize(map->instance_type());
2441     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2442         map, slot, object, object_size);
2443   }
2444
2445
2446   static inline bool IsShortcutCandidate(int type) {
2447     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2448   }
2449
2450   static inline void EvacuateShortcutCandidate(Map* map,
2451                                                HeapObject** slot,
2452                                                HeapObject* object) {
2453     ASSERT(IsShortcutCandidate(map->instance_type()));
2454
2455     Heap* heap = map->GetHeap();
2456
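         // Shortcut: a cons string whose second component is the empty string
         // is equivalent to its first component, so references can be forwarded
         // directly to the first component instead of copying the wrapper.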
2457     if (marks_handling == IGNORE_MARKS &&
2458         ConsString::cast(object)->unchecked_second() ==
2459         heap->empty_string()) {
2460       HeapObject* first =
2461           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2462
2463       *slot = first;
2464
2465       if (!heap->InNewSpace(first)) {
2466         object->set_map_word(MapWord::FromForwardingAddress(first));
2467         return;
2468       }
2469
2470       MapWord first_word = first->map_word();
2471       if (first_word.IsForwardingAddress()) {
2472         HeapObject* target = first_word.ToForwardingAddress();
2473
2474         *slot = target;
2475         object->set_map_word(MapWord::FromForwardingAddress(target));
2476         return;
2477       }
2478
2479       heap->DoScavengeObject(first->map(), slot, first);
2480       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2481       return;
2482     }
2483
2484     int object_size = ConsString::kSize;
2485     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2486         map, slot, object, object_size);
2487   }
2488
2489   template<ObjectContents object_contents>
2490   class ObjectEvacuationStrategy {
2491    public:
2492     template<int object_size>
2493     static inline void VisitSpecialized(Map* map,
2494                                         HeapObject** slot,
2495                                         HeapObject* object) {
2496       EvacuateObject<object_contents, kObjectAlignment>(
2497           map, slot, object, object_size);
2498     }
2499
2500     static inline void Visit(Map* map,
2501                              HeapObject** slot,
2502                              HeapObject* object) {
2503       int object_size = map->instance_size();
2504       EvacuateObject<object_contents, kObjectAlignment>(
2505           map, slot, object, object_size);
2506     }
2507   };
2508
2509   static VisitorDispatchTable<ScavengingCallback> table_;
2510 };
2511
2512
2513 template<MarksHandling marks_handling,
2514          LoggingAndProfiling logging_and_profiling_mode>
2515 VisitorDispatchTable<ScavengingCallback>
2516     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2517
2518
2519 static void InitializeScavengingVisitorsTables() {
2520   ScavengingVisitor<TRANSFER_MARKS,
2521                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2522   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2523   ScavengingVisitor<TRANSFER_MARKS,
2524                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2525   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2526 }
2527
2528
2529 void Heap::SelectScavengingVisitorsTable() {
2530   bool logging_and_profiling =
2531       isolate()->logger()->is_logging() ||
2532       isolate()->cpu_profiler()->is_profiling() ||
2533       (isolate()->heap_profiler() != NULL &&
2534        isolate()->heap_profiler()->is_tracking_object_moves());
2535
2536   if (!incremental_marking()->IsMarking()) {
2537     if (!logging_and_profiling) {
2538       scavenging_visitors_table_.CopyFrom(
2539           ScavengingVisitor<IGNORE_MARKS,
2540                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2541     } else {
2542       scavenging_visitors_table_.CopyFrom(
2543           ScavengingVisitor<IGNORE_MARKS,
2544                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2545     }
2546   } else {
2547     if (!logging_and_profiling) {
2548       scavenging_visitors_table_.CopyFrom(
2549           ScavengingVisitor<TRANSFER_MARKS,
2550                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2551     } else {
2552       scavenging_visitors_table_.CopyFrom(
2553           ScavengingVisitor<TRANSFER_MARKS,
2554                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2555     }
2556
2557     if (incremental_marking()->IsCompacting()) {
2558       // When compacting, forbid short-circuiting of cons strings.
2559       // The scavenging code relies on the fact that a new space object
2560       // can't be evacuated into an evacuation candidate, but
2561       // short-circuiting violates this assumption.
2562       scavenging_visitors_table_.Register(
2563           StaticVisitorBase::kVisitShortcutCandidate,
2564           scavenging_visitors_table_.GetVisitorById(
2565               StaticVisitorBase::kVisitConsString));
2566     }
2567   }
2568 }
2569
2570
2571 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2572   SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2573   MapWord first_word = object->map_word();
2574   SLOW_ASSERT(!first_word.IsForwardingAddress());
2575   Map* map = first_word.ToMap();
2576   map->GetHeap()->DoScavengeObject(map, p, object);
2577 }
2578
2579
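     // Allocates a map whose pointer fields (prototype, constructor,
     // descriptors, code cache, dependent code) are left uninitialized. This
     // is used while bootstrapping in CreateInitialMaps(), before the objects
     // needed to fully initialize a map exist; CreateInitialMaps() patches the
     // missing fields afterwards.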
2580 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2581                                       int instance_size) {
2582   Object* result;
2583   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2584   if (!maybe_result->ToObject(&result)) return maybe_result;
2585
2586   // Map::cast cannot be used due to uninitialized map field.
2587   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2588   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2589   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2590   reinterpret_cast<Map*>(result)->set_visitor_id(
2591         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2592   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2593   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2594   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2595   reinterpret_cast<Map*>(result)->set_bit_field(0);
2596   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2597   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2598                    Map::OwnsDescriptors::encode(true);
2599   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2600   return result;
2601 }
2602
2603
2604 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2605                                int instance_size,
2606                                ElementsKind elements_kind) {
2607   Object* result;
2608   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2609   if (!maybe_result->To(&result)) return maybe_result;
2610
2611   Map* map = reinterpret_cast<Map*>(result);
2612   map->set_map_no_write_barrier(meta_map());
2613   map->set_instance_type(instance_type);
2614   map->set_visitor_id(
2615       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2616   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2617   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2618   map->set_instance_size(instance_size);
2619   map->set_inobject_properties(0);
2620   map->set_pre_allocated_property_fields(0);
2621   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2622   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2623                           SKIP_WRITE_BARRIER);
2624   map->init_back_pointer(undefined_value());
2625   map->set_unused_property_fields(0);
2626   map->set_instance_descriptors(empty_descriptor_array());
2627   map->set_bit_field(0);
2628   map->set_bit_field2(1 << Map::kIsExtensible);
2629   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2630                    Map::OwnsDescriptors::encode(true);
2631   map->set_bit_field3(bit_field3);
2632   map->set_elements_kind(elements_kind);
2633
2634   return map;
2635 }
2636
2637
2638 MaybeObject* Heap::AllocateCodeCache() {
2639   CodeCache* code_cache;
2640   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2641     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2642   }
2643   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2644   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2645   return code_cache;
2646 }
2647
2648
2649 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2650   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2651 }
2652
2653
2654 MaybeObject* Heap::AllocateAccessorPair() {
2655   AccessorPair* accessors;
2656   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2657     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2658   }
2659   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2660   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2661   accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
2662   return accessors;
2663 }
2664
2665
2666 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2667   TypeFeedbackInfo* info;
2668   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2669     if (!maybe_info->To(&info)) return maybe_info;
2670   }
2671   info->initialize_storage();
2672   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2673                                 SKIP_WRITE_BARRIER);
2674   return info;
2675 }
2676
2677
2678 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2679   AliasedArgumentsEntry* entry;
2680   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2681     if (!maybe_entry->To(&entry)) return maybe_entry;
2682   }
2683   entry->set_aliased_context_slot(aliased_context_slot);
2684   return entry;
2685 }
2686
2687
2688 const Heap::StringTypeTable Heap::string_type_table[] = {
2689 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2690   {type, size, k##camel_name##MapRootIndex},
2691   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2692 #undef STRING_TYPE_ELEMENT
2693 };
2694
2695
2696 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2697 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2698   {contents, k##name##RootIndex},
2699   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2700 #undef CONSTANT_STRING_ELEMENT
2701 };
2702
2703
2704 const Heap::StructTable Heap::struct_table[] = {
2705 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2706   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2707   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2708 #undef STRUCT_TABLE_ELEMENT
2709 };
2710
2711
2712 bool Heap::CreateInitialMaps() {
2713   Object* obj;
2714   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2715     if (!maybe_obj->ToObject(&obj)) return false;
2716   }
2717   // Map::cast cannot be used due to uninitialized map field.
2718   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2719   set_meta_map(new_meta_map);
2720   new_meta_map->set_map(new_meta_map);
2721
2722   { MaybeObject* maybe_obj =
2723         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2724     if (!maybe_obj->ToObject(&obj)) return false;
2725   }
2726   set_fixed_array_map(Map::cast(obj));
2727
2728   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2729     if (!maybe_obj->ToObject(&obj)) return false;
2730   }
2731   set_oddball_map(Map::cast(obj));
2732
2733   { MaybeObject* maybe_obj =
2734         AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
2735     if (!maybe_obj->ToObject(&obj)) return false;
2736   }
2737   set_constant_pool_array_map(Map::cast(obj));
2738
2739   // Allocate the empty array.
2740   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2741     if (!maybe_obj->ToObject(&obj)) return false;
2742   }
2743   set_empty_fixed_array(FixedArray::cast(obj));
2744
2745   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2746     if (!maybe_obj->ToObject(&obj)) return false;
2747   }
2748   set_null_value(Oddball::cast(obj));
2749   Oddball::cast(obj)->set_kind(Oddball::kNull);
2750
2751   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2752     if (!maybe_obj->ToObject(&obj)) return false;
2753   }
2754   set_undefined_value(Oddball::cast(obj));
2755   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2756   ASSERT(!InNewSpace(undefined_value()));
2757
2758   // Allocate the empty descriptor array.
2759   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2760     if (!maybe_obj->ToObject(&obj)) return false;
2761   }
2762   set_empty_descriptor_array(DescriptorArray::cast(obj));
2763
2764   // Allocate the constant pool array.
2765   { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
2766     if (!maybe_obj->ToObject(&obj)) return false;
2767   }
2768   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2769
2770   // Fix the instance_descriptors for the existing maps.
2771   meta_map()->set_code_cache(empty_fixed_array());
2772   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2773   meta_map()->init_back_pointer(undefined_value());
2774   meta_map()->set_instance_descriptors(empty_descriptor_array());
2775
2776   fixed_array_map()->set_code_cache(empty_fixed_array());
2777   fixed_array_map()->set_dependent_code(
2778       DependentCode::cast(empty_fixed_array()));
2779   fixed_array_map()->init_back_pointer(undefined_value());
2780   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2781
2782   oddball_map()->set_code_cache(empty_fixed_array());
2783   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2784   oddball_map()->init_back_pointer(undefined_value());
2785   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2786
2787   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2788   constant_pool_array_map()->set_dependent_code(
2789       DependentCode::cast(empty_fixed_array()));
2790   constant_pool_array_map()->init_back_pointer(undefined_value());
2791   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2792
2793   // Fix prototype object for existing maps.
2794   meta_map()->set_prototype(null_value());
2795   meta_map()->set_constructor(null_value());
2796
2797   fixed_array_map()->set_prototype(null_value());
2798   fixed_array_map()->set_constructor(null_value());
2799
2800   oddball_map()->set_prototype(null_value());
2801   oddball_map()->set_constructor(null_value());
2802
2803   constant_pool_array_map()->set_prototype(null_value());
2804   constant_pool_array_map()->set_constructor(null_value());
2805
2806   { // Map allocation
2807 #define ALLOCATE_MAP(instance_type, size, field_name)                          \
2808     { Map* map;                                                                \
2809       if (!AllocateMap((instance_type), size)->To(&map)) return false;         \
2810       set_##field_name##_map(map);                                             \
2811     }
2812
2813 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                        \
2814     ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2815
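         // For example, ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize,
         // heap_number) allocates a map via AllocateMap() and stores it in the
         // heap_number_map root via set_heap_number_map().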
2816     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2817     ASSERT(fixed_array_map() != fixed_cow_array_map());
2818
2819     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2820     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2821     ALLOCATE_MAP(FLOAT32x4_TYPE, Float32x4::kSize, float32x4)
2822     ALLOCATE_MAP(INT32x4_TYPE, Int32x4::kSize, int32x4)
2823     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2824     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2825
2826     for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2827       const StringTypeTable& entry = string_type_table[i];
2828       { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2829         if (!maybe_obj->ToObject(&obj)) return false;
2830       }
2831       roots_[entry.index] = Map::cast(obj);
2832     }
2833
2834     ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2835     undetectable_string_map()->set_is_undetectable();
2836
2837     ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2838     undetectable_ascii_string_map()->set_is_undetectable();
2839
2840     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2841     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2842     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2843
2844 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
2845     ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
2846         external_##type##_array)
2847
2848      TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2849 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2850
2851 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
2852     ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
2853         fixed_##type##_array)
2854
2855      TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2856 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2857
2858     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
2859
2860     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2861
2862     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2863     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2864     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2865     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2866
2867
2868     for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2869       const StructTable& entry = struct_table[i];
2870       Map* map;
2871       if (!AllocateMap(entry.type, entry.size)->To(&map))
2872         return false;
2873       roots_[entry.index] = map;
2874     }
2875
2876     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2877
2878     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2879     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2880     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2881     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2882     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2883     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2884
2885     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2886     native_context_map()->set_dictionary_map(true);
2887     native_context_map()->set_visitor_id(
2888         StaticVisitorBase::kVisitNativeContext);
2889
2890     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2891         shared_function_info)
2892
2893     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2894         message_object)
2895     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2896         external)
2897     external_map()->set_is_extensible(false);
2898 #undef ALLOCATE_VARSIZE_MAP
2899 #undef ALLOCATE_MAP
2900   }
2901
2902   { // Empty arrays
2903     { ByteArray* byte_array;
2904       if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
2905       set_empty_byte_array(byte_array);
2906     }
2907
2908 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)           \
2909     { ExternalArray* obj;                                                      \
2910       if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj))       \
2911           return false;                                                        \
2912       set_empty_external_##type##_array(obj);                                  \
2913     }
2914
2915     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2916 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2917   }
2918   ASSERT(!InNewSpace(empty_fixed_array()));
2919   return true;
2920 }
2921
2922
2923 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2924   // Statically ensure that it is safe to allocate heap numbers in paged
2925   // spaces.
2926   int size = HeapNumber::kSize;
2927   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2928
2929   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2930
2931   Object* result;
2932   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2933     if (!maybe_result->ToObject(&result)) return maybe_result;
2934   }
2935
2936   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2937   HeapNumber::cast(result)->set_value(value);
2938   return result;
2939 }
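// A minimal usage sketch (not from the original source; the heap pointer and
// values are hypothetical). Callers unwrap the MaybeObject and propagate
// failures, mirroring the allocation idiom used throughout this file:
//
//   Object* number;
//   { MaybeObject* maybe = heap->AllocateHeapNumber(0.5, TENURED);
//     if (!maybe->ToObject(&number)) return maybe;  // propagate the failure
//   }
//   double stored = HeapNumber::cast(number)->value();  // 0.5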
2940
2941
2942 MaybeObject* Heap::AllocateFloat32x4(float32x4_value_t value,
2943                                      PretenureFlag pretenure) {
2944   // Statically ensure that it is safe to allocate float32x4 objects in paged
2945   // spaces.
2946   int size = Float32x4::kSize;
2947   STATIC_ASSERT(Float32x4::kSize <= Page::kMaxRegularHeapObjectSize);
2948
2949   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2950
2951   Object* result;
2952   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2953     if (!maybe_result->ToObject(&result)) return maybe_result;
2954   }
2955
2956   HeapObject::cast(result)->set_map_no_write_barrier(float32x4_map());
2957   Float32x4::cast(result)->set_value(value);
2958   return result;
2959 }
2960
2961
2962 MaybeObject* Heap::AllocateInt32x4(int32x4_value_t value,
2963                                     PretenureFlag pretenure) {
2964   // Statically ensure that it is safe to allocate int32x4 objects in paged
2965   // spaces.
2966   int size = Int32x4::kSize;
2967   STATIC_ASSERT(Int32x4::kSize <= Page::kMaxRegularHeapObjectSize);
2968
2969   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2970
2971   Object* result;
2972   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2973     if (!maybe_result->ToObject(&result)) return maybe_result;
2974   }
2975
2976   HeapObject::cast(result)->set_map_no_write_barrier(int32x4_map());
2977   Int32x4::cast(result)->set_value(value);
2978   return result;
2979 }
2980
2981
2982 MaybeObject* Heap::AllocateCell(Object* value) {
2983   int size = Cell::kSize;
2984   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2985
2986   Object* result;
2987   { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2988     if (!maybe_result->ToObject(&result)) return maybe_result;
2989   }
2990   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2991   Cell::cast(result)->set_value(value);
2992   return result;
2993 }
2994
2995
2996 MaybeObject* Heap::AllocatePropertyCell() {
2997   int size = PropertyCell::kSize;
2998   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2999
3000   Object* result;
3001   MaybeObject* maybe_result =
3002       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
3003   if (!maybe_result->ToObject(&result)) return maybe_result;
3004
3005   HeapObject::cast(result)->set_map_no_write_barrier(
3006       global_property_cell_map());
3007   PropertyCell* cell = PropertyCell::cast(result);
3008   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
3009                            SKIP_WRITE_BARRIER);
3010   cell->set_value(the_hole_value());
3011   cell->set_type(HeapType::None());
3012   return result;
3013 }
3014
3015
3016 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
3017   Box* result;
3018   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
3019   if (!maybe_result->To(&result)) return maybe_result;
3020   result->set_value(value);
3021   return result;
3022 }
3023
3024
3025 MaybeObject* Heap::AllocateAllocationSite() {
3026   AllocationSite* site;
3027   MaybeObject* maybe_result = Allocate(allocation_site_map(),
3028                                        OLD_POINTER_SPACE);
3029   if (!maybe_result->To(&site)) return maybe_result;
3030   site->Initialize();
3031
3032   // Link the site
3033   site->set_weak_next(allocation_sites_list());
3034   set_allocation_sites_list(site);
3035   return site;
3036 }
3037
3038
3039 MaybeObject* Heap::CreateOddball(const char* to_string,
3040                                  Object* to_number,
3041                                  byte kind) {
3042   Object* result;
3043   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
3044     if (!maybe_result->ToObject(&result)) return maybe_result;
3045   }
3046   return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
3047 }
3048
3049
3050 bool Heap::CreateApiObjects() {
3051   Object* obj;
3052
3053   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3054     if (!maybe_obj->ToObject(&obj)) return false;
3055   }
3056   // Don't use Smi-only elements optimizations for objects with the neander
3057   // map. There are too many places where element values are set directly,
3058   // without a bottleneck that could trap the Smi-only -> fast elements
3059   // transition, and there appears to be no benefit to optimizing this case.
3060   Map* new_neander_map = Map::cast(obj);
3061   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
3062   set_neander_map(new_neander_map);
3063
3064   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3065     if (!maybe_obj->ToObject(&obj)) return false;
3066   }
3067   Object* elements;
3068   { MaybeObject* maybe_elements = AllocateFixedArray(2);
3069     if (!maybe_elements->ToObject(&elements)) return false;
3070   }
3071   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3072   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3073   set_message_listeners(JSObject::cast(obj));
3074
3075   return true;
3076 }
3077
3078
3079 void Heap::CreateJSEntryStub() {
3080   JSEntryStub stub;
3081   set_js_entry_code(*stub.GetCode(isolate()));
3082 }
3083
3084
3085 void Heap::CreateJSConstructEntryStub() {
3086   JSConstructEntryStub stub;
3087   set_js_construct_entry_code(*stub.GetCode(isolate()));
3088 }
3089
3090
3091 void Heap::CreateFixedStubs() {
3092   // Here we create roots for fixed stubs. They are needed at GC
3093   // for cooking and uncooking (check out frames.cc).
3094   // This eliminates the need to do a dictionary lookup in the
3095   // stub cache for these stubs.
3096   HandleScope scope(isolate());
3097   // gcc-4.4 has problems generating correct code for the following snippet:
3098   // {  JSEntryStub stub;
3099   //    js_entry_code_ = *stub.GetCode();
3100   // }
3101   // {  JSConstructEntryStub stub;
3102   //    js_construct_entry_code_ = *stub.GetCode();
3103   // }
3104   // To work around the problem, make separate functions without inlining.
3105   Heap::CreateJSEntryStub();
3106   Heap::CreateJSConstructEntryStub();
3107
3108   // Create stubs that should be there, so we don't unexpectedly have to
3109   // create them if we need them during the creation of another stub.
3110   // Stub creation mixes raw pointers and handles in an unsafe manner so
3111   // we cannot create stubs while we are creating stubs.
3112   CodeStub::GenerateStubsAheadOfTime(isolate());
3113 }
3114
3115
3116 bool Heap::CreateInitialObjects() {
3117   Object* obj;
3118
3119   // The -0 value must be set before NumberFromDouble works.
3120   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3121     if (!maybe_obj->ToObject(&obj)) return false;
3122   }
3123   set_minus_zero_value(HeapNumber::cast(obj));
3124   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3125
3126   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3127     if (!maybe_obj->ToObject(&obj)) return false;
3128   }
3129   set_nan_value(HeapNumber::cast(obj));
3130
3131   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3132     if (!maybe_obj->ToObject(&obj)) return false;
3133   }
3134   set_infinity_value(HeapNumber::cast(obj));
3135
3136   // The hole has not been created yet, but we want to put something
3137   // predictable in the gaps in the string table, so let's make that Smi zero.
3138   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3139
3140   // Allocate initial string table.
3141   { MaybeObject* maybe_obj =
3142         StringTable::Allocate(this, kInitialStringTableSize);
3143     if (!maybe_obj->ToObject(&obj)) return false;
3144   }
3145   // Don't use set_string_table() due to asserts.
3146   roots_[kStringTableRootIndex] = obj;
3147
3148   // Finish initializing oddballs after creating the string table.
3149   { MaybeObject* maybe_obj =
3150         undefined_value()->Initialize(this,
3151                                       "undefined",
3152                                       nan_value(),
3153                                       Oddball::kUndefined);
3154     if (!maybe_obj->ToObject(&obj)) return false;
3155   }
3156
3157   // Initialize the null_value.
3158   { MaybeObject* maybe_obj = null_value()->Initialize(
3159       this, "null", Smi::FromInt(0), Oddball::kNull);
3160     if (!maybe_obj->ToObject(&obj)) return false;
3161   }
3162
3163   { MaybeObject* maybe_obj = CreateOddball("true",
3164                                            Smi::FromInt(1),
3165                                            Oddball::kTrue);
3166     if (!maybe_obj->ToObject(&obj)) return false;
3167   }
3168   set_true_value(Oddball::cast(obj));
3169
3170   { MaybeObject* maybe_obj = CreateOddball("false",
3171                                            Smi::FromInt(0),
3172                                            Oddball::kFalse);
3173     if (!maybe_obj->ToObject(&obj)) return false;
3174   }
3175   set_false_value(Oddball::cast(obj));
3176
3177   { MaybeObject* maybe_obj = CreateOddball("hole",
3178                                            Smi::FromInt(-1),
3179                                            Oddball::kTheHole);
3180     if (!maybe_obj->ToObject(&obj)) return false;
3181   }
3182   set_the_hole_value(Oddball::cast(obj));
3183
3184   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3185                                            Smi::FromInt(-1),
3186                                            Oddball::kUninitialized);
3187     if (!maybe_obj->ToObject(&obj)) return false;
3188   }
3189   set_uninitialized_value(Oddball::cast(obj));
3190
3191   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3192                                            Smi::FromInt(-4),
3193                                            Oddball::kArgumentMarker);
3194     if (!maybe_obj->ToObject(&obj)) return false;
3195   }
3196   set_arguments_marker(Oddball::cast(obj));
3197
3198   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3199                                            Smi::FromInt(-2),
3200                                            Oddball::kOther);
3201     if (!maybe_obj->ToObject(&obj)) return false;
3202   }
3203   set_no_interceptor_result_sentinel(obj);
3204
3205   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3206                                            Smi::FromInt(-3),
3207                                            Oddball::kOther);
3208     if (!maybe_obj->ToObject(&obj)) return false;
3209   }
3210   set_termination_exception(obj);
3211
3212   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3213     { MaybeObject* maybe_obj =
3214           InternalizeUtf8String(constant_string_table[i].contents);
3215       if (!maybe_obj->ToObject(&obj)) return false;
3216     }
3217     roots_[constant_string_table[i].index] = String::cast(obj);
3218   }
3219
3220   // Allocate the hidden string which is used to identify the hidden properties
3221   // in JSObjects. The hash code has a special value so that it will not match
3222   // the empty string when searching for the property. It cannot be part of the
3223   // loop above because it needs to be allocated manually with the special
3224   // hash code in place. The hash code for the hidden_string is zero to ensure
3225   // that it will always be at the first entry in property descriptors.
3226   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3227       OneByteVector("", 0), String::kEmptyStringHash);
3228     if (!maybe_obj->ToObject(&obj)) return false;
3229   }
3230   hidden_string_ = String::cast(obj);
3231
3232   // Allocate the code_stubs dictionary. The initial size is set to avoid
3233   // expanding the dictionary during bootstrapping.
3234   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3235     if (!maybe_obj->ToObject(&obj)) return false;
3236   }
3237   set_code_stubs(UnseededNumberDictionary::cast(obj));
3238
3239
3240   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3241   // is set to avoid expanding the dictionary during bootstrapping.
3242   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3243     if (!maybe_obj->ToObject(&obj)) return false;
3244   }
3245   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3246
3247   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3248     if (!maybe_obj->ToObject(&obj)) return false;
3249   }
3250   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3251
3252   set_instanceof_cache_function(Smi::FromInt(0));
3253   set_instanceof_cache_map(Smi::FromInt(0));
3254   set_instanceof_cache_answer(Smi::FromInt(0));
3255
3256   CreateFixedStubs();
3257
3258   // Allocate the dictionary of intrinsic function names.
3259   { MaybeObject* maybe_obj =
3260         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3261     if (!maybe_obj->ToObject(&obj)) return false;
3262   }
3263   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3264                                                                        obj);
3265     if (!maybe_obj->ToObject(&obj)) return false;
3266   }
3267   set_intrinsic_function_names(NameDictionary::cast(obj));
3268
3269   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3270     if (!maybe_obj->ToObject(&obj)) return false;
3271   }
3272   set_number_string_cache(FixedArray::cast(obj));
3273
3274   // Allocate cache for single character one byte strings.
3275   { MaybeObject* maybe_obj =
3276         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3277     if (!maybe_obj->ToObject(&obj)) return false;
3278   }
3279   set_single_character_string_cache(FixedArray::cast(obj));
3280
3281   // Allocate cache for string split.
3282   { MaybeObject* maybe_obj = AllocateFixedArray(
3283       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3284     if (!maybe_obj->ToObject(&obj)) return false;
3285   }
3286   set_string_split_cache(FixedArray::cast(obj));
3287
3288   { MaybeObject* maybe_obj = AllocateFixedArray(
3289       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3290     if (!maybe_obj->ToObject(&obj)) return false;
3291   }
3292   set_regexp_multiple_cache(FixedArray::cast(obj));
3293
3294   // Allocate cache for external strings pointing to native source code.
3295   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3296     if (!maybe_obj->ToObject(&obj)) return false;
3297   }
3298   set_natives_source_cache(FixedArray::cast(obj));
3299
3300   { MaybeObject* maybe_obj = AllocateCell(undefined_value());
3301     if (!maybe_obj->ToObject(&obj)) return false;
3302   }
3303   set_undefined_cell(Cell::cast(obj));
3304
3305   // Allocate object to hold object observation state.
3306   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3307     if (!maybe_obj->ToObject(&obj)) return false;
3308   }
3309   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3310     if (!maybe_obj->ToObject(&obj)) return false;
3311   }
3312   set_observation_state(JSObject::cast(obj));
3313
3314   { MaybeObject* maybe_obj = AllocateSymbol();
3315     if (!maybe_obj->ToObject(&obj)) return false;
3316   }
3317   Symbol::cast(obj)->set_is_private(true);
3318   set_frozen_symbol(Symbol::cast(obj));
3319
3320   { MaybeObject* maybe_obj = AllocateSymbol();
3321     if (!maybe_obj->ToObject(&obj)) return false;
3322   }
3323   Symbol::cast(obj)->set_is_private(true);
3324   set_elements_transition_symbol(Symbol::cast(obj));
3325
3326   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3327     if (!maybe_obj->ToObject(&obj)) return false;
3328   }
3329   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3330   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3331
3332   { MaybeObject* maybe_obj = AllocateSymbol();
3333     if (!maybe_obj->ToObject(&obj)) return false;
3334   }
3335   Symbol::cast(obj)->set_is_private(true);
3336   set_observed_symbol(Symbol::cast(obj));
3337
3338   { MaybeObject* maybe_obj = AllocateFixedArray(0, TENURED);
3339     if (!maybe_obj->ToObject(&obj)) return false;
3340   }
3341   set_materialized_objects(FixedArray::cast(obj));
3342
3343   // Handling of script id generation is in Factory::NewScript.
3344   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3345
3346   { MaybeObject* maybe_obj = AllocateAllocationSitesScratchpad();
3347     if (!maybe_obj->ToObject(&obj)) return false;
3348   }
3349   set_allocation_sites_scratchpad(FixedArray::cast(obj));
3350   InitializeAllocationSitesScratchpad();
3351
3352   // Initialize keyed lookup cache.
3353   isolate_->keyed_lookup_cache()->Clear();
3354
3355   // Initialize context slot cache.
3356   isolate_->context_slot_cache()->Clear();
3357
3358   // Initialize descriptor cache.
3359   isolate_->descriptor_lookup_cache()->Clear();
3360
3361   // Initialize compilation cache.
3362   isolate_->compilation_cache()->Clear();
3363
3364   return true;
3365 }
3366
3367
3368 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3369   RootListIndex writable_roots[] = {
3370     kStoreBufferTopRootIndex,
3371     kStackLimitRootIndex,
3372     kNumberStringCacheRootIndex,
3373     kInstanceofCacheFunctionRootIndex,
3374     kInstanceofCacheMapRootIndex,
3375     kInstanceofCacheAnswerRootIndex,
3376     kCodeStubsRootIndex,
3377     kNonMonomorphicCacheRootIndex,
3378     kPolymorphicCodeCacheRootIndex,
3379     kLastScriptIdRootIndex,
3380     kEmptyScriptRootIndex,
3381     kRealStackLimitRootIndex,
3382     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3383     kConstructStubDeoptPCOffsetRootIndex,
3384     kGetterStubDeoptPCOffsetRootIndex,
3385     kSetterStubDeoptPCOffsetRootIndex,
3386     kStringTableRootIndex,
3387   };
3388
3389   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3390     if (root_index == writable_roots[i])
3391       return true;
3392   }
3393   return false;
3394 }
3395
3396
3397 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3398   return !RootCanBeWrittenAfterInitialization(root_index) &&
3399       !InNewSpace(roots_array_start()[root_index]);
3400 }
3401
3402
3403 Object* RegExpResultsCache::Lookup(Heap* heap,
3404                                    String* key_string,
3405                                    Object* key_pattern,
3406                                    ResultsCacheType type) {
3407   FixedArray* cache;
3408   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3409   if (type == STRING_SPLIT_SUBSTRINGS) {
3410     ASSERT(key_pattern->IsString());
3411     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3412     cache = heap->string_split_cache();
3413   } else {
3414     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3415     ASSERT(key_pattern->IsFixedArray());
3416     cache = heap->regexp_multiple_cache();
3417   }
3418
3419   uint32_t hash = key_string->Hash();
3420   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3421       ~(kArrayEntriesPerCacheEntry - 1));
3422   if (cache->get(index + kStringOffset) == key_string &&
3423       cache->get(index + kPatternOffset) == key_pattern) {
3424     return cache->get(index + kArrayOffset);
3425   }
3426   index =
3427       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3428   if (cache->get(index + kStringOffset) == key_string &&
3429       cache->get(index + kPatternOffset) == key_pattern) {
3430     return cache->get(index + kArrayOffset);
3431   }
3432   return Smi::FromInt(0);
3433 }
3434
3435
3436 void RegExpResultsCache::Enter(Heap* heap,
3437                                String* key_string,
3438                                Object* key_pattern,
3439                                FixedArray* value_array,
3440                                ResultsCacheType type) {
3441   FixedArray* cache;
3442   if (!key_string->IsInternalizedString()) return;
3443   if (type == STRING_SPLIT_SUBSTRINGS) {
3444     ASSERT(key_pattern->IsString());
3445     if (!key_pattern->IsInternalizedString()) return;
3446     cache = heap->string_split_cache();
3447   } else {
3448     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3449     ASSERT(key_pattern->IsFixedArray());
3450     cache = heap->regexp_multiple_cache();
3451   }
3452
3453   uint32_t hash = key_string->Hash();
3454   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3455       ~(kArrayEntriesPerCacheEntry - 1));
3456   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3457     cache->set(index + kStringOffset, key_string);
3458     cache->set(index + kPatternOffset, key_pattern);
3459     cache->set(index + kArrayOffset, value_array);
3460   } else {
3461     uint32_t index2 =
3462         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3463     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3464       cache->set(index2 + kStringOffset, key_string);
3465       cache->set(index2 + kPatternOffset, key_pattern);
3466       cache->set(index2 + kArrayOffset, value_array);
3467     } else {
3468       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3469       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3470       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3471       cache->set(index + kStringOffset, key_string);
3472       cache->set(index + kPatternOffset, key_pattern);
3473       cache->set(index + kArrayOffset, value_array);
3474     }
3475   }
3476   // If the array is a reasonably short list of substrings, convert it into a
3477   // list of internalized strings.
3478   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3479     for (int i = 0; i < value_array->length(); i++) {
3480       String* str = String::cast(value_array->get(i));
3481       Object* internalized_str;
3482       MaybeObject* maybe_string = heap->InternalizeString(str);
3483       if (maybe_string->ToObject(&internalized_str)) {
3484         value_array->set(i, internalized_str);
3485       }
3486     }
3487   }
3488   // Convert backing store to a copy-on-write array.
3489   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3490 }
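// Sketch of the probing scheme above (derived from the code, not part of the
// original source): each cache entry spans kArrayEntriesPerCacheEntry
// consecutive slots, and a key probes at most two entries:
//
//   uint32_t primary = (hash & (kRegExpResultsCacheSize - 1)) &
//                      ~(kArrayEntriesPerCacheEntry - 1);
//   uint32_t secondary = (primary + kArrayEntriesPerCacheEntry) &
//                        (kRegExpResultsCacheSize - 1);
//
// When both entries are occupied, Enter() clears the secondary entry and
// overwrites the primary one with the new key.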
3491
3492
3493 void RegExpResultsCache::Clear(FixedArray* cache) {
3494   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3495     cache->set(i, Smi::FromInt(0));
3496   }
3497 }
3498
3499
3500 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3501   MaybeObject* maybe_obj =
3502       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3503   return maybe_obj;
3504 }
3505
3506
3507 int Heap::FullSizeNumberStringCacheLength() {
3508   // Compute the size of the number string cache based on the maximum new
3509   // space size. The cache has a minimum size of twice the initial cache size
3510   // to ensure that it is bigger after being made 'full size'.
3511   int number_string_cache_size = max_semispace_size_ / 512;
3512   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3513                                  Min(0x4000, number_string_cache_size));
3514   // There is a string and a number per entry so the length is twice the number
3515   // of entries.
3516   return number_string_cache_size * 2;
3517 }
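// Worked example (not part of the original source; assumes the 32-bit default
// of max_semispace_size_ == 8 MB and that kInitialNumberStringCacheSize * 2
// stays below the cap): 8 MB / 512 == 16384 entries, which Min() caps at
// 0x4000 (also 16384), so the returned FixedArray length is 16384 * 2 ==
// 32768 slots -- one number and one string per entry.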
3518
3519
3520 void Heap::AllocateFullSizeNumberStringCache() {
3521   // The idea is to have a small number string cache in the snapshot to keep
3522   // boot-time memory usage down.  If we expand the number string cache while
3523   // creating the snapshot, that defeats the purpose.
3524   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3525   MaybeObject* maybe_obj =
3526       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3527   Object* new_cache;
3528   if (maybe_obj->ToObject(&new_cache)) {
3529     // We don't bother to repopulate the cache with entries from the old cache.
3530     // It will be repopulated soon enough with new strings.
3531     set_number_string_cache(FixedArray::cast(new_cache));
3532   }
3533   // If allocation fails then we just return without doing anything.  It is only
3534   // a cache, so best effort is OK here.
3535 }
3536
3537
3538 void Heap::FlushNumberStringCache() {
3539   // Flush the number to string cache.
3540   int len = number_string_cache()->length();
3541   for (int i = 0; i < len; i++) {
3542     number_string_cache()->set_undefined(i);
3543   }
3544 }
3545
3546
3547 static inline int double_get_hash(double d) {
3548   DoubleRepresentation rep(d);
3549   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3550 }
3551
3552
3553 static inline int smi_get_hash(Smi* smi) {
3554   return smi->value();
3555 }
3556
3557
3558 Object* Heap::GetNumberStringCache(Object* number) {
3559   int hash;
3560   int mask = (number_string_cache()->length() >> 1) - 1;
3561   if (number->IsSmi()) {
3562     hash = smi_get_hash(Smi::cast(number)) & mask;
3563   } else {
3564     hash = double_get_hash(number->Number()) & mask;
3565   }
3566   Object* key = number_string_cache()->get(hash * 2);
3567   if (key == number) {
3568     return String::cast(number_string_cache()->get(hash * 2 + 1));
3569   } else if (key->IsHeapNumber() &&
3570              number->IsHeapNumber() &&
3571              key->Number() == number->Number()) {
3572     return String::cast(number_string_cache()->get(hash * 2 + 1));
3573   }
3574   return undefined_value();
3575 }
3576
3577
3578 void Heap::SetNumberStringCache(Object* number, String* string) {
3579   int hash;
3580   int mask = (number_string_cache()->length() >> 1) - 1;
3581   if (number->IsSmi()) {
3582     hash = smi_get_hash(Smi::cast(number)) & mask;
3583   } else {
3584     hash = double_get_hash(number->Number()) & mask;
3585   }
3586   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3587       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3588     // The first time we have a hash collision, we move to the full-sized
3589     // number string cache.
3590     AllocateFullSizeNumberStringCache();
3591     return;
3592   }
3593   number_string_cache()->set(hash * 2, number);
3594   number_string_cache()->set(hash * 2 + 1, string);
3595 }
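// Cache layout sketch (derived from the two functions above, not part of the
// original source): the cache is a flat FixedArray of key/value pairs, so
// with mask = (length >> 1) - 1 the entry for `number` lives at
//
//   key   : number_string_cache()->get(hash * 2)
//   value : number_string_cache()->get(hash * 2 + 1)
//
// No second probe is attempted; the first collision while still on the small
// cache triggers AllocateFullSizeNumberStringCache() and the new entry is
// simply dropped.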
3596
3597
3598 MaybeObject* Heap::NumberToString(Object* number,
3599                                   bool check_number_string_cache) {
3600   isolate_->counters()->number_to_string_runtime()->Increment();
3601   if (check_number_string_cache) {
3602     Object* cached = GetNumberStringCache(number);
3603     if (cached != undefined_value()) {
3604       return cached;
3605     }
3606   }
3607
3608   char arr[100];
3609   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3610   const char* str;
3611   if (number->IsSmi()) {
3612     int num = Smi::cast(number)->value();
3613     str = IntToCString(num, buffer);
3614   } else {
3615     double num = HeapNumber::cast(number)->value();
3616     str = DoubleToCString(num, buffer);
3617   }
3618
3619   Object* js_string;
3620
3621   // We tenure the allocated string since it is referenced from the
3622   // number-string cache which lives in the old space.
3623   MaybeObject* maybe_js_string =
3624       AllocateStringFromOneByte(CStrVector(str), TENURED);
3625   if (maybe_js_string->ToObject(&js_string)) {
3626     SetNumberStringCache(number, String::cast(js_string));
3627   }
3628   return maybe_js_string;
3629 }
3630
3631
3632 MaybeObject* Heap::Uint32ToString(uint32_t value,
3633                                   bool check_number_string_cache) {
3634   Object* number;
3635   MaybeObject* maybe = NumberFromUint32(value);
3636   if (!maybe->To<Object>(&number)) return maybe;
3637   return NumberToString(number, check_number_string_cache);
3638 }
3639
3640
3641 MaybeObject* Heap::AllocateAllocationSitesScratchpad() {
3642   MaybeObject* maybe_obj =
3643       AllocateFixedArray(kAllocationSiteScratchpadSize, TENURED);
3644   return maybe_obj;
3645 }
3646
3647
3648 void Heap::FlushAllocationSitesScratchpad() {
3649   for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
3650     allocation_sites_scratchpad()->set_undefined(i);
3651   }
3652   allocation_sites_scratchpad_length_ = 0;
3653 }
3654
3655
3656 void Heap::InitializeAllocationSitesScratchpad() {
3657   ASSERT(allocation_sites_scratchpad()->length() ==
3658          kAllocationSiteScratchpadSize);
3659   for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
3660     allocation_sites_scratchpad()->set_undefined(i);
3661   }
3662 }
3663
3664
3665 void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
3666   if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
3667     allocation_sites_scratchpad()->set(
3668         allocation_sites_scratchpad_length_, site);
3669     allocation_sites_scratchpad_length_++;
3670   }
3671 }
3672
3673
3674 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3675   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3676 }
3677
3678
3679 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3680     ExternalArrayType array_type) {
3681   switch (array_type) {
3682 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3683     case kExternal##Type##Array:                                              \
3684       return kExternal##Type##ArrayMapRootIndex;
3685
3686     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3687 #undef ARRAY_TYPE_TO_ROOT_INDEX
3688
3689     default:
3690       UNREACHABLE();
3691       return kUndefinedValueRootIndex;
3692   }
3693 }
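// Illustrative expansion (not part of the original source): for instance the
// TYPED_ARRAYS entry for Float64 generates roughly
//
//   case kExternalFloat64Array:
//     return kExternalFloat64ArrayMapRootIndex;
//
// so each external array type maps onto its dedicated root-list map.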
3694
3695
3696 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3697   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3698 }
3699
3700
3701 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3702     ExternalArrayType array_type) {
3703   switch (array_type) {
3704 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3705     case kExternal##Type##Array:                                              \
3706       return kFixed##Type##ArrayMapRootIndex;
3707
3708     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3709 #undef ARRAY_TYPE_TO_ROOT_INDEX
3710
3711     default:
3712       UNREACHABLE();
3713       return kUndefinedValueRootIndex;
3714   }
3715 }
3716
3717
3718 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3719     ElementsKind elementsKind) {
3720   switch (elementsKind) {
3721 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
3722     case EXTERNAL_##TYPE##_ELEMENTS:                                          \
3723       return kEmptyExternal##Type##ArrayRootIndex;
3724
3725     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3726 #undef ELEMENT_KIND_TO_ROOT_INDEX
3727
3728     default:
3729       UNREACHABLE();
3730       return kUndefinedValueRootIndex;
3731   }
3732 }
3733
3734
3735 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3736   return ExternalArray::cast(
3737       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3738 }
3739
3740
3741 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3742   // We need to distinguish the minus zero value and this cannot be
3743   // done after conversion to int. Doing this by comparing bit
3744   // patterns is faster than using fpclassify() et al.
3745   if (IsMinusZero(value)) {
3746     return AllocateHeapNumber(-0.0, pretenure);
3747   }
3748
3749   int int_value = FastD2I(value);
3750   if (value == int_value && Smi::IsValid(int_value)) {
3751     return Smi::FromInt(int_value);
3752   }
3753
3754   // Materialize the value in the heap.
3755   return AllocateHeapNumber(value, pretenure);
3756 }
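// Behaviour sketch (derived from the code above, not part of the original
// source), assuming the integer fits in a Smi on the target architecture:
//
//   heap->NumberFromDouble(3.0)   // Smi::FromInt(3), no allocation
//   heap->NumberFromDouble(-0.0)  // freshly allocated HeapNumber holding -0
//   heap->NumberFromDouble(0.5)   // freshly allocated HeapNumber holding 0.5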
3757
3758
3759 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3760   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3761   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3762   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3763   Foreign* result;
3764   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3765   if (!maybe_result->To(&result)) return maybe_result;
3766   result->set_foreign_address(address);
3767   return result;
3768 }
3769
3770
3771 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3772   SharedFunctionInfo* share;
3773   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3774   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3775
3776   // Set pointer fields.
3777   share->set_name(name);
3778   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3779   share->set_code(illegal);
3780   share->set_optimized_code_map(Smi::FromInt(0));
3781   share->set_scope_info(ScopeInfo::Empty(isolate_));
3782   Code* construct_stub =
3783       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3784   share->set_construct_stub(construct_stub);
3785   share->set_instance_class_name(Object_string());
3786   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3787   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3788   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3789   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3790   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3791   share->set_ast_node_count(0);
3792   share->set_counters(0);
3793
3794   // Set integer fields (smi or int, depending on the architecture).
3795   share->set_length(0);
3796   share->set_formal_parameter_count(0);
3797   share->set_expected_nof_properties(0);
3798   share->set_num_literals(0);
3799   share->set_start_position_and_type(0);
3800   share->set_end_position(0);
3801   share->set_function_token_position(0);
3802   // All compiler hints default to false or 0.
3803   share->set_compiler_hints(0);
3804   share->set_opt_count_and_bailout_reason(0);
3805
3806   return share;
3807 }
3808
3809
3810 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3811                                            JSArray* arguments,
3812                                            int start_position,
3813                                            int end_position,
3814                                            Object* script,
3815                                            Object* stack_trace,
3816                                            Object* stack_frames) {
3817   Object* result;
3818   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3819     if (!maybe_result->ToObject(&result)) return maybe_result;
3820   }
3821   JSMessageObject* message = JSMessageObject::cast(result);
3822   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3823   message->initialize_elements();
3824   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3825   message->set_type(type);
3826   message->set_arguments(arguments);
3827   message->set_start_position(start_position);
3828   message->set_end_position(end_position);
3829   message->set_script(script);
3830   message->set_stack_trace(stack_trace);
3831   message->set_stack_frames(stack_frames);
3832   return result;
3833 }
3834
3835
3836 MaybeObject* Heap::AllocateExternalStringFromAscii(
3837     const ExternalAsciiString::Resource* resource) {
3838   size_t length = resource->length();
3839   if (length > static_cast<size_t>(String::kMaxLength)) {
3840     isolate()->context()->mark_out_of_memory();
3841     return Failure::OutOfMemoryException(0x5);
3842   }
3843
3844   Map* map = external_ascii_string_map();
3845   Object* result;
3846   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3847     if (!maybe_result->ToObject(&result)) return maybe_result;
3848   }
3849
3850   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3851   external_string->set_length(static_cast<int>(length));
3852   external_string->set_hash_field(String::kEmptyHashField);
3853   external_string->set_resource(resource);
3854
3855   return result;
3856 }
3857
3858
3859 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3860     const ExternalTwoByteString::Resource* resource) {
3861   size_t length = resource->length();
3862   if (length > static_cast<size_t>(String::kMaxLength)) {
3863     isolate()->context()->mark_out_of_memory();
3864     return Failure::OutOfMemoryException(0x6);
3865   }
3866
3867   // For small strings we check whether the resource contains only
3868   // one-byte characters.  If so, we use a different string map.
3869   static const size_t kOneByteCheckLengthLimit = 32;
3870   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3871       String::IsOneByte(resource->data(), static_cast<int>(length));
3872   Map* map = is_one_byte ?
3873       external_string_with_one_byte_data_map() : external_string_map();
3874   Object* result;
3875   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3876     if (!maybe_result->ToObject(&result)) return maybe_result;
3877   }
3878
3879   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3880   external_string->set_length(static_cast<int>(length));
3881   external_string->set_hash_field(String::kEmptyHashField);
3882   external_string->set_resource(resource);
3883
3884   return result;
3885 }
3886
3887
3888 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3889   if (code <= String::kMaxOneByteCharCode) {
3890     Object* value = single_character_string_cache()->get(code);
3891     if (value != undefined_value()) return value;
3892
3893     uint8_t buffer[1];
3894     buffer[0] = static_cast<uint8_t>(code);
3895     Object* result;
3896     OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
3897     MaybeObject* maybe_result = InternalizeStringWithKey(&key);
3898
3899     if (!maybe_result->ToObject(&result)) return maybe_result;
3900     single_character_string_cache()->set(code, result);
3901     return result;
3902   }
3903
3904   SeqTwoByteString* result;
3905   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3906     if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
3907   }
3908   result->SeqTwoByteStringSet(0, code);
3909   return result;
3910 }
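// Behaviour sketch (derived from the code above, not part of the original
// source; ignores the MaybeObject unwrapping real callers must perform):
//
//   LookupSingleCharacterStringFromCode('x')     // cached one-byte string,
//   LookupSingleCharacterStringFromCode('x')     // same object as above
//   LookupSingleCharacterStringFromCode(0x4E2D)  // fresh SeqTwoByteString
//                                                // of length 1, not cached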
3911
3912
3913 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3914   if (length < 0 || length > ByteArray::kMaxLength) {
3915     return Failure::OutOfMemoryException(0x7);
3916   }
3917   int size = ByteArray::SizeFor(length);
3918   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3919   Object* result;
3920   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3921     if (!maybe_result->ToObject(&result)) return maybe_result;
3922   }
3923
3924   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3925       byte_array_map());
3926   reinterpret_cast<ByteArray*>(result)->set_length(length);
3927   return result;
3928 }
3929
3930
3931 void Heap::CreateFillerObjectAt(Address addr, int size) {
3932   if (size == 0) return;
3933   HeapObject* filler = HeapObject::FromAddress(addr);
3934   if (size == kPointerSize) {
3935     filler->set_map_no_write_barrier(one_pointer_filler_map());
3936   } else if (size == 2 * kPointerSize) {
3937     filler->set_map_no_write_barrier(two_pointer_filler_map());
3938   } else {
3939     filler->set_map_no_write_barrier(free_space_map());
3940     FreeSpace::cast(filler)->set_size(size);
3941   }
3942 }
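// Sketch of the three filler shapes (derived from the code above, not part of
// the original source; `addr` is hypothetical):
//
//   CreateFillerObjectAt(addr, kPointerSize);      // one_pointer_filler_map
//   CreateFillerObjectAt(addr, 2 * kPointerSize);  // two_pointer_filler_map
//   CreateFillerObjectAt(addr, 64);                // FreeSpace of size 64
//
// This is how CreateCode() below discards a movable first allocation before
// retrying in large object space.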
3943
3944
3945 MaybeObject* Heap::AllocateExternalArray(int length,
3946                                          ExternalArrayType array_type,
3947                                          void* external_pointer,
3948                                          PretenureFlag pretenure) {
3949   int size = ExternalArray::kAlignedSize;
3950   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3951   Object* result;
3952   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3953     if (!maybe_result->ToObject(&result)) return maybe_result;
3954   }
3955
3956   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3957       MapForExternalArrayType(array_type));
3958   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3959   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3960       external_pointer);
3961
3962   return result;
3963 }
3964
3965 static void ForFixedTypedArray(ExternalArrayType array_type,
3966                                int* element_size,
3967                                ElementsKind* element_kind) {
3968   switch (array_type) {
3969 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
3970     case kExternal##Type##Array:                                              \
3971       *element_size = size;                                                   \
3972       *element_kind = TYPE##_ELEMENTS;                                        \
3973       return;
3974
3975     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3976 #undef TYPED_ARRAY_CASE
3977
3978     default:
3979       *element_size = 0;  // Bogus
3980       *element_kind = UINT8_ELEMENTS;  // Bogus
3981       UNREACHABLE();
3982   }
3983 }
3984
3985
3986 MaybeObject* Heap::AllocateFixedTypedArray(int length,
3987                                            ExternalArrayType array_type,
3988                                            PretenureFlag pretenure) {
3989   int element_size;
3990   ElementsKind elements_kind;
3991   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3992   int size = OBJECT_POINTER_ALIGN(
3993       length * element_size + FixedTypedArrayBase::kDataOffset);
3994 #ifndef V8_HOST_ARCH_64_BIT
3995   if (array_type == kExternalFloat64Array) {
3996     size += kPointerSize;
3997   }
3998 #endif
3999   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4000
4001   HeapObject* object;
4002   MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
4003   if (!maybe_object->To(&object)) return maybe_object;
4004
4005   if (array_type == kExternalFloat64Array) {
4006     object = EnsureDoubleAligned(this, object, size);
4007   }
4008
4009   FixedTypedArrayBase* elements =
4010       reinterpret_cast<FixedTypedArrayBase*>(object);
4011   elements->set_map(MapForFixedTypedArray(array_type));
4012   elements->set_length(length);
4013   return elements;
4014 }
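// Worked size example (not part of the original source; assumes
// FixedTypedArrayBase::kDataOffset is pointer-aligned): for
// kExternalFloat64Array with length == 4, ForFixedTypedArray() yields
// element_size == 8, so size == OBJECT_POINTER_ALIGN(4 * 8 + kDataOffset),
// plus an extra kPointerSize of slack on 32-bit targets so that
// EnsureDoubleAligned() can shift the object to an 8-byte boundary.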
4015
4016
4017 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4018                               Code::Flags flags,
4019                               Handle<Object> self_reference,
4020                               bool immovable,
4021                               bool crankshafted,
4022                               int prologue_offset) {
4023   // Allocate the ByteArray before the Code object, so that we do not risk
4024   // leaving an uninitialized Code object behind (and breaking the heap).
4025   ByteArray* reloc_info;
4026   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4027   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4028
4029   // Compute size.
4030   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4031   int obj_size = Code::SizeFor(body_size);
4032   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4033   MaybeObject* maybe_result;
4034   // Large code objects and code objects which should stay at a fixed address
4035   // are allocated in large object space.
4036   HeapObject* result;
4037   bool force_lo_space = obj_size > code_space()->AreaSize();
4038   if (force_lo_space) {
4039     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4040   } else {
4041     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4042   }
4043   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4044
4045   if (immovable && !force_lo_space &&
4046       // Objects on the first page of each space are never moved.
4047       !code_space_->FirstPage()->Contains(result->address())) {
4048     // Discard the first code allocation, which was on a page where it could be
4049     // moved.
4050     CreateFillerObjectAt(result->address(), obj_size);
4051     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4052     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4053   }
4054
4055   // Initialize the object
4056   result->set_map_no_write_barrier(code_map());
4057   Code* code = Code::cast(result);
4058   ASSERT(!isolate_->code_range()->exists() ||
4059       isolate_->code_range()->contains(code->address()));
4060   code->set_instruction_size(desc.instr_size);
4061   code->set_relocation_info(reloc_info);
4062   code->set_flags(flags);
4063   code->set_raw_kind_specific_flags1(0);
4064   code->set_raw_kind_specific_flags2(0);
4065   code->set_is_crankshafted(crankshafted);
4066   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4067   code->set_raw_type_feedback_info(undefined_value());
4068   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4069   code->set_gc_metadata(Smi::FromInt(0));
4070   code->set_ic_age(global_ic_age_);
4071   code->set_prologue_offset(prologue_offset);
4072   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4073     code->set_marked_for_deoptimization(false);
4074   }
4075   code->set_constant_pool(empty_constant_pool_array());
4076
4077 #ifdef ENABLE_DEBUGGER_SUPPORT
4078   if (code->kind() == Code::FUNCTION) {
4079     code->set_has_debug_break_slots(
4080         isolate_->debugger()->IsDebuggerActive());
4081   }
4082 #endif
4083
4084   // Allow self-references to the created code object by patching the handle to
4085   // point to the newly allocated Code object.
4086   if (!self_reference.is_null()) {
4087     *(self_reference.location()) = code;
4088   }
4089   // Migrate generated code.
4090   // The generated code can contain Object** values (typically from handles)
4091   // that are dereferenced during the copy to point directly to the actual heap
4092   // objects. These pointers can include references to the code object itself,
4093   // through the self_reference parameter.
4094   code->CopyFrom(desc);
4095
4096 #ifdef VERIFY_HEAP
4097   if (FLAG_verify_heap) {
4098     code->Verify();
4099   }
4100 #endif
4101   return code;
4102 }
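// A hedged usage sketch (not from the original source; the parameter values
// are hypothetical and rely on the default arguments declared in heap.h).
// A caller typically fills a CodeDesc from an assembler and hands it here:
//
//   CodeDesc desc;
//   masm.GetCode(&desc);
//   MaybeObject* maybe_code = heap->CreateCode(
//       desc, Code::ComputeFlags(Code::STUB), Handle<Object>::null());
//   Code* code;
//   if (!maybe_code->To(&code)) return maybe_code;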
4103
4104
4105 MaybeObject* Heap::CopyCode(Code* code) {
4106   // Allocate an object the same size as the code object.
4107   int obj_size = code->Size();
4108   MaybeObject* maybe_result;
4109   if (obj_size > code_space()->AreaSize()) {
4110     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4111   } else {
4112     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4113   }
4114
4115   Object* result;
4116   if (!maybe_result->ToObject(&result)) return maybe_result;
4117
4118   // Copy code object.
4119   Address old_addr = code->address();
4120   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4121   CopyBlock(new_addr, old_addr, obj_size);
4122   // Relocate the copy.
4123   Code* new_code = Code::cast(result);
4124   ASSERT(!isolate_->code_range()->exists() ||
4125       isolate_->code_range()->contains(code->address()));
4126   new_code->Relocate(new_addr - old_addr);
4127   return new_code;
4128 }
4129
4130
4131 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4132   // Allocate the ByteArray before the Code object, so that we do not risk
4133   // leaving an uninitialized Code object behind (and breaking the heap).
4134   Object* reloc_info_array;
4135   { MaybeObject* maybe_reloc_info_array =
4136         AllocateByteArray(reloc_info.length(), TENURED);
4137     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4138       return maybe_reloc_info_array;
4139     }
4140   }
4141
4142   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4143
4144   int new_obj_size = Code::SizeFor(new_body_size);
4145
4146   Address old_addr = code->address();
4147
4148   size_t relocation_offset =
4149       static_cast<size_t>(code->instruction_end() - old_addr);
4150
4151   MaybeObject* maybe_result;
4152   if (new_obj_size > code_space()->AreaSize()) {
4153     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4154   } else {
4155     maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4156   }
4157
4158   Object* result;
4159   if (!maybe_result->ToObject(&result)) return maybe_result;
4160
4161   // Copy code object.
4162   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4163
4164   // Copy header and instructions.
4165   CopyBytes(new_addr, old_addr, relocation_offset);
4166
4167   Code* new_code = Code::cast(result);
4168   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4169
4170   // Copy patched rinfo.
4171   CopyBytes(new_code->relocation_start(),
4172             reloc_info.start(),
4173             static_cast<size_t>(reloc_info.length()));
4174
4175   // Relocate the copy.
4176   ASSERT(!isolate_->code_range()->exists() ||
4177       isolate_->code_range()->contains(code->address()));
4178   new_code->Relocate(new_addr - old_addr);
4179
4180 #ifdef VERIFY_HEAP
4181   if (FLAG_verify_heap) {
4182     code->Verify();
4183   }
4184 #endif
4185   return new_code;
4186 }
4187
4188
4189 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4190                                        AllocationSite* allocation_site) {
4191   memento->set_map_no_write_barrier(allocation_memento_map());
4192   ASSERT(allocation_site->map() == allocation_site_map());
4193   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4194   if (FLAG_allocation_site_pretenuring) {
4195     allocation_site->IncrementMementoCreateCount();
4196   }
4197 }
4198
4199
4200 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4201     Handle<AllocationSite> allocation_site) {
4202   ASSERT(gc_state_ == NOT_IN_GC);
4203   ASSERT(map->instance_type() != MAP_TYPE);
4204   // If allocation failures are disallowed, we may allocate in a different
4205   // space when new space is full and the object is not a large object.
4206   AllocationSpace retry_space =
4207       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4208   int size = map->instance_size() + AllocationMemento::kSize;
4209   Object* result;
4210   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4211   if (!maybe_result->ToObject(&result)) return maybe_result;
4212   // No need for write barrier since object is white and map is in old space.
4213   HeapObject::cast(result)->set_map_no_write_barrier(map);
4214   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4215       reinterpret_cast<Address>(result) + map->instance_size());
4216   InitializeAllocationMemento(alloc_memento, *allocation_site);
4217   return result;
4218 }
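// Resulting layout sketch (derived from the code above, not part of the
// original source):
//
//   result --> +-------------------------------+
//              | object of map->instance_size  |
//              +-------------------------------+
//              | AllocationMemento             |  <- alloc_memento, points
//              +-------------------------------+     back to *allocation_site
//
// so the memento sits immediately after the object in the same allocation.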
4219
4220
4221 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4222   ASSERT(gc_state_ == NOT_IN_GC);
4223   ASSERT(map->instance_type() != MAP_TYPE);
4224   // If allocation failures are disallowed, we may allocate in a different
4225   // space when new space is full and the object is not a large object.
4226   AllocationSpace retry_space =
4227       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4228   int size = map->instance_size();
4229   Object* result;
4230   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4231   if (!maybe_result->ToObject(&result)) return maybe_result;
4232   // No need for write barrier since object is white and map is in old space.
4233   HeapObject::cast(result)->set_map_no_write_barrier(map);
4234   return result;
4235 }
4236
4237
4238 void Heap::InitializeFunction(JSFunction* function,
4239                               SharedFunctionInfo* shared,
4240                               Object* prototype) {
4241   ASSERT(!prototype->IsMap());
4242   function->initialize_properties();
4243   function->initialize_elements();
4244   function->set_shared(shared);
4245   function->set_code(shared->code());
4246   function->set_prototype_or_initial_map(prototype);
4247   function->set_context(undefined_value());
4248   function->set_literals_or_bindings(empty_fixed_array());
4249   function->set_next_function_link(undefined_value());
4250 }
4251
4252
4253 MaybeObject* Heap::AllocateFunction(Map* function_map,
4254                                     SharedFunctionInfo* shared,
4255                                     Object* prototype,
4256                                     PretenureFlag pretenure) {
4257   AllocationSpace space =
4258       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4259   Object* result;
4260   { MaybeObject* maybe_result = Allocate(function_map, space);
4261     if (!maybe_result->ToObject(&result)) return maybe_result;
4262   }
4263   InitializeFunction(JSFunction::cast(result), shared, prototype);
4264   return result;
4265 }
4266
4267
4268 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4269   // To get fast allocation and map sharing for arguments objects, we
4270   // allocate them based on an arguments boilerplate.
4271
4272   JSObject* boilerplate;
4273   int arguments_object_size;
4274   bool strict_mode_callee = callee->IsJSFunction() &&
4275       !JSFunction::cast(callee)->shared()->is_classic_mode();
4276   if (strict_mode_callee) {
4277     boilerplate =
4278         isolate()->context()->native_context()->
4279             strict_mode_arguments_boilerplate();
4280     arguments_object_size = kArgumentsObjectSizeStrict;
4281   } else {
4282     boilerplate =
4283         isolate()->context()->native_context()->arguments_boilerplate();
4284     arguments_object_size = kArgumentsObjectSize;
4285   }
4286
4287   // Check that the size of the boilerplate matches our
4288   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4289   // on the size being a known constant.
4290   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4291
4292   // Do the allocation.
4293   Object* result;
4294   { MaybeObject* maybe_result =
4295         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4296     if (!maybe_result->ToObject(&result)) return maybe_result;
4297   }
4298
4299   // Copy the content. The arguments boilerplate doesn't have any
4300   // fields that point to new space so it's safe to skip the write
4301   // barrier here.
4302   CopyBlock(HeapObject::cast(result)->address(),
4303             boilerplate->address(),
4304             JSObject::kHeaderSize);
4305
4306   // Set the length property.
4307   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4308                                                 Smi::FromInt(length),
4309                                                 SKIP_WRITE_BARRIER);
4310   // Set the callee property for non-strict mode arguments objects only.
4311   if (!strict_mode_callee) {
4312     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4313                                                   callee);
4314   }
4315
4316   // Check the state of the object.
4317   ASSERT(JSObject::cast(result)->HasFastProperties());
4318   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4319
4320   return result;
4321 }
4322
4323
4324 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4325                                      FixedArray* properties,
4326                                      Map* map) {
4327   obj->set_properties(properties);
4328   obj->initialize_elements();
4329   // TODO(1240798): Initialize the object's body using valid initial values
4330   // according to the object's initial map.  For example, if the map's
4331   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4332   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4333   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4334   // verification code has to cope with (temporarily) invalid objects.  See,
4335   // for example, JSArray::JSArrayVerify.
4336   Object* filler;
4337   // We cannot always fill with one_pointer_filler_map because objects
4338   // created from API functions expect their internal fields to be initialized
4339   // with undefined_value.
4340   // Pre-allocated fields need to be initialized with undefined_value as well
4341   // so that object accesses before the constructor completes (e.g. in the
4342   // debugger) will not cause a crash.
4343   if (map->constructor()->IsJSFunction() &&
4344       JSFunction::cast(map->constructor())->shared()->
4345           IsInobjectSlackTrackingInProgress()) {
4346     // We might want to shrink the object later.
4347     ASSERT(obj->GetInternalFieldCount() == 0);
4348     filler = Heap::one_pointer_filler_map();
4349   } else {
4350     filler = Heap::undefined_value();
4351   }
4352   obj->InitializeBody(map, Heap::undefined_value(), filler);
4353 }
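
// The filler choice above, condensed into a hypothetical predicate: while
// in-object slack tracking is in progress the body is filled with
// one_pointer_filler_map so unused fields can later be trimmed; otherwise
// undefined_value is used so that early accesses (e.g. from the debugger)
// see a valid value.
//
//   static bool UseOnePointerFiller(Map* map) {
//     Object* ctor = map->constructor();
//     return ctor->IsJSFunction() &&
//            JSFunction::cast(ctor)->shared()->
//                IsInobjectSlackTrackingInProgress();
//   }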
4354
4355
4356 MaybeObject* Heap::AllocateJSObjectFromMap(
4357     Map* map, PretenureFlag pretenure, bool allocate_properties) {
4358   // JSFunctions should be allocated using AllocateFunction to be
4359   // properly initialized.
4360   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4361
4362   // Both types of global objects should be allocated using
4363   // AllocateGlobalObject to be properly initialized.
4364   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4365   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4366
4367   // Allocate the backing storage for the properties.
4368   FixedArray* properties;
4369   if (allocate_properties) {
4370     int prop_size = map->InitialPropertiesLength();
4371     ASSERT(prop_size >= 0);
4372     { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4373       if (!maybe_properties->To(&properties)) return maybe_properties;
4374     }
4375   } else {
4376     properties = empty_fixed_array();
4377   }
4378
4379   // Allocate the JSObject.
4380   int size = map->instance_size();
4381   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4382   Object* obj;
4383   MaybeObject* maybe_obj = Allocate(map, space);
4384   if (!maybe_obj->To(&obj)) return maybe_obj;
4385
4386   // Initialize the JSObject.
4387   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4388   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4389          JSObject::cast(obj)->HasExternalArrayElements());
4390   return obj;
4391 }
4392
4393
4394 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4395     Map* map, Handle<AllocationSite> allocation_site) {
4396   // JSFunctions should be allocated using AllocateFunction to be
4397   // properly initialized.
4398   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4399
4400   // Both types of global objects should be allocated using
4401   // AllocateGlobalObject to be properly initialized.
4402   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4403   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4404
4405   // Allocate the backing storage for the properties.
4406   int prop_size = map->InitialPropertiesLength();
4407   ASSERT(prop_size >= 0);
4408   FixedArray* properties;
4409   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4410     if (!maybe_properties->To(&properties)) return maybe_properties;
4411   }
4412
4413   // Allocate the JSObject.
4414   int size = map->instance_size();
4415   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4416   Object* obj;
4417   MaybeObject* maybe_obj =
4418       AllocateWithAllocationSite(map, space, allocation_site);
4419   if (!maybe_obj->To(&obj)) return maybe_obj;
4420
4421   // Initialize the JSObject.
4422   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4423   ASSERT(JSObject::cast(obj)->HasFastElements());
4424   return obj;
4425 }
4426
4427
4428 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4429                                     PretenureFlag pretenure) {
4430   ASSERT(constructor->has_initial_map());
4431   // Allocate the object based on the constructor's initial map.
4432   MaybeObject* result = AllocateJSObjectFromMap(
4433       constructor->initial_map(), pretenure);
4434 #ifdef DEBUG
4435   // Make sure result is NOT a global object if valid.
4436   Object* non_failure;
4437   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4438 #endif
4439   return result;
4440 }
4441
4442
4443 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4444     Handle<AllocationSite> allocation_site) {
4445   ASSERT(constructor->has_initial_map());
4446   // Allocate the object based on the constructor's initial map, or on the
4447   // elements kind advice recorded in the allocation site.
4448   Map* initial_map = constructor->initial_map();
4449
4450   ElementsKind to_kind = allocation_site->GetElementsKind();
4451   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4452   if (to_kind != initial_map->elements_kind()) {
4453     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4454     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4455     // Possibly alter the mode, since we found an updated elements kind
4456   // in the allocation site.
4457     mode = AllocationSite::GetMode(to_kind);
4458   }
4459
4460   MaybeObject* result;
4461   if (mode == TRACK_ALLOCATION_SITE) {
4462     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4463         allocation_site);
4464   } else {
4465     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4466   }
4467 #ifdef DEBUG
4468   // Make sure result is NOT a global object if valid.
4469   Object* non_failure;
4470   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4471 #endif
4472   return result;
4473 }
4474
4475
4476 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4477   // Allocate a fresh map. Modules do not have a prototype.
4478   Map* map;
4479   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4480   if (!maybe_map->To(&map)) return maybe_map;
4481   // Allocate the object based on the map.
4482   JSModule* module;
4483   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4484   if (!maybe_module->To(&module)) return maybe_module;
4485   module->set_context(context);
4486   module->set_scope_info(scope_info);
4487   return module;
4488 }
4489
4490
4491 MaybeObject* Heap::AllocateJSArrayAndStorage(
4492     ElementsKind elements_kind,
4493     int length,
4494     int capacity,
4495     ArrayStorageAllocationMode mode,
4496     PretenureFlag pretenure) {
4497   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4498   JSArray* array;
4499   if (!maybe_array->To(&array)) return maybe_array;
4500
4501   // TODO(mvstanton): this body of code is duplicated in AllocateJSArrayStorage
4502   // for performance reasons.
4503   ASSERT(capacity >= length);
4504
4505   if (capacity == 0) {
4506     array->set_length(Smi::FromInt(0));
4507     array->set_elements(empty_fixed_array());
4508     return array;
4509   }
4510
4511   FixedArrayBase* elms;
4512   MaybeObject* maybe_elms = NULL;
4513   if (IsFastDoubleElementsKind(elements_kind)) {
4514     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4515       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4516     } else {
4517       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4518       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4519     }
4520   } else {
4521     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4522     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4523       maybe_elms = AllocateUninitializedFixedArray(capacity);
4524     } else {
4525       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4526       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4527     }
4528   }
4529   if (!maybe_elms->To(&elms)) return maybe_elms;
4530
4531   array->set_elements(elms);
4532   array->set_length(Smi::FromInt(length));
4533   return array;
4534 }
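
// Hypothetical call site for the allocator above: a FAST_ELEMENTS array of
// length 0 whose backing store has capacity 16 and is pre-filled with holes
// would be requested roughly as follows.
//
//   JSArray* array;
//   MaybeObject* maybe = heap->AllocateJSArrayAndStorage(
//       FAST_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//       NOT_TENURED);
//   if (!maybe->To(&array)) return maybe;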
4535
4536
4537 MaybeObject* Heap::AllocateJSArrayStorage(
4538     JSArray* array,
4539     int length,
4540     int capacity,
4541     ArrayStorageAllocationMode mode) {
4542   ASSERT(capacity >= length);
4543
4544   if (capacity == 0) {
4545     array->set_length(Smi::FromInt(0));
4546     array->set_elements(empty_fixed_array());
4547     return array;
4548   }
4549
4550   FixedArrayBase* elms;
4551   MaybeObject* maybe_elms = NULL;
4552   ElementsKind elements_kind = array->GetElementsKind();
4553   if (IsFastDoubleElementsKind(elements_kind)) {
4554     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4555       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4556     } else {
4557       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4558       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4559     }
4560   } else {
4561     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4562     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4563       maybe_elms = AllocateUninitializedFixedArray(capacity);
4564     } else {
4565       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4566       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4567     }
4568   }
4569   if (!maybe_elms->To(&elms)) return maybe_elms;
4570
4571   array->set_elements(elms);
4572   array->set_length(Smi::FromInt(length));
4573   return array;
4574 }
4575
4576
4577 MaybeObject* Heap::AllocateJSArrayWithElements(
4578     FixedArrayBase* elements,
4579     ElementsKind elements_kind,
4580     int length,
4581     PretenureFlag pretenure) {
4582   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4583   JSArray* array;
4584   if (!maybe_array->To(&array)) return maybe_array;
4585
4586   array->set_elements(elements);
4587   array->set_length(Smi::FromInt(length));
4588   array->ValidateElements();
4589   return array;
4590 }
4591
4592
4593 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4594   // Allocate map.
4595   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4596   // maps. Will probably depend on the identity of the handler object, too.
4597   Map* map;
4598   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4599   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4600   map->set_prototype(prototype);
4601
4602   // Allocate the proxy object.
4603   JSProxy* result;
4604   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4605   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4606   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4607   result->set_handler(handler);
4608   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4609   return result;
4610 }
4611
4612
4613 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4614                                            Object* call_trap,
4615                                            Object* construct_trap,
4616                                            Object* prototype) {
4617   // Allocate map.
4618   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4619   // maps. Will probably depend on the identity of the handler object, too.
4620   Map* map;
4621   MaybeObject* maybe_map_obj =
4622       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4623   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4624   map->set_prototype(prototype);
4625
4626   // Allocate the proxy object.
4627   JSFunctionProxy* result;
4628   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4629   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4630   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4631   result->set_handler(handler);
4632   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4633   result->set_call_trap(call_trap);
4634   result->set_construct_trap(construct_trap);
4635   return result;
4636 }
4637
4638
4639 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4640   // Never used to copy functions.  If functions need to be copied we
4641   // have to be careful to clear the literals array.
4642   SLOW_ASSERT(!source->IsJSFunction());
4643
4644   // Make the clone.
4645   Map* map = source->map();
4646   int object_size = map->instance_size();
4647   Object* clone;
4648
4649   ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4650
4651   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4652
4653   // If we're forced to always allocate, we use the general allocation
4654   // functions which may leave us with an object in old space.
4655   if (always_allocate()) {
4656     { MaybeObject* maybe_clone =
4657           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4658       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4659     }
4660     Address clone_address = HeapObject::cast(clone)->address();
4661     CopyBlock(clone_address,
4662               source->address(),
4663               object_size);
4664     // Update write barrier for all fields that lie beyond the header.
4665     RecordWrites(clone_address,
4666                  JSObject::kHeaderSize,
4667                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4668   } else {
4669     wb_mode = SKIP_WRITE_BARRIER;
4670
4671     { int adjusted_object_size = site != NULL
4672           ? object_size + AllocationMemento::kSize
4673           : object_size;
4674       MaybeObject* maybe_clone =
4675           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4676       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4677     }
4678     SLOW_ASSERT(InNewSpace(clone));
4679     // Since we know the clone is allocated in new space, we can copy
4680     // the contents without worrying about updating the write barrier.
4681     CopyBlock(HeapObject::cast(clone)->address(),
4682               source->address(),
4683               object_size);
4684
4685     if (site != NULL) {
4686       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4687           reinterpret_cast<Address>(clone) + object_size);
4688       InitializeAllocationMemento(alloc_memento, site);
4689     }
4690   }
4691
4692   SLOW_ASSERT(
4693       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4694   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4695   FixedArray* properties = FixedArray::cast(source->properties());
4696   // Update elements if necessary.
4697   if (elements->length() > 0) {
4698     Object* elem;
4699     { MaybeObject* maybe_elem;
4700       if (elements->map() == fixed_cow_array_map()) {
4701         maybe_elem = FixedArray::cast(elements);
4702       } else if (source->HasFastDoubleElements()) {
4703         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4704       } else {
4705         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4706       }
4707       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4708     }
4709     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4710   }
4711   // Update properties if necessary.
4712   if (properties->length() > 0) {
4713     Object* prop;
4714     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4715       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4716     }
4717     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4718   }
4719   // Return the new clone.
4720   return clone;
4721 }
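
// The write-barrier reasoning above, summarized: stores into a clone that
// itself lives in new space never need to be recorded (the store buffer only
// tracks pointers from old space into new space), so SKIP_WRITE_BARRIER is
// safe; when always_allocate() may place the clone in old space, RecordWrites
// covers every field past the header. A condensed (hypothetical) restatement:
//
//   WriteBarrierMode mode =
//       heap->InNewSpace(clone) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;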
4722
4723
4724 MaybeObject* Heap::ReinitializeJSReceiver(
4725     JSReceiver* object, InstanceType type, int size) {
4726   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4727
4728   // Allocate fresh map.
4729   // TODO(rossberg): Once we optimize proxies, cache these maps.
4730   Map* map;
4731   MaybeObject* maybe = AllocateMap(type, size);
4732   if (!maybe->To<Map>(&map)) return maybe;
4733
4734   // Check that the receiver is at least as large as the fresh object.
4735   int size_difference = object->map()->instance_size() - map->instance_size();
4736   ASSERT(size_difference >= 0);
4737
4738   map->set_prototype(object->map()->prototype());
4739
4740   // Allocate the backing storage for the properties.
4741   int prop_size = map->unused_property_fields() - map->inobject_properties();
4742   Object* properties;
4743   maybe = AllocateFixedArray(prop_size, TENURED);
4744   if (!maybe->ToObject(&properties)) return maybe;
4745
4746   // Functions require some allocation, which might fail here.
4747   SharedFunctionInfo* shared = NULL;
4748   if (type == JS_FUNCTION_TYPE) {
4749     String* name;
4750     OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
4751                          HashSeed());
4752     maybe = InternalizeStringWithKey(&key);
4753     if (!maybe->To<String>(&name)) return maybe;
4754     maybe = AllocateSharedFunctionInfo(name);
4755     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4756   }
4757
4758   // Because of possible retries of this function after failure,
4759   // we must NOT fail after this point, where we have changed the type!
4760
4761   // Reset the map for the object.
4762   object->set_map(map);
4763   JSObject* jsobj = JSObject::cast(object);
4764
4765   // Reinitialize the object from the constructor map.
4766   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4767
4768   // Functions require some minimal initialization.
4769   if (type == JS_FUNCTION_TYPE) {
4770     map->set_function_with_prototype(true);
4771     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4772     JSFunction::cast(object)->set_context(
4773         isolate()->context()->native_context());
4774   }
4775
4776   // Put in filler if the new object is smaller than the old.
4777   if (size_difference > 0) {
4778     CreateFillerObjectAt(
4779         object->address() + map->instance_size(), size_difference);
4780   }
4781
4782   return object;
4783 }
4784
4785
4786 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4787                                              JSGlobalProxy* object) {
4788   ASSERT(constructor->has_initial_map());
4789   Map* map = constructor->initial_map();
4790
4791   // Check that the already allocated object has the same size and type as
4792   // objects allocated using the constructor.
4793   ASSERT(map->instance_size() == object->map()->instance_size());
4794   ASSERT(map->instance_type() == object->map()->instance_type());
4795
4796   // Allocate the backing storage for the properties.
4797   int prop_size = map->unused_property_fields() - map->inobject_properties();
4798   Object* properties;
4799   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4800     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4801   }
4802
4803   // Reset the map for the object.
4804   object->set_map(constructor->initial_map());
4805
4806   // Reinitialize the object from the constructor map.
4807   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4808   return object;
4809 }
4810
4811
4812 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4813                                              PretenureFlag pretenure) {
4814   int length = string.length();
4815   if (length == 1) {
4816     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4817   }
4818   Object* result;
4819   { MaybeObject* maybe_result =
4820         AllocateRawOneByteString(string.length(), pretenure);
4821     if (!maybe_result->ToObject(&result)) return maybe_result;
4822   }
4823
4824   // Copy the characters into the new object.
4825   CopyChars(SeqOneByteString::cast(result)->GetChars(),
4826             string.start(),
4827             length);
4828   return result;
4829 }
4830
4831
4832 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4833                                               int non_ascii_start,
4834                                               PretenureFlag pretenure) {
4835   // Continue counting the number of characters in the UTF-8 string, starting
4836   // from the first non-ascii character or word.
4837   Access<UnicodeCache::Utf8Decoder>
4838       decoder(isolate_->unicode_cache()->utf8_decoder());
4839   decoder->Reset(string.start() + non_ascii_start,
4840                  string.length() - non_ascii_start);
4841   int utf16_length = decoder->Utf16Length();
4842   ASSERT(utf16_length > 0);
4843   // Allocate string.
4844   Object* result;
4845   {
4846     int chars = non_ascii_start + utf16_length;
4847     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4848     if (!maybe_result->ToObject(&result)) return maybe_result;
4849   }
4850   // Convert and copy the characters into the new object.
4851   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4852   // Copy ascii portion.
4853   uint16_t* data = twobyte->GetChars();
4854   if (non_ascii_start != 0) {
4855     const char* ascii_data = string.start();
4856     for (int i = 0; i < non_ascii_start; i++) {
4857       *data++ = *ascii_data++;
4858     }
4859   }
4860   // Now write the remainder.
4861   decoder->WriteUtf16(data, utf16_length);
4862   return result;
4863 }
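
// Worked example for the length computation above (values illustrative):
// for the UTF-8 input "abc" followed by U+1F600 (a four-byte sequence),
// non_ascii_start is 3 and the decoder reports utf16_length == 2 (a
// surrogate pair), so chars == 3 + 2 == 5. A two-byte string of length 5 is
// allocated: three ASCII units are copied directly, then the remaining two
// units are produced by WriteUtf16.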
4864
4865
4866 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4867                                              PretenureFlag pretenure) {
4868   // Check if the string can be represented as a one-byte string.
4869   Object* result;
4870   int length = string.length();
4871   const uc16* start = string.start();
4872
4873   if (String::IsOneByte(start, length)) {
4874     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4875     if (!maybe_result->ToObject(&result)) return maybe_result;
4876     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4877   } else {  // It's not a one byte string.
4878     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4879     if (!maybe_result->ToObject(&result)) return maybe_result;
4880     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4881   }
4882   return result;
4883 }
4884
4885
4886 Map* Heap::InternalizedStringMapForString(String* string) {
4887   // If the string is in new space it cannot be used as internalized.
4888   if (InNewSpace(string)) return NULL;
4889
4890   // Find the corresponding internalized string map for strings.
4891   switch (string->map()->instance_type()) {
4892     case STRING_TYPE: return internalized_string_map();
4893     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4894     case CONS_STRING_TYPE: return cons_internalized_string_map();
4895     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4896     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4897     case EXTERNAL_ASCII_STRING_TYPE:
4898       return external_ascii_internalized_string_map();
4899     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4900       return external_internalized_string_with_one_byte_data_map();
4901     case SHORT_EXTERNAL_STRING_TYPE:
4902       return short_external_internalized_string_map();
4903     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4904       return short_external_ascii_internalized_string_map();
4905     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4906       return short_external_internalized_string_with_one_byte_data_map();
4907     default: return NULL;  // No match found.
4908   }
4909 }
4910
4911
4912 static inline void WriteOneByteData(Vector<const char> vector,
4913                                     uint8_t* chars,
4914                                     int len) {
4915   // Only works for ascii.
4916   ASSERT(vector.length() == len);
4917   OS::MemCopy(chars, vector.start(), len);
4918 }
4919
4920 static inline void WriteTwoByteData(Vector<const char> vector,
4921                                     uint16_t* chars,
4922                                     int len) {
4923   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4924   unsigned stream_length = vector.length();
4925   while (stream_length != 0) {
4926     unsigned consumed = 0;
4927     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4928     ASSERT(c != unibrow::Utf8::kBadChar);
4929     ASSERT(consumed <= stream_length);
4930     stream_length -= consumed;
4931     stream += consumed;
4932     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4933       len -= 2;
4934       if (len < 0) break;
4935       *chars++ = unibrow::Utf16::LeadSurrogate(c);
4936       *chars++ = unibrow::Utf16::TrailSurrogate(c);
4937     } else {
4938       len -= 1;
4939       if (len < 0) break;
4940       *chars++ = c;
4941     }
4942   }
4943   ASSERT(stream_length == 0);
4944   ASSERT(len == 0);
4945 }
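
// The surrogate split above follows standard UTF-16 encoding; assuming
// unibrow::Utf16 implements it the usual way, a code point c above 0xFFFF is
// written as:
//
//   uint32_t v = c - 0x10000;                // 20 significant bits
//   uint16_t lead  = 0xD800 + (v >> 10);     // high 10 bits
//   uint16_t trail = 0xDC00 + (v & 0x3FF);   // low 10 bits
//
// which is why such a code point consumes two units of the remaining len.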
4946
4947
4948 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4949   ASSERT(s->length() == len);
4950   String::WriteToFlat(s, chars, 0, len);
4951 }
4952
4953
4954 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4955   ASSERT(s->length() == len);
4956   String::WriteToFlat(s, chars, 0, len);
4957 }
4958
4959
4960 template<bool is_one_byte, typename T>
4961 MaybeObject* Heap::AllocateInternalizedStringImpl(
4962     T t, int chars, uint32_t hash_field) {
4963   ASSERT(chars >= 0);
4964   // Compute map and object size.
4965   int size;
4966   Map* map;
4967
4968   if (is_one_byte) {
4969     if (chars > SeqOneByteString::kMaxLength) {
4970       return Failure::OutOfMemoryException(0x9);
4971     }
4972     map = ascii_internalized_string_map();
4973     size = SeqOneByteString::SizeFor(chars);
4974   } else {
4975     if (chars > SeqTwoByteString::kMaxLength) {
4976       return Failure::OutOfMemoryException(0xa);
4977     }
4978     map = internalized_string_map();
4979     size = SeqTwoByteString::SizeFor(chars);
4980   }
4981   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4982
4983   // Allocate string.
4984   Object* result;
4985   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4986     if (!maybe_result->ToObject(&result)) return maybe_result;
4987   }
4988
4989   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4990   // Set length and hash fields of the allocated string.
4991   String* answer = String::cast(result);
4992   answer->set_length(chars);
4993   answer->set_hash_field(hash_field);
4994
4995   ASSERT_EQ(size, answer->Size());
4996
4997   if (is_one_byte) {
4998     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
4999   } else {
5000     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5001   }
5002   return answer;
5003 }
5004
5005
5006 // Need explicit instantiations.
5007 template
5008 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5009 template
5010 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5011     String*, int, uint32_t);
5012 template
5013 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5014     Vector<const char>, int, uint32_t);
5015
5016
5017 MaybeObject* Heap::AllocateRawOneByteString(int length,
5018                                             PretenureFlag pretenure) {
5019   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5020     return Failure::OutOfMemoryException(0xb);
5021   }
5022   int size = SeqOneByteString::SizeFor(length);
5023   ASSERT(size <= SeqOneByteString::kMaxSize);
5024   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5025
5026   Object* result;
5027   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5028     if (!maybe_result->ToObject(&result)) return maybe_result;
5029   }
5030
5031   // Partially initialize the object.
5032   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5033   String::cast(result)->set_length(length);
5034   String::cast(result)->set_hash_field(String::kEmptyHashField);
5035   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5036
5037   return result;
5038 }
5039
5040
5041 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5042                                             PretenureFlag pretenure) {
5043   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5044     return Failure::OutOfMemoryException(0xc);
5045   }
5046   int size = SeqTwoByteString::SizeFor(length);
5047   ASSERT(size <= SeqTwoByteString::kMaxSize);
5048   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5049
5050   Object* result;
5051   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5052     if (!maybe_result->ToObject(&result)) return maybe_result;
5053   }
5054
5055   // Partially initialize the object.
5056   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5057   String::cast(result)->set_length(length);
5058   String::cast(result)->set_hash_field(String::kEmptyHashField);
5059   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5060   return result;
5061 }
5062
5063
5064 MaybeObject* Heap::AllocateJSArray(
5065     ElementsKind elements_kind,
5066     PretenureFlag pretenure) {
5067   Context* native_context = isolate()->context()->native_context();
5068   JSFunction* array_function = native_context->array_function();
5069   Map* map = array_function->initial_map();
5070   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5071   if (transition_map != NULL) map = transition_map;
5072   return AllocateJSObjectFromMap(map, pretenure);
5073 }
5074
5075
5076 MaybeObject* Heap::AllocateEmptyFixedArray() {
5077   int size = FixedArray::SizeFor(0);
5078   Object* result;
5079   { MaybeObject* maybe_result =
5080         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5081     if (!maybe_result->ToObject(&result)) return maybe_result;
5082   }
5083   // Initialize the object.
5084   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5085       fixed_array_map());
5086   reinterpret_cast<FixedArray*>(result)->set_length(0);
5087   return result;
5088 }
5089
5090
5091 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5092   return AllocateExternalArray(0, array_type, NULL, TENURED);
5093 }
5094
5095
5096 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5097   int len = src->length();
5098   Object* obj;
5099   { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5100     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5101   }
5102   if (InNewSpace(obj)) {
5103     HeapObject* dst = HeapObject::cast(obj);
5104     dst->set_map_no_write_barrier(map);
5105     CopyBlock(dst->address() + kPointerSize,
5106               src->address() + kPointerSize,
5107               FixedArray::SizeFor(len) - kPointerSize);
5108     return obj;
5109   }
5110   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5111   FixedArray* result = FixedArray::cast(obj);
5112   result->set_length(len);
5113
5114   // Copy the content.
5115   DisallowHeapAllocation no_gc;
5116   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5117   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5118   return result;
5119 }
5120
5121
5122 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5123                                                Map* map) {
5124   int len = src->length();
5125   Object* obj;
5126   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5127     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5128   }
5129   HeapObject* dst = HeapObject::cast(obj);
5130   dst->set_map_no_write_barrier(map);
5131   CopyBlock(
5132       dst->address() + FixedDoubleArray::kLengthOffset,
5133       src->address() + FixedDoubleArray::kLengthOffset,
5134       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5135   return obj;
5136 }
5137
5138
5139 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5140                                                 Map* map) {
5141   int int64_entries = src->count_of_int64_entries();
5142   int ptr_entries = src->count_of_ptr_entries();
5143   int int32_entries = src->count_of_int32_entries();
5144   Object* obj;
5145   { MaybeObject* maybe_obj =
5146         AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5147     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5148   }
5149   HeapObject* dst = HeapObject::cast(obj);
5150   dst->set_map_no_write_barrier(map);
5151   CopyBlock(
5152       dst->address() + ConstantPoolArray::kLengthOffset,
5153       src->address() + ConstantPoolArray::kLengthOffset,
5154       ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5155           - ConstantPoolArray::kLengthOffset);
5156   return obj;
5157 }
5158
5159
5160 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5161   if (length < 0 || length > FixedArray::kMaxLength) {
5162     return Failure::OutOfMemoryException(0xe);
5163   }
5164   int size = FixedArray::SizeFor(length);
5165   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5166
5167   return AllocateRaw(size, space, OLD_POINTER_SPACE);
5168 }
5169
5170
5171 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5172                                                 PretenureFlag pretenure,
5173                                                 Object* filler) {
5174   ASSERT(length >= 0);
5175   ASSERT(empty_fixed_array()->IsFixedArray());
5176   if (length == 0) return empty_fixed_array();
5177
5178   ASSERT(!InNewSpace(filler));
5179   Object* result;
5180   { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5181     if (!maybe_result->ToObject(&result)) return maybe_result;
5182   }
5183
5184   HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5185   FixedArray* array = FixedArray::cast(result);
5186   array->set_length(length);
5187   MemsetPointer(array->data_start(), filler, length);
5188   return array;
5189 }
5190
5191
5192 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5193   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5194 }
5195
5196
5197 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5198                                                PretenureFlag pretenure) {
5199   return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5200 }
5201
5202
5203 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5204   if (length == 0) return empty_fixed_array();
5205
5206   Object* obj;
5207   { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5208     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5209   }
5210
5211   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5212       fixed_array_map());
5213   FixedArray::cast(obj)->set_length(length);
5214   return obj;
5215 }
5216
5217
5218 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5219   int size = FixedDoubleArray::SizeFor(0);
5220   Object* result;
5221   { MaybeObject* maybe_result =
5222         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5223     if (!maybe_result->ToObject(&result)) return maybe_result;
5224   }
5225   // Initialize the object.
5226   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5227       fixed_double_array_map());
5228   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5229   return result;
5230 }
5231
5232
5233 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5234     int length,
5235     PretenureFlag pretenure) {
5236   if (length == 0) return empty_fixed_array();
5237
5238   Object* elements_object;
5239   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5240   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5241   FixedDoubleArray* elements =
5242       reinterpret_cast<FixedDoubleArray*>(elements_object);
5243
5244   elements->set_map_no_write_barrier(fixed_double_array_map());
5245   elements->set_length(length);
5246   return elements;
5247 }
5248
5249
5250 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5251     int length,
5252     PretenureFlag pretenure) {
5253   if (length == 0) return empty_fixed_array();
5254
5255   Object* elements_object;
5256   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5257   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5258   FixedDoubleArray* elements =
5259       reinterpret_cast<FixedDoubleArray*>(elements_object);
5260
5261   for (int i = 0; i < length; ++i) {
5262     elements->set_the_hole(i);
5263   }
5264
5265   elements->set_map_no_write_barrier(fixed_double_array_map());
5266   elements->set_length(length);
5267   return elements;
5268 }
5269
5270
5271 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5272                                                PretenureFlag pretenure) {
5273   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5274     return Failure::OutOfMemoryException(0xf);
5275   }
5276   int size = FixedDoubleArray::SizeFor(length);
5277 #ifndef V8_HOST_ARCH_64_BIT
5278   size += kPointerSize;
5279 #endif
5280   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5281
5282   HeapObject* object;
5283   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5284     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5285   }
5286
5287   return EnsureDoubleAligned(this, object, size);
5288 }
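
// Alignment note for the size adjustment above: on 32-bit hosts a raw
// allocation is only guaranteed pointer (4-byte) alignment, so one extra
// kPointerSize is requested and EnsureDoubleAligned places a filler either
// before or after the payload, leaving the double fields on an 8-byte
// boundary. On 64-bit hosts every allocation is already 8-byte aligned and
// no padding is needed.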
5289
5290
5291 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5292                                              int number_of_ptr_entries,
5293                                              int number_of_int32_entries) {
5294   ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5295          number_of_int32_entries > 0);
5296   int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5297                                         number_of_ptr_entries,
5298                                         number_of_int32_entries);
5299 #ifndef V8_HOST_ARCH_64_BIT
5300   size += kPointerSize;
5301 #endif
5302   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5303
5304   HeapObject* object;
5305   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5306     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5307   }
5308   object = EnsureDoubleAligned(this, object, size);
5309   HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5310
5311   ConstantPoolArray* constant_pool =
5312       reinterpret_cast<ConstantPoolArray*>(object);
5313   constant_pool->SetEntryCounts(number_of_int64_entries,
5314                                 number_of_ptr_entries,
5315                                 number_of_int32_entries);
5316   if (number_of_ptr_entries > 0) {
5317     MemsetPointer(
5318         HeapObject::RawField(
5319             constant_pool,
5320             constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5321         undefined_value(),
5322         number_of_ptr_entries);
5323   }
5324   return constant_pool;
5325 }
5326
5327
5328 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
5329   int size = ConstantPoolArray::SizeFor(0, 0, 0);
5330   Object* result;
5331   { MaybeObject* maybe_result =
5332         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5333     if (!maybe_result->ToObject(&result)) return maybe_result;
5334   }
5335   HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
5336   ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
5337   return result;
5338 }
5339
5340
5341 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5342   Object* result;
5343   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5344     if (!maybe_result->ToObject(&result)) return maybe_result;
5345   }
5346   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5347       hash_table_map());
5348   ASSERT(result->IsHashTable());
5349   return result;
5350 }
5351
5352
5353 MaybeObject* Heap::AllocateSymbol() {
5354   // Statically ensure that it is safe to allocate symbols in paged spaces.
5355   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
5356
5357   Object* result;
5358   MaybeObject* maybe =
5359       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5360   if (!maybe->ToObject(&result)) return maybe;
5361
5362   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5363
5364   // Generate a random hash value.
5365   int hash;
5366   int attempts = 0;
5367   do {
5368     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5369     attempts++;
5370   } while (hash == 0 && attempts < 30);
5371   if (hash == 0) hash = 1;  // never return 0
5372
5373   Symbol::cast(result)->set_hash_field(
5374       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5375   Symbol::cast(result)->set_name(undefined_value());
5376   Symbol::cast(result)->set_flags(Smi::FromInt(0));
5377
5378   ASSERT(!Symbol::cast(result)->is_private());
5379   return result;
5380 }
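
// Hash-field layout used above, spelled out: the random hash is masked to
// Name::kHashBitMask, a zero value is replaced by 1 so the field never looks
// uncomputed, and the result is shifted into place and tagged as not being
// an array index:
//
//   uint32_t field = Name::kIsNotArrayIndexMask | (hash << Name::kHashShift);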
5381
5382
5383 MaybeObject* Heap::AllocatePrivateSymbol() {
5384   MaybeObject* maybe = AllocateSymbol();
5385   Symbol* symbol;
5386   if (!maybe->To(&symbol)) return maybe;
5387   symbol->set_is_private(true);
5388   return symbol;
5389 }
5390
5391
5392 MaybeObject* Heap::AllocateNativeContext() {
5393   Object* result;
5394   { MaybeObject* maybe_result =
5395         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5396     if (!maybe_result->ToObject(&result)) return maybe_result;
5397   }
5398   Context* context = reinterpret_cast<Context*>(result);
5399   context->set_map_no_write_barrier(native_context_map());
5400   context->set_js_array_maps(undefined_value());
5401   ASSERT(context->IsNativeContext());
5402   ASSERT(result->IsContext());
5403   return result;
5404 }
5405
5406
5407 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5408                                          ScopeInfo* scope_info) {
5409   Object* result;
5410   { MaybeObject* maybe_result =
5411         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5412     if (!maybe_result->ToObject(&result)) return maybe_result;
5413   }
5414   Context* context = reinterpret_cast<Context*>(result);
5415   context->set_map_no_write_barrier(global_context_map());
5416   context->set_closure(function);
5417   context->set_previous(function->context());
5418   context->set_extension(scope_info);
5419   context->set_global_object(function->context()->global_object());
5420   ASSERT(context->IsGlobalContext());
5421   ASSERT(result->IsContext());
5422   return context;
5423 }
5424
5425
5426 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5427   Object* result;
5428   { MaybeObject* maybe_result =
5429         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5430     if (!maybe_result->ToObject(&result)) return maybe_result;
5431   }
5432   Context* context = reinterpret_cast<Context*>(result);
5433   context->set_map_no_write_barrier(module_context_map());
5434   // Instance link will be set later.
5435   context->set_extension(Smi::FromInt(0));
5436   return context;
5437 }
5438
5439
5440 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5441   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5442   Object* result;
5443   { MaybeObject* maybe_result = AllocateFixedArray(length);
5444     if (!maybe_result->ToObject(&result)) return maybe_result;
5445   }
5446   Context* context = reinterpret_cast<Context*>(result);
5447   context->set_map_no_write_barrier(function_context_map());
5448   context->set_closure(function);
5449   context->set_previous(function->context());
5450   context->set_extension(Smi::FromInt(0));
5451   context->set_global_object(function->context()->global_object());
5452   return context;
5453 }
5454
5455
5456 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5457                                         Context* previous,
5458                                         String* name,
5459                                         Object* thrown_object) {
5460   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5461   Object* result;
5462   { MaybeObject* maybe_result =
5463         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5464     if (!maybe_result->ToObject(&result)) return maybe_result;
5465   }
5466   Context* context = reinterpret_cast<Context*>(result);
5467   context->set_map_no_write_barrier(catch_context_map());
5468   context->set_closure(function);
5469   context->set_previous(previous);
5470   context->set_extension(name);
5471   context->set_global_object(previous->global_object());
5472   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5473   return context;
5474 }
5475
5476
5477 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5478                                        Context* previous,
5479                                        JSReceiver* extension) {
5480   Object* result;
5481   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5482     if (!maybe_result->ToObject(&result)) return maybe_result;
5483   }
5484   Context* context = reinterpret_cast<Context*>(result);
5485   context->set_map_no_write_barrier(with_context_map());
5486   context->set_closure(function);
5487   context->set_previous(previous);
5488   context->set_extension(extension);
5489   context->set_global_object(previous->global_object());
5490   return context;
5491 }
5492
5493
5494 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5495                                         Context* previous,
5496                                         ScopeInfo* scope_info) {
5497   Object* result;
5498   { MaybeObject* maybe_result =
5499         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5500     if (!maybe_result->ToObject(&result)) return maybe_result;
5501   }
5502   Context* context = reinterpret_cast<Context*>(result);
5503   context->set_map_no_write_barrier(block_context_map());
5504   context->set_closure(function);
5505   context->set_previous(previous);
5506   context->set_extension(scope_info);
5507   context->set_global_object(previous->global_object());
5508   return context;
5509 }
5510
5511
5512 MaybeObject* Heap::AllocateScopeInfo(int length) {
5513   FixedArray* scope_info;
5514   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5515   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5516   scope_info->set_map_no_write_barrier(scope_info_map());
5517   return scope_info;
5518 }
5519
5520
5521 MaybeObject* Heap::AllocateExternal(void* value) {
5522   Foreign* foreign;
5523   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5524     if (!maybe_result->To(&foreign)) return maybe_result;
5525   }
5526   JSObject* external;
5527   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5528     if (!maybe_result->To(&external)) return maybe_result;
5529   }
5530   external->SetInternalField(0, foreign);
5531   return external;
5532 }
5533
5534
5535 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5536   Map* map;
5537   switch (type) {
5538 #define MAKE_CASE(NAME, Name, name) \
5539     case NAME##_TYPE: map = name##_map(); break;
5540 STRUCT_LIST(MAKE_CASE)
5541 #undef MAKE_CASE
5542     default:
5543       UNREACHABLE();
5544       return Failure::InternalError();
5545   }
5546   int size = map->instance_size();
5547   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5548   Object* result;
5549   { MaybeObject* maybe_result = Allocate(map, space);
5550     if (!maybe_result->ToObject(&result)) return maybe_result;
5551   }
5552   Struct::cast(result)->InitializeBody(size);
5553   return result;
5554 }
5555
5556
5557 bool Heap::IsHeapIterable() {
5558   return (!old_pointer_space()->was_swept_conservatively() &&
5559           !old_data_space()->was_swept_conservatively());
5560 }
5561
5562
5563 void Heap::EnsureHeapIsIterable() {
5564   ASSERT(AllowHeapAllocation::IsAllowed());
5565   if (!IsHeapIterable()) {
5566     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5567   }
5568   ASSERT(IsHeapIterable());
5569 }
5570
5571
5572 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5573   incremental_marking()->Step(step_size,
5574                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5575
5576   if (incremental_marking()->IsComplete()) {
5577     bool uncommit = false;
5578     if (gc_count_at_last_idle_gc_ == gc_count_) {
5579       // No GC since the last full GC, the mutator is probably not active.
5580       isolate_->compilation_cache()->Clear();
5581       uncommit = true;
5582     }
5583     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5584     mark_sweeps_since_idle_round_started_++;
5585     gc_count_at_last_idle_gc_ = gc_count_;
5586     if (uncommit) {
5587       new_space_.Shrink();
5588       UncommitFromSpace();
5589     }
5590   }
5591 }
5592
5593
5594 bool Heap::IdleNotification(int hint) {
5595   // Hints greater than this value indicate that
5596   // the embedder is requesting a lot of GC work.
5597   const int kMaxHint = 1000;
5598   const int kMinHintForIncrementalMarking = 10;
5599   // Minimal hint that allows a full GC to be performed.
5600   const int kMinHintForFullGC = 100;
5601   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5602   // The size factor is in range [5..250]. The numbers here are chosen from
5603   // experiments. If you change them, make sure to test with
5604   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*"
5605   intptr_t step_size =
5606       size_factor * IncrementalMarking::kAllocatedThreshold;
5607
5608   if (contexts_disposed_ > 0) {
5609     contexts_disposed_ = 0;
5610     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5611     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5612         incremental_marking()->IsStopped()) {
5613       HistogramTimerScope scope(isolate_->counters()->gc_context());
5614       CollectAllGarbage(kReduceMemoryFootprintMask,
5615                         "idle notification: contexts disposed");
5616     } else {
5617       AdvanceIdleIncrementalMarking(step_size);
5618     }
5619
5620   // After context disposal there is likely a lot of garbage remaining, so reset
5621     // the idle notification counters in order to trigger more incremental GCs
5622     // on subsequent idle notifications.
5623     StartIdleRound();
5624     return false;
5625   }
5626
5627   if (!FLAG_incremental_marking || Serializer::enabled()) {
5628     return IdleGlobalGC();
5629   }
5630
5631   // By doing small chunks of GC work in each IdleNotification,
5632   // we perform a round of incremental GCs and after that wait until
5633   // the mutator creates enough garbage to justify a new round.
5634   // An incremental GC progresses as follows:
5635   // 1. many incremental marking steps,
5636   // 2. one old space mark-sweep-compact,
5637   // 3. many lazy sweep steps.
5638   // Use mark-sweep-compact events to count incremental GCs in a round.
5639
5640   if (incremental_marking()->IsStopped()) {
5641     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5642         !IsSweepingComplete() &&
5643         !AdvanceSweepers(static_cast<int>(step_size))) {
5644       return false;
5645     }
5646   }
5647
5648   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5649     if (EnoughGarbageSinceLastIdleRound()) {
5650       StartIdleRound();
5651     } else {
5652       return true;
5653     }
5654   }
5655
5656   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5657                               mark_sweeps_since_idle_round_started_;
5658
5659   if (incremental_marking()->IsStopped()) {
5660     // If there are no more than two GCs left in this idle round and we are
5661     // allowed to do a full GC, then make those GCs full in order to compact
5662     // the code space.
5663     // TODO(ulan): Once we enable code compaction for incremental marking,
5664     // we can get rid of this special case and always start incremental marking.
5665     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5666       CollectAllGarbage(kReduceMemoryFootprintMask,
5667                         "idle notification: finalize idle round");
5668       mark_sweeps_since_idle_round_started_++;
5669     } else if (hint > kMinHintForIncrementalMarking) {
5670       incremental_marking()->Start();
5671     }
5672   }
5673   if (!incremental_marking()->IsStopped() &&
5674       hint > kMinHintForIncrementalMarking) {
5675     AdvanceIdleIncrementalMarking(step_size);
5676   }
5677
5678   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5679     FinishIdleRound();
5680     return true;
5681   }
5682
5683   return false;
5684 }
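
// Worked example for the step-size computation at the top of the function:
// a hint of 500 gives size_factor = Min(Max(500, 20), 1000) / 4 = 125, so
// step_size = 125 * IncrementalMarking::kAllocatedThreshold; hints of 20 or
// less bottom out at the minimum factor of 5, and hints of 1000 or more cap
// the factor at 250.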
5685
5686
5687 bool Heap::IdleGlobalGC() {
5688   static const int kIdlesBeforeScavenge = 4;
5689   static const int kIdlesBeforeMarkSweep = 7;
5690   static const int kIdlesBeforeMarkCompact = 8;
5691   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5692   static const unsigned int kGCsBetweenCleanup = 4;
5693
5694   if (!last_idle_notification_gc_count_init_) {
5695     last_idle_notification_gc_count_ = gc_count_;
5696     last_idle_notification_gc_count_init_ = true;
5697   }
5698
5699   bool uncommit = true;
5700   bool finished = false;
5701
5702   // Reset the number of idle notifications received when a number of
5703   // GCs have taken place. This allows another round of cleanup based
5704   // on idle notifications if enough work has been carried out to
5705   // provoke a number of garbage collections.
5706   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5707     number_idle_notifications_ =
5708         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5709   } else {
5710     number_idle_notifications_ = 0;
5711     last_idle_notification_gc_count_ = gc_count_;
5712   }
5713
5714   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5715     CollectGarbage(NEW_SPACE, "idle notification");
5716     new_space_.Shrink();
5717     last_idle_notification_gc_count_ = gc_count_;
5718   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5719     // Before doing the mark-sweep collections we clear the
5720     // compilation cache to avoid hanging on to source code and
5721     // generated code for cached functions.
5722     isolate_->compilation_cache()->Clear();
5723
5724     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5725     new_space_.Shrink();
5726     last_idle_notification_gc_count_ = gc_count_;
5727
5728   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5729     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5730     new_space_.Shrink();
5731     last_idle_notification_gc_count_ = gc_count_;
5732     number_idle_notifications_ = 0;
5733     finished = true;
5734   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5735     // If we have received more than kIdlesBeforeMarkCompact idle
5736     // notifications we do not perform any cleanup because we don't
5737     // expect to gain much by doing so.
5738     finished = true;
5739   }
5740
5741   if (uncommit) UncommitFromSpace();
5742
5743   return finished;
5744 }
5745
5746
5747 #ifdef DEBUG
5748
5749 void Heap::Print() {
5750   if (!HasBeenSetUp()) return;
5751   isolate()->PrintStack(stdout);
5752   AllSpaces spaces(this);
5753   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5754     space->Print();
5755   }
5756 }
5757
5758
5759 void Heap::ReportCodeStatistics(const char* title) {
5760   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5761   PagedSpace::ResetCodeStatistics(isolate());
5762   // We do not look for code in new space, map space, or old space.  If code
5763   // somehow ends up in those spaces, we would miss it here.
5764   code_space_->CollectCodeStatistics();
5765   lo_space_->CollectCodeStatistics();
5766   PagedSpace::ReportCodeStatistics(isolate());
5767 }
5768
5769
5770 // This function expects that NewSpace's allocated objects histogram is
5771 // populated (via a call to CollectStatistics or else as a side effect of a
5772 // just-completed scavenge collection).
5773 void Heap::ReportHeapStatistics(const char* title) {
5774   USE(title);
5775   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5776          title, gc_count_);
5777   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5778          old_generation_allocation_limit_);
5779
5780   PrintF("\n");
5781   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5782   isolate_->global_handles()->PrintStats();
5783   PrintF("\n");
5784
5785   PrintF("Heap statistics : ");
5786   isolate_->memory_allocator()->ReportStatistics();
5787   PrintF("To space : ");
5788   new_space_.ReportStatistics();
5789   PrintF("Old pointer space : ");
5790   old_pointer_space_->ReportStatistics();
5791   PrintF("Old data space : ");
5792   old_data_space_->ReportStatistics();
5793   PrintF("Code space : ");
5794   code_space_->ReportStatistics();
5795   PrintF("Map space : ");
5796   map_space_->ReportStatistics();
5797   PrintF("Cell space : ");
5798   cell_space_->ReportStatistics();
5799   PrintF("PropertyCell space : ");
5800   property_cell_space_->ReportStatistics();
5801   PrintF("Large object space : ");
5802   lo_space_->ReportStatistics();
5803   PrintF(">>>>>> ========================================= >>>>>>\n");
5804 }
5805
5806 #endif  // DEBUG
5807
5808 bool Heap::Contains(HeapObject* value) {
5809   return Contains(value->address());
5810 }
5811
5812
5813 bool Heap::Contains(Address addr) {
5814   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5815   return HasBeenSetUp() &&
5816     (new_space_.ToSpaceContains(addr) ||
5817      old_pointer_space_->Contains(addr) ||
5818      old_data_space_->Contains(addr) ||
5819      code_space_->Contains(addr) ||
5820      map_space_->Contains(addr) ||
5821      cell_space_->Contains(addr) ||
5822      property_cell_space_->Contains(addr) ||
5823      lo_space_->SlowContains(addr));
5824 }
5825
5826
5827 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5828   return InSpace(value->address(), space);
5829 }
5830
5831
5832 bool Heap::InSpace(Address addr, AllocationSpace space) {
5833   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5834   if (!HasBeenSetUp()) return false;
5835
5836   switch (space) {
5837     case NEW_SPACE:
5838       return new_space_.ToSpaceContains(addr);
5839     case OLD_POINTER_SPACE:
5840       return old_pointer_space_->Contains(addr);
5841     case OLD_DATA_SPACE:
5842       return old_data_space_->Contains(addr);
5843     case CODE_SPACE:
5844       return code_space_->Contains(addr);
5845     case MAP_SPACE:
5846       return map_space_->Contains(addr);
5847     case CELL_SPACE:
5848       return cell_space_->Contains(addr);
5849     case PROPERTY_CELL_SPACE:
5850       return property_cell_space_->Contains(addr);
5851     case LO_SPACE:
5852       return lo_space_->SlowContains(addr);
5853   }
5854
5855   return false;
5856 }
5857
5858
5859 #ifdef VERIFY_HEAP
5860 void Heap::Verify() {
5861   CHECK(HasBeenSetUp());
5862
5863   store_buffer()->Verify();
5864
5865   VerifyPointersVisitor visitor;
5866   IterateRoots(&visitor, VISIT_ONLY_STRONG);
5867
5868   new_space_.Verify();
5869
5870   old_pointer_space_->Verify(&visitor);
5871   map_space_->Verify(&visitor);
5872
5873   VerifyPointersVisitor no_dirty_regions_visitor;
5874   old_data_space_->Verify(&no_dirty_regions_visitor);
5875   code_space_->Verify(&no_dirty_regions_visitor);
5876   cell_space_->Verify(&no_dirty_regions_visitor);
5877   property_cell_space_->Verify(&no_dirty_regions_visitor);
5878
5879   lo_space_->Verify();
5880 }
5881 #endif
5882
5883
5884 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
5885   Utf8StringKey key(string, HashSeed());
5886   return InternalizeStringWithKey(&key);
5887 }
5888
5889
5890 MaybeObject* Heap::InternalizeString(String* string) {
5891   if (string->IsInternalizedString()) return string;
5892   Object* result = NULL;
5893   Object* new_table;
5894   { MaybeObject* maybe_new_table =
5895         string_table()->LookupString(string, &result);
5896     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5897   }
5898   // Can't use set_string_table because StringTable::cast knows that
5899   // StringTable is a singleton and checks for identity.
5900   roots_[kStringTableRootIndex] = new_table;
5901   ASSERT(result != NULL);
5902   return result;
5903 }
5904
5905
5906 bool Heap::InternalizeStringIfExists(String* string, String** result) {
5907   if (string->IsInternalizedString()) {
5908     *result = string;
5909     return true;
5910   }
5911   return string_table()->LookupStringIfExists(string, result);
5912 }
5913
5914
5915 MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
5916   Object* result = NULL;
5917   Object* new_table;
5918   { MaybeObject* maybe_new_table =
5919         string_table()->LookupKey(key, &result);
5920     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5921   }
5922   // Can't use set_string_table because StringTable::cast knows that
5923   // StringTable is a singleton and checks for identity.
5924   roots_[kStringTableRootIndex] = new_table;
5925   ASSERT(result != NULL);
5926   return result;
5927 }
5928
5929
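// Overwrite the evacuated from-space with a recognizable zap value so that
// stale pointers into it are easy to spot when debugging.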
5930 void Heap::ZapFromSpace() {
5931   NewSpacePageIterator it(new_space_.FromSpaceStart(),
5932                           new_space_.FromSpaceEnd());
5933   while (it.has_next()) {
5934     NewSpacePage* page = it.next();
5935     for (Address cursor = page->area_start(), limit = page->area_end();
5936          cursor < limit;
5937          cursor += kPointerSize) {
5938       Memory::Address_at(cursor) = kFromSpaceZapValue;
5939     }
5940   }
5941 }
5942
5943
5944 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5945                                              Address end,
5946                                              ObjectSlotCallback callback) {
5947   Address slot_address = start;
5948
5949   // We are not collecting slots on new space objects during mutation
5950   // thus we have to scan for pointers to evacuation candidates when we
5951   // promote objects. But we should not record any slots in non-black
5952   // objects. Grey object's slots would be rescanned.
5953   // White object might not survive until the end of collection
5954   // it would be a violation of the invariant to record it's slots.
5955   bool record_slots = false;
5956   if (incremental_marking()->IsCompacting()) {
5957     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5958     record_slots = Marking::IsBlack(mark_bit);
5959   }
5960
5961   while (slot_address < end) {
5962     Object** slot = reinterpret_cast<Object**>(slot_address);
5963     Object* object = *slot;
5964     // If the store buffer becomes overfull we mark pages as being exempt from
5965     // the store buffer.  These pages are scanned to find pointers that point
5966     // to the new space.  In that case we may hit newly promoted objects and
5967     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
5968     if (object->IsHeapObject()) {
5969       if (Heap::InFromSpace(object)) {
5970         callback(reinterpret_cast<HeapObject**>(slot),
5971                  HeapObject::cast(object));
5972         Object* new_object = *slot;
5973         if (InNewSpace(new_object)) {
5974           SLOW_ASSERT(Heap::InToSpace(new_object));
5975           SLOW_ASSERT(new_object->IsHeapObject());
5976           store_buffer_.EnterDirectlyIntoStoreBuffer(
5977               reinterpret_cast<Address>(slot));
5978         }
5979         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5980       } else if (record_slots &&
5981                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5982         mark_compact_collector()->RecordSlot(slot, slot, object);
5983       }
5984     }
5985     slot_address += kPointerSize;
5986   }
5987 }
5988
5989
5990 #ifdef DEBUG
5991 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5992
5993
5994 bool IsAMapPointerAddress(Object** addr) {
5995   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5996   int mod = a % Map::kSize;
5997   return mod >= Map::kPointerFieldsBeginOffset &&
5998          mod < Map::kPointerFieldsEndOffset;
5999 }
6000
6001
6002 bool EverythingsAPointer(Object** addr) {
6003   return true;
6004 }
6005
6006
6007 static void CheckStoreBuffer(Heap* heap,
6008                              Object** current,
6009                              Object** limit,
6010                              Object**** store_buffer_position,
6011                              Object*** store_buffer_top,
6012                              CheckStoreBufferFilter filter,
6013                              Address special_garbage_start,
6014                              Address special_garbage_end) {
6015   Map* free_space_map = heap->free_space_map();
6016   for ( ; current < limit; current++) {
6017     Object* o = *current;
6018     Address current_address = reinterpret_cast<Address>(current);
6019     // Skip free space.
6020     if (o == free_space_map) {
6022       FreeSpace* free_space =
6023           FreeSpace::cast(HeapObject::FromAddress(current_address));
6024       int skip = free_space->Size();
6025       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6026       ASSERT(skip > 0);
6027       current_address += skip - kPointerSize;
6028       current = reinterpret_cast<Object**>(current_address);
6029       continue;
6030     }
6031     // Skip the current linear allocation area between top and limit, which
6032     // is not marked with the free space map but can contain junk.
6033     if (current_address == special_garbage_start &&
6034         special_garbage_end != special_garbage_start) {
6035       current_address = special_garbage_end - kPointerSize;
6036       current = reinterpret_cast<Object**>(current_address);
6037       continue;
6038     }
6039     if (!(*filter)(current)) continue;
6040     ASSERT(current_address < special_garbage_start ||
6041            current_address >= special_garbage_end);
6042     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6043     // We have to check that the pointer does not point into new space
6044     // without trying to cast it to a heap object since the hash field of
6045     // a string can contain values like 1 and 3 which are tagged null
6046     // pointers.
6047     if (!heap->InNewSpace(o)) continue;
6048     while (**store_buffer_position < current &&
6049            *store_buffer_position < store_buffer_top) {
6050       (*store_buffer_position)++;
6051     }
6052     if (**store_buffer_position != current ||
6053         *store_buffer_position == store_buffer_top) {
6054       Object** obj_start = current;
6055       while (!(*obj_start)->IsMap()) obj_start--;
6056       UNREACHABLE();
6057     }
6058   }
6059 }
6060
6061
6062 // Check that the store buffer contains all intergenerational pointers by
6063 // scanning a page and ensuring that all pointers to young space are in the
6064 // store buffer.
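// A new-space pointer that is missing from the store buffer indicates a
// missed write barrier; CheckStoreBuffer reports this via UNREACHABLE().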
6065 void Heap::OldPointerSpaceCheckStoreBuffer() {
6066   OldSpace* space = old_pointer_space();
6067   PageIterator pages(space);
6068
6069   store_buffer()->SortUniq();
6070
6071   while (pages.has_next()) {
6072     Page* page = pages.next();
6073     Object** current = reinterpret_cast<Object**>(page->area_start());
6074
6075     Address end = page->area_end();
6076
6077     Object*** store_buffer_position = store_buffer()->Start();
6078     Object*** store_buffer_top = store_buffer()->Top();
6079
6080     Object** limit = reinterpret_cast<Object**>(end);
6081     CheckStoreBuffer(this,
6082                      current,
6083                      limit,
6084                      &store_buffer_position,
6085                      store_buffer_top,
6086                      &EverythingsAPointer,
6087                      space->top(),
6088                      space->limit());
6089   }
6090 }
6091
6092
6093 void Heap::MapSpaceCheckStoreBuffer() {
6094   MapSpace* space = map_space();
6095   PageIterator pages(space);
6096
6097   store_buffer()->SortUniq();
6098
6099   while (pages.has_next()) {
6100     Page* page = pages.next();
6101     Object** current = reinterpret_cast<Object**>(page->area_start());
6102
6103     Address end = page->area_end();
6104
6105     Object*** store_buffer_position = store_buffer()->Start();
6106     Object*** store_buffer_top = store_buffer()->Top();
6107
6108     Object** limit = reinterpret_cast<Object**>(end);
6109     CheckStoreBuffer(this,
6110                      current,
6111                      limit,
6112                      &store_buffer_position,
6113                      store_buffer_top,
6114                      &IsAMapPointerAddress,
6115                      space->top(),
6116                      space->limit());
6117   }
6118 }
6119
6120
6121 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6122   LargeObjectIterator it(lo_space());
6123   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6124     // We only have code, sequential strings, or fixed arrays in large
6125     // object space, and only fixed arrays can possibly contain pointers to
6126     // the young generation.
6127     if (object->IsFixedArray()) {
6128       Object*** store_buffer_position = store_buffer()->Start();
6129       Object*** store_buffer_top = store_buffer()->Top();
6130       Object** current = reinterpret_cast<Object**>(object->address());
6131       Object** limit =
6132           reinterpret_cast<Object**>(object->address() + object->Size());
6133       CheckStoreBuffer(this,
6134                        current,
6135                        limit,
6136                        &store_buffer_position,
6137                        store_buffer_top,
6138                        &EverythingsAPointer,
6139                        NULL,
6140                        NULL);
6141     }
6142   }
6143 }
6144 #endif
6145
6146
6147 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6148   IterateStrongRoots(v, mode);
6149   IterateWeakRoots(v, mode);
6150 }
6151
6152
6153 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6154   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6155   v->Synchronize(VisitorSynchronization::kStringTable);
6156   if (mode != VISIT_ALL_IN_SCAVENGE &&
6157       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6158     // Scavenge collections have special processing for this.
6159     external_string_table_.Iterate(v);
6160   }
6161   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6162 }
6163
6164
6165 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6166   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6167   v->Synchronize(VisitorSynchronization::kStrongRootList);
6168
6169   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6170   v->Synchronize(VisitorSynchronization::kInternalizedString);
6171
6172   isolate_->bootstrapper()->Iterate(v);
6173   v->Synchronize(VisitorSynchronization::kBootstrapper);
6174   isolate_->Iterate(v);
6175   v->Synchronize(VisitorSynchronization::kTop);
6176   Relocatable::Iterate(isolate_, v);
6177   v->Synchronize(VisitorSynchronization::kRelocatable);
6178
6179 #ifdef ENABLE_DEBUGGER_SUPPORT
6180   isolate_->debug()->Iterate(v);
6181   if (isolate_->deoptimizer_data() != NULL) {
6182     isolate_->deoptimizer_data()->Iterate(v);
6183   }
6184 #endif
6185   v->Synchronize(VisitorSynchronization::kDebug);
6186   isolate_->compilation_cache()->Iterate(v);
6187   v->Synchronize(VisitorSynchronization::kCompilationCache);
6188
6189   // Iterate over local handles in handle scopes.
6190   isolate_->handle_scope_implementer()->Iterate(v);
6191   isolate_->IterateDeferredHandles(v);
6192   v->Synchronize(VisitorSynchronization::kHandleScope);
6193
6194   // Iterate over the builtin code objects and code stubs in the
6195   // heap. Note that it is not necessary to iterate over code objects
6196   // on scavenge collections.
6197   if (mode != VISIT_ALL_IN_SCAVENGE) {
6198     isolate_->builtins()->IterateBuiltins(v);
6199   }
6200   v->Synchronize(VisitorSynchronization::kBuiltins);
6201
6202   // Iterate over global handles.
6203   switch (mode) {
6204     case VISIT_ONLY_STRONG:
6205       isolate_->global_handles()->IterateStrongRoots(v);
6206       break;
6207     case VISIT_ALL_IN_SCAVENGE:
6208       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6209       break;
6210     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6211     case VISIT_ALL:
6212       isolate_->global_handles()->IterateAllRoots(v);
6213       break;
6214   }
6215   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6216
6217   // Iterate over eternal handles.
6218   if (mode == VISIT_ALL_IN_SCAVENGE) {
6219     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6220   } else {
6221     isolate_->eternal_handles()->IterateAllRoots(v);
6222   }
6223   v->Synchronize(VisitorSynchronization::kEternalHandles);
6224
6225   // Iterate over pointers being held by inactive threads.
6226   isolate_->thread_manager()->Iterate(v);
6227   v->Synchronize(VisitorSynchronization::kThreadManager);
6228
6229   // Iterate over the pointers the Serialization/Deserialization code is
6230   // holding.
6231   // During garbage collection this keeps the partial snapshot cache alive.
6232   // During deserialization of the startup snapshot this creates the partial
6233   // snapshot cache and deserializes the objects it refers to.  During
6234   // serialization this does nothing, since the partial snapshot cache is
6235   // empty.  However the next thing we do is create the partial snapshot,
6236   // filling up the partial snapshot cache with objects it needs as we go.
6237   SerializerDeserializer::Iterate(isolate_, v);
6238   // We don't do a v->Synchronize call here, because in debug mode that will
6239   // output a flag to the snapshot.  However at this point the serializer and
6240   // deserializer are deliberately a little unsynchronized (see above) so the
6241   // checking of the sync flag in the snapshot would fail.
6242 }
6243
6244
6245 // TODO(1236194): Since the heap size is configurable on the command line
6246 // and through the API, we should gracefully handle the case that the heap
6247 // size is not big enough to fit all the initial objects.
6248 bool Heap::ConfigureHeap(int max_semispace_size,
6249                          intptr_t max_old_gen_size,
6250                          intptr_t max_executable_size) {
6251   if (HasBeenSetUp()) return false;
6252
6253   if (FLAG_stress_compaction) {
6254     // This will cause more frequent GCs when stressing.
6255     max_semispace_size_ = Page::kPageSize;
6256   }
6257
6258   if (max_semispace_size > 0) {
6259     if (max_semispace_size < Page::kPageSize) {
6260       max_semispace_size = Page::kPageSize;
6261       if (FLAG_trace_gc) {
6262         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6263                  Page::kPageSize >> 10);
6264       }
6265     }
6266     max_semispace_size_ = max_semispace_size;
6267   }
6268
6269   if (Snapshot::IsEnabled()) {
6270     // If we are using a snapshot we always reserve the default amount
6271     // of memory for each semispace because code in the snapshot has
6272     // write-barrier code that relies on the size and alignment of new
6273     // space.  We therefore cannot use a larger max semispace size
6274     // than the default reserved semispace size.
6275     if (max_semispace_size_ > reserved_semispace_size_) {
6276       max_semispace_size_ = reserved_semispace_size_;
6277       if (FLAG_trace_gc) {
6278         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6279                  reserved_semispace_size_ >> 10);
6280       }
6281     }
6282   } else {
6283     // If we are not using snapshots we reserve space for the actual
6284     // max semispace size.
6285     reserved_semispace_size_ = max_semispace_size_;
6286   }
6287
6288   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6289   if (max_executable_size > 0) {
6290     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6291   }
6292
6293   // The max executable size must be less than or equal to the max old
6294   // generation size.
6295   if (max_executable_size_ > max_old_generation_size_) {
6296     max_executable_size_ = max_old_generation_size_;
6297   }
6298
6299   // The new space size must be a power of two to support single-bit testing
6300   // for containment.
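  // With power-of-two sized (and correspondingly aligned) semispaces, the
  // containment test reduces to a single mask-and-compare, roughly
  // (addr & ~(size - 1)) == space_start.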
6301   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6302   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6303   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6304
6305   // The external allocation limit should be below 256 MB on all architectures
6306   // to avoid unnecessary low memory notifications, as that is the threshold
6307   // for some embedders.
6308   external_allocation_limit_ = 12 * max_semispace_size_;
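  // For example, 16 MB semispaces give 12 * 16 MB = 192 MB, comfortably below
  // the 256 MB threshold asserted below.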
6309   ASSERT(external_allocation_limit_ <= 256 * MB);
6310
6311   // The old generation is paged and needs at least one page for each space.
6312   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6313   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6314                                                        Page::kPageSize),
6315                                  RoundUp(max_old_generation_size_,
6316                                          Page::kPageSize));
6317
6318   // We rely on being able to allocate new arrays in paged spaces.
6319   ASSERT(Page::kMaxRegularHeapObjectSize >=
6320          (JSArray::kSize +
6321           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6322           AllocationMemento::kSize));
6323
6324   configured_ = true;
6325   return true;
6326 }
6327
6328
6329 bool Heap::ConfigureHeapDefault() {
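  // FLAG_max_new_space_size (in KB) covers both semispaces of the new space,
  // hence the division by two to obtain the per-semispace maximum.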
6330   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6331                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6332                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6333 }
6334
6335
6336 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6337   *stats->start_marker = HeapStats::kStartMarker;
6338   *stats->end_marker = HeapStats::kEndMarker;
6339   *stats->new_space_size = new_space_.SizeAsInt();
6340   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6341   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6342   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6343   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6344   *stats->old_data_space_capacity = old_data_space_->Capacity();
6345   *stats->code_space_size = code_space_->SizeOfObjects();
6346   *stats->code_space_capacity = code_space_->Capacity();
6347   *stats->map_space_size = map_space_->SizeOfObjects();
6348   *stats->map_space_capacity = map_space_->Capacity();
6349   *stats->cell_space_size = cell_space_->SizeOfObjects();
6350   *stats->cell_space_capacity = cell_space_->Capacity();
6351   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6352   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6353   *stats->lo_space_size = lo_space_->Size();
6354   isolate_->global_handles()->RecordStats(stats);
6355   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6356   *stats->memory_allocator_capacity =
6357       isolate()->memory_allocator()->Size() +
6358       isolate()->memory_allocator()->Available();
6359   *stats->os_error = OS::GetLastError();
6361   if (take_snapshot) {
6362     HeapIterator iterator(this);
6363     for (HeapObject* obj = iterator.next();
6364          obj != NULL;
6365          obj = iterator.next()) {
6366       InstanceType type = obj->map()->instance_type();
6367       ASSERT(0 <= type && type <= LAST_TYPE);
6368       stats->objects_per_type[type]++;
6369       stats->size_per_type[type] += obj->Size();
6370     }
6371   }
6372 }
6373
6374
6375 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6376   return old_pointer_space_->SizeOfObjects()
6377       + old_data_space_->SizeOfObjects()
6378       + code_space_->SizeOfObjects()
6379       + map_space_->SizeOfObjects()
6380       + cell_space_->SizeOfObjects()
6381       + property_cell_space_->SizeOfObjects()
6382       + lo_space_->SizeOfObjects();
6383 }
6384
6385
6386 bool Heap::AdvanceSweepers(int step_size) {
6387   ASSERT(isolate()->num_sweeper_threads() == 0);
6388   bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
6389   sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
6390   return sweeping_complete;
6391 }
6392
6393
6394 int64_t Heap::PromotedExternalMemorySize() {
6395   if (amount_of_external_allocated_memory_
6396       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6397   return amount_of_external_allocated_memory_
6398       - amount_of_external_allocated_memory_at_last_global_gc_;
6399 }
6400
6401
6402 void Heap::EnableInlineAllocation() {
6403   if (!inline_allocation_disabled_) return;
6404   inline_allocation_disabled_ = false;
6405
6406   // Update inline allocation limit for new space.
6407   new_space()->UpdateInlineAllocationLimit(0);
6408 }
6409
6410
6411 void Heap::DisableInlineAllocation() {
6412   if (inline_allocation_disabled_) return;
6413   inline_allocation_disabled_ = true;
6414
6415   // Update inline allocation limit for new space.
6416   new_space()->UpdateInlineAllocationLimit(0);
6417
6418   // Update inline allocation limit for old spaces.
6419   PagedSpaces spaces(this);
6420   for (PagedSpace* space = spaces.next();
6421        space != NULL;
6422        space = spaces.next()) {
6423     space->EmptyAllocationInfo();
6424   }
6425 }
6426
6427
6428 V8_DECLARE_ONCE(initialize_gc_once);
6429
6430 static void InitializeGCOnce() {
6431   InitializeScavengingVisitorsTables();
6432   NewSpaceScavenger::Initialize();
6433   MarkCompactCollector::Initialize();
6434 }
6435
6436
6437 bool Heap::SetUp() {
6438 #ifdef DEBUG
6439   allocation_timeout_ = FLAG_gc_interval;
6440 #endif
6441
6442   // Initialize heap spaces and initial maps and objects. Whenever something
6443   // goes wrong, just return false. The caller should check the results and
6444   // call Heap::TearDown() to release allocated memory.
6445   //
6446   // If the heap is not yet configured (e.g. through the API), configure it.
6447   // Configuration is based on the flags new-space-size (really the semispace
6448   // size) and old-space-size if set or the initial values of semispace_size_
6449   // and old_generation_size_ otherwise.
6450   if (!configured_) {
6451     if (!ConfigureHeapDefault()) return false;
6452   }
6453
6454   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6455
6456   MarkMapPointersAsEncoded(false);
6457
6458   // Set up memory allocator.
6459   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6460       return false;
6461
6462   // Set up new space.
6463   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6464     return false;
6465   }
6466
6467   // Initialize old pointer space.
6468   old_pointer_space_ =
6469       new OldSpace(this,
6470                    max_old_generation_size_,
6471                    OLD_POINTER_SPACE,
6472                    NOT_EXECUTABLE);
6473   if (old_pointer_space_ == NULL) return false;
6474   if (!old_pointer_space_->SetUp()) return false;
6475
6476   // Initialize old data space.
6477   old_data_space_ =
6478       new OldSpace(this,
6479                    max_old_generation_size_,
6480                    OLD_DATA_SPACE,
6481                    NOT_EXECUTABLE);
6482   if (old_data_space_ == NULL) return false;
6483   if (!old_data_space_->SetUp()) return false;
6484
6485   // Initialize the code space, set its maximum capacity to the old
6486   // generation size. It needs executable memory.
6487   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6488   // virtual address space, so that they can call each other with near calls.
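  // (Near calls encode a 32-bit pc-relative displacement, so keeping all code
  // in one contiguous range lets any code object reach any other.)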
6489   if (code_range_size_ > 0) {
6490     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6491       return false;
6492     }
6493   }
6494
6495   code_space_ =
6496       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6497   if (code_space_ == NULL) return false;
6498   if (!code_space_->SetUp()) return false;
6499
6500   // Initialize map space.
6501   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6502   if (map_space_ == NULL) return false;
6503   if (!map_space_->SetUp()) return false;
6504
6505   // Initialize simple cell space.
6506   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6507   if (cell_space_ == NULL) return false;
6508   if (!cell_space_->SetUp()) return false;
6509
6510   // Initialize global property cell space.
6511   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6512                                                PROPERTY_CELL_SPACE);
6513   if (property_cell_space_ == NULL) return false;
6514   if (!property_cell_space_->SetUp()) return false;
6515
6516   // The large object space may contain code or data.  We set the memory
6517   // to be non-executable here for safety, but this means we need to enable it
6518   // explicitly when allocating large code objects.
6519   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6520   if (lo_space_ == NULL) return false;
6521   if (!lo_space_->SetUp()) return false;
6522
6523   // Set up the seed that is used to randomize the string hash function.
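  // A randomized seed makes string hash values unpredictable between runs,
  // which hardens the string table against hash-flooding style attacks.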
6524   ASSERT(hash_seed() == 0);
6525   if (FLAG_randomize_hashes) {
6526     if (FLAG_hash_seed == 0) {
6527       int rnd = isolate()->random_number_generator()->NextInt();
6528       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6529     } else {
6530       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6531     }
6532   }
6533
6534   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6535   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6536
6537   store_buffer()->SetUp();
6538
6539   mark_compact_collector()->SetUp();
6540
6541   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
6542
6543   return true;
6544 }
6545
6546
6547 bool Heap::CreateHeapObjects() {
6548   // Create initial maps.
6549   if (!CreateInitialMaps()) return false;
6550   if (!CreateApiObjects()) return false;
6551
6552   // Create initial objects
6553   if (!CreateInitialObjects()) return false;
6554
6555   native_contexts_list_ = undefined_value();
6556   array_buffers_list_ = undefined_value();
6557   allocation_sites_list_ = undefined_value();
6558   weak_object_to_code_table_ = undefined_value();
6559   return true;
6560 }
6561
6562
6563 void Heap::SetStackLimits() {
6564   ASSERT(isolate_ != NULL);
6565   ASSERT(isolate_ == isolate());
6566   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6567   // something that looks like an out-of-range Smi to the GC.
6568
6569   // Set up the special root array entries containing the stack limits.
6570   // These are actually addresses, but the tag makes the GC ignore it.
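  // Clearing the low bits and or-ing in kSmiTag makes the stored word look
  // like a Smi, so root visitors skip it instead of following it as a heap
  // pointer.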
6571   roots_[kStackLimitRootIndex] =
6572       reinterpret_cast<Object*>(
6573           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6574   roots_[kRealStackLimitRootIndex] =
6575       reinterpret_cast<Object*>(
6576           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6577 }
6578
6579
6580 void Heap::TearDown() {
6581 #ifdef VERIFY_HEAP
6582   if (FLAG_verify_heap) {
6583     Verify();
6584   }
6585 #endif
6586
6587   UpdateMaximumCommitted();
6588
6589   if (FLAG_print_cumulative_gc_stat) {
6590     PrintF("\n");
6591     PrintF("gc_count=%d ", gc_count_);
6592     PrintF("mark_sweep_count=%d ", ms_count_);
6593     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6594     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6595     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6596     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6597            get_max_alive_after_gc());
6598     PrintF("total_marking_time=%.1f ", marking_time());
6599     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6600     PrintF("\n\n");
6601   }
6602
6603   if (FLAG_print_max_heap_committed) {
6604     PrintF("\n");
6605     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
6606       MaximumCommittedMemory());
6607     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
6608       new_space_.MaximumCommittedMemory());
6609     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
6610       old_data_space_->MaximumCommittedMemory());
6611     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6612       old_pointer_space_->MaximumCommittedMemory());
6613     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6614       old_pointer_space_->MaximumCommittedMemory());
6615     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
6616       code_space_->MaximumCommittedMemory());
6617     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
6618       map_space_->MaximumCommittedMemory());
6619     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
6620       cell_space_->MaximumCommittedMemory());
6621     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
6622       property_cell_space_->MaximumCommittedMemory());
6623     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
6624       lo_space_->MaximumCommittedMemory());
6625     PrintF("\n\n");
6626   }
6627
6628   TearDownArrayBuffers();
6629
6630   isolate_->global_handles()->TearDown();
6631
6632   external_string_table_.TearDown();
6633
6634   mark_compact_collector()->TearDown();
6635
6636   new_space_.TearDown();
6637
6638   if (old_pointer_space_ != NULL) {
6639     old_pointer_space_->TearDown();
6640     delete old_pointer_space_;
6641     old_pointer_space_ = NULL;
6642   }
6643
6644   if (old_data_space_ != NULL) {
6645     old_data_space_->TearDown();
6646     delete old_data_space_;
6647     old_data_space_ = NULL;
6648   }
6649
6650   if (code_space_ != NULL) {
6651     code_space_->TearDown();
6652     delete code_space_;
6653     code_space_ = NULL;
6654   }
6655
6656   if (map_space_ != NULL) {
6657     map_space_->TearDown();
6658     delete map_space_;
6659     map_space_ = NULL;
6660   }
6661
6662   if (cell_space_ != NULL) {
6663     cell_space_->TearDown();
6664     delete cell_space_;
6665     cell_space_ = NULL;
6666   }
6667
6668   if (property_cell_space_ != NULL) {
6669     property_cell_space_->TearDown();
6670     delete property_cell_space_;
6671     property_cell_space_ = NULL;
6672   }
6673
6674   if (lo_space_ != NULL) {
6675     lo_space_->TearDown();
6676     delete lo_space_;
6677     lo_space_ = NULL;
6678   }
6679
6680   store_buffer()->TearDown();
6681   incremental_marking()->TearDown();
6682
6683   isolate_->memory_allocator()->TearDown();
6684
6685   delete relocation_mutex_;
6686   relocation_mutex_ = NULL;
6687 }
6688
6689
6690 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6691                                  GCType gc_type,
6692                                  bool pass_isolate) {
6693   ASSERT(callback != NULL);
6694   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6695   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6696   return gc_prologue_callbacks_.Add(pair);
6697 }
6698
6699
6700 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6701   ASSERT(callback != NULL);
6702   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6703     if (gc_prologue_callbacks_[i].callback == callback) {
6704       gc_prologue_callbacks_.Remove(i);
6705       return;
6706     }
6707   }
6708   UNREACHABLE();
6709 }
6710
6711
6712 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6713                                  GCType gc_type,
6714                                  bool pass_isolate) {
6715   ASSERT(callback != NULL);
6716   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6717   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6718   return gc_epilogue_callbacks_.Add(pair);
6719 }
6720
6721
6722 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6723   ASSERT(callback != NULL);
6724   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6725     if (gc_epilogue_callbacks_[i].callback == callback) {
6726       gc_epilogue_callbacks_.Remove(i);
6727       return;
6728     }
6729   }
6730   UNREACHABLE();
6731 }
6732
6733
6734 MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
6735                                                  DependentCode* dep) {
6736   ASSERT(!InNewSpace(obj));
6737   ASSERT(!InNewSpace(dep));
6738   MaybeObject* maybe_obj =
6739       WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
6740   WeakHashTable* table;
6741   if (!maybe_obj->To(&table)) return maybe_obj;
6742   if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
6743     WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
6744   }
6745   set_weak_object_to_code_table(table);
6746   ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
6747   return weak_object_to_code_table_;
6748 }
6749
6750
6751 DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
6752   Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
6753   if (dep->IsDependentCode()) return DependentCode::cast(dep);
6754   return DependentCode::cast(empty_fixed_array());
6755 }
6756
6757
6758 void Heap::EnsureWeakObjectToCodeTable() {
6759   if (!weak_object_to_code_table()->IsHashTable()) {
6760     set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
6761   }
6762 }
6763
6764
6765 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
6766   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
6767 }
6768
6769 #ifdef DEBUG
6770
6771 class PrintHandleVisitor: public ObjectVisitor {
6772  public:
6773   void VisitPointers(Object** start, Object** end) {
6774     for (Object** p = start; p < end; p++)
6775       PrintF("  handle %p to %p\n",
6776              reinterpret_cast<void*>(p),
6777              reinterpret_cast<void*>(*p));
6778   }
6779 };
6780
6781
6782 void Heap::PrintHandles() {
6783   PrintF("Handles:\n");
6784   PrintHandleVisitor v;
6785   isolate_->handle_scope_implementer()->Iterate(&v);
6786 }
6787
6788 #endif
6789
6790
6791 Space* AllSpaces::next() {
6792   switch (counter_++) {
6793     case NEW_SPACE:
6794       return heap_->new_space();
6795     case OLD_POINTER_SPACE:
6796       return heap_->old_pointer_space();
6797     case OLD_DATA_SPACE:
6798       return heap_->old_data_space();
6799     case CODE_SPACE:
6800       return heap_->code_space();
6801     case MAP_SPACE:
6802       return heap_->map_space();
6803     case CELL_SPACE:
6804       return heap_->cell_space();
6805     case PROPERTY_CELL_SPACE:
6806       return heap_->property_cell_space();
6807     case LO_SPACE:
6808       return heap_->lo_space();
6809     default:
6810       return NULL;
6811   }
6812 }
6813
6814
6815 PagedSpace* PagedSpaces::next() {
6816   switch (counter_++) {
6817     case OLD_POINTER_SPACE:
6818       return heap_->old_pointer_space();
6819     case OLD_DATA_SPACE:
6820       return heap_->old_data_space();
6821     case CODE_SPACE:
6822       return heap_->code_space();
6823     case MAP_SPACE:
6824       return heap_->map_space();
6825     case CELL_SPACE:
6826       return heap_->cell_space();
6827     case PROPERTY_CELL_SPACE:
6828       return heap_->property_cell_space();
6829     default:
6830       return NULL;
6831   }
6832 }
6833
6834
6836 OldSpace* OldSpaces::next() {
6837   switch (counter_++) {
6838     case OLD_POINTER_SPACE:
6839       return heap_->old_pointer_space();
6840     case OLD_DATA_SPACE:
6841       return heap_->old_data_space();
6842     case CODE_SPACE:
6843       return heap_->code_space();
6844     default:
6845       return NULL;
6846   }
6847 }
6848
6849
6850 SpaceIterator::SpaceIterator(Heap* heap)
6851     : heap_(heap),
6852       current_space_(FIRST_SPACE),
6853       iterator_(NULL),
6854       size_func_(NULL) {
6855 }
6856
6857
6858 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6859     : heap_(heap),
6860       current_space_(FIRST_SPACE),
6861       iterator_(NULL),
6862       size_func_(size_func) {
6863 }
6864
6865
6866 SpaceIterator::~SpaceIterator() {
6867   // Delete active iterator if any.
6868   delete iterator_;
6869 }
6870
6871
6872 bool SpaceIterator::has_next() {
6873   // Iterate until no more spaces.
6874   return current_space_ != LAST_SPACE;
6875 }
6876
6877
6878 ObjectIterator* SpaceIterator::next() {
6879   if (iterator_ != NULL) {
6880     delete iterator_;
6881     iterator_ = NULL;
6882     // Move to the next space
6883     current_space_++;
6884     if (current_space_ > LAST_SPACE) {
6885       return NULL;
6886     }
6887   }
6888
6889   // Return iterator for the new current space.
6890   return CreateIterator();
6891 }
6892
6893
6894 // Create an iterator for the space to iterate.
6895 ObjectIterator* SpaceIterator::CreateIterator() {
6896   ASSERT(iterator_ == NULL);
6897
6898   switch (current_space_) {
6899     case NEW_SPACE:
6900       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6901       break;
6902     case OLD_POINTER_SPACE:
6903       iterator_ =
6904           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6905       break;
6906     case OLD_DATA_SPACE:
6907       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6908       break;
6909     case CODE_SPACE:
6910       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6911       break;
6912     case MAP_SPACE:
6913       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6914       break;
6915     case CELL_SPACE:
6916       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6917       break;
6918     case PROPERTY_CELL_SPACE:
6919       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
6920                                          size_func_);
6921       break;
6922     case LO_SPACE:
6923       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6924       break;
6925   }
6926
6927   // Return the newly allocated iterator.
6928   ASSERT(iterator_ != NULL);
6929   return iterator_;
6930 }
6931
6932
6933 class HeapObjectsFilter {
6934  public:
6935   virtual ~HeapObjectsFilter() {}
6936   virtual bool SkipObject(HeapObject* object) = 0;
6937 };
6938
6939
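// Filters out objects that are unreachable from the roots: the constructor
// marks everything reachable, SkipObject() then rejects unmarked objects, and
// the destructor clears the mark bits again.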
6940 class UnreachableObjectsFilter : public HeapObjectsFilter {
6941  public:
6942   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6943     MarkReachableObjects();
6944   }
6945
6946   ~UnreachableObjectsFilter() {
6947     heap_->mark_compact_collector()->ClearMarkbits();
6948   }
6949
6950   bool SkipObject(HeapObject* object) {
6951     MarkBit mark_bit = Marking::MarkBitFrom(object);
6952     return !mark_bit.Get();
6953   }
6954
6955  private:
6956   class MarkingVisitor : public ObjectVisitor {
6957    public:
6958     MarkingVisitor() : marking_stack_(10) {}
6959
6960     void VisitPointers(Object** start, Object** end) {
6961       for (Object** p = start; p < end; p++) {
6962         if (!(*p)->IsHeapObject()) continue;
6963         HeapObject* obj = HeapObject::cast(*p);
6964         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6965         if (!mark_bit.Get()) {
6966           mark_bit.Set();
6967           marking_stack_.Add(obj);
6968         }
6969       }
6970     }
6971
6972     void TransitiveClosure() {
6973       while (!marking_stack_.is_empty()) {
6974         HeapObject* obj = marking_stack_.RemoveLast();
6975         obj->Iterate(this);
6976       }
6977     }
6978
6979    private:
6980     List<HeapObject*> marking_stack_;
6981   };
6982
6983   void MarkReachableObjects() {
6984     MarkingVisitor visitor;
6985     heap_->IterateRoots(&visitor, VISIT_ALL);
6986     visitor.TransitiveClosure();
6987   }
6988
6989   Heap* heap_;
6990   DisallowHeapAllocation no_allocation_;
6991 };
6992
6993
6994 HeapIterator::HeapIterator(Heap* heap)
6995     : heap_(heap),
6996       filtering_(HeapIterator::kNoFiltering),
6997       filter_(NULL) {
6998   Init();
6999 }
7000
7001
7002 HeapIterator::HeapIterator(Heap* heap,
7003                            HeapIterator::HeapObjectsFiltering filtering)
7004     : heap_(heap),
7005       filtering_(filtering),
7006       filter_(NULL) {
7007   Init();
7008 }
7009
7010
7011 HeapIterator::~HeapIterator() {
7012   Shutdown();
7013 }
7014
7015
7016 void HeapIterator::Init() {
7017   // Start the iteration.
7018   space_iterator_ = new SpaceIterator(heap_);
7019   switch (filtering_) {
7020     case kFilterUnreachable:
7021       filter_ = new UnreachableObjectsFilter(heap_);
7022       break;
7023     default:
7024       break;
7025   }
7026   object_iterator_ = space_iterator_->next();
7027 }
7028
7029
7030 void HeapIterator::Shutdown() {
7031 #ifdef DEBUG
7032   // Assert that in filtering mode we have iterated through all
7033   // objects. Otherwise, heap will be left in an inconsistent state.
7034   if (filtering_ != kNoFiltering) {
7035     ASSERT(object_iterator_ == NULL);
7036   }
7037 #endif
7038   // Make sure the last iterator is deallocated.
7039   delete space_iterator_;
7040   space_iterator_ = NULL;
7041   object_iterator_ = NULL;
7042   delete filter_;
7043   filter_ = NULL;
7044 }
7045
7046
7047 HeapObject* HeapIterator::next() {
7048   if (filter_ == NULL) return NextObject();
7049
7050   HeapObject* obj = NextObject();
7051   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7052   return obj;
7053 }
7054
7055
7056 HeapObject* HeapIterator::NextObject() {
7057   // No iterator means we are done.
7058   if (object_iterator_ == NULL) return NULL;
7059
7060   if (HeapObject* obj = object_iterator_->next_object()) {
7061     // If the current iterator has more objects we are fine.
7062     return obj;
7063   } else {
7064     // Go through the spaces looking for one that has objects.
7065     while (space_iterator_->has_next()) {
7066       object_iterator_ = space_iterator_->next();
7067       if (HeapObject* obj = object_iterator_->next_object()) {
7068         return obj;
7069       }
7070     }
7071   }
7072   // Done with the last space.
7073   object_iterator_ = NULL;
7074   return NULL;
7075 }
7076
7077
7078 void HeapIterator::reset() {
7079   // Restart the iterator.
7080   Shutdown();
7081   Init();
7082 }
7083
7084
7085 #ifdef DEBUG
7086
7087 Object* const PathTracer::kAnyGlobalObject = NULL;
7088
7089 class PathTracer::MarkVisitor: public ObjectVisitor {
7090  public:
7091   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7092   void VisitPointers(Object** start, Object** end) {
7093     // Scan all HeapObject pointers in [start, end)
7094     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7095       if ((*p)->IsHeapObject())
7096         tracer_->MarkRecursively(p, this);
7097     }
7098   }
7099
7100  private:
7101   PathTracer* tracer_;
7102 };
7103
7104
7105 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7106  public:
7107   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7108   void VisitPointers(Object** start, Object** end) {
7109     // Scan all HeapObject pointers in [start, end)
7110     for (Object** p = start; p < end; p++) {
7111       if ((*p)->IsHeapObject())
7112         tracer_->UnmarkRecursively(p, this);
7113     }
7114   }
7115
7116  private:
7117   PathTracer* tracer_;
7118 };
7119
7120
7121 void PathTracer::VisitPointers(Object** start, Object** end) {
7122   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7123   // Visit all HeapObject pointers in [start, end)
7124   for (Object** p = start; !done && (p < end); p++) {
7125     if ((*p)->IsHeapObject()) {
7126       TracePathFrom(p);
7127       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7128     }
7129   }
7130 }
7131
7132
7133 void PathTracer::Reset() {
7134   found_target_ = false;
7135   object_stack_.Clear();
7136 }
7137
7138
7139 void PathTracer::TracePathFrom(Object** root) {
7140   ASSERT((search_target_ == kAnyGlobalObject) ||
7141          search_target_->IsHeapObject());
7142   found_target_in_trace_ = false;
7143   Reset();
7144
7145   MarkVisitor mark_visitor(this);
7146   MarkRecursively(root, &mark_visitor);
7147
7148   UnmarkVisitor unmark_visitor(this);
7149   UnmarkRecursively(root, &unmark_visitor);
7150
7151   ProcessResults();
7152 }
7153
7154
7155 static bool SafeIsNativeContext(HeapObject* obj) {
7156   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7157 }
7158
7159
7160 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7161   if (!(*p)->IsHeapObject()) return;
7162
7163   HeapObject* obj = HeapObject::cast(*p);
7164
7165   Object* map = obj->map();
7166
7167   if (!map->IsHeapObject()) return;  // visited before
7168
7169   if (found_target_in_trace_) return;  // stop if target found
7170   object_stack_.Add(obj);
7171   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7172       (obj == search_target_)) {
7173     found_target_in_trace_ = true;
7174     found_target_ = true;
7175     return;
7176   }
7177
7178   bool is_native_context = SafeIsNativeContext(obj);
7179
7180   // not visited yet
7181   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7182
7183   Address map_addr = map_p->address();
7184
7185   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7186
7187   // Scan the object body.
7188   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7189     // This is specialized to scan Contexts properly.
7190     Object** start = reinterpret_cast<Object**>(obj->address() +
7191                                                 Context::kHeaderSize);
7192     Object** end = reinterpret_cast<Object**>(obj->address() +
7193         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7194     mark_visitor->VisitPointers(start, end);
7195   } else {
7196     obj->IterateBody(map_p->instance_type(),
7197                      obj->SizeFromMap(map_p),
7198                      mark_visitor);
7199   }
7200
7201   // Scan the map after the body because the body is a lot more interesting
7202   // when doing leak detection.
7203   MarkRecursively(&map, mark_visitor);
7204
7205   if (!found_target_in_trace_)  // don't pop if found the target
7206     object_stack_.RemoveLast();
7207 }
7208
7209
7210 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7211   if (!(*p)->IsHeapObject()) return;
7212
7213   HeapObject* obj = HeapObject::cast(*p);
7214
7215   Object* map = obj->map();
7216
7217   if (map->IsHeapObject()) return;  // unmarked already
7218
7219   Address map_addr = reinterpret_cast<Address>(map);
7220
7221   map_addr -= kMarkTag;
7222
7223   ASSERT_TAG_ALIGNED(map_addr);
7224
7225   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7226
7227   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7228
7229   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7230
7231   obj->IterateBody(Map::cast(map_p)->instance_type(),
7232                    obj->SizeFromMap(Map::cast(map_p)),
7233                    unmark_visitor);
7234 }
7235
7236
7237 void PathTracer::ProcessResults() {
7238   if (found_target_) {
7239     PrintF("=====================================\n");
7240     PrintF("====        Path to object       ====\n");
7241     PrintF("=====================================\n\n");
7242
7243     ASSERT(!object_stack_.is_empty());
7244     for (int i = 0; i < object_stack_.length(); i++) {
7245       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7246       Object* obj = object_stack_[i];
7247       obj->Print();
7248     }
7249     PrintF("=====================================\n");
7250   }
7251 }
7252
7253
7254 // Triggers a depth-first traversal of reachable objects from one
7255 // given root object and finds a path to a specific heap object and
7256 // prints it.
7257 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7258   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7259   tracer.VisitPointer(&root);
7260 }
7261
7262
7263 // Triggers a depth-first traversal of reachable objects from roots
7264 // and finds a path to a specific heap object and prints it.
7265 void Heap::TracePathToObject(Object* target) {
7266   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7267   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7268 }
7269
7270
7271 // Triggers a depth-first traversal of reachable objects from roots
7272 // and finds a path to any global object and prints it. Useful for
7273 // determining the source for leaks of global objects.
7274 void Heap::TracePathToGlobal() {
7275   PathTracer tracer(PathTracer::kAnyGlobalObject,
7276                     PathTracer::FIND_ALL,
7277                     VISIT_ALL);
7278   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7279 }
7280 #endif
7281
7282
7283 static intptr_t CountTotalHolesSize(Heap* heap) {
7284   intptr_t holes_size = 0;
7285   OldSpaces spaces(heap);
7286   for (OldSpace* space = spaces.next();
7287        space != NULL;
7288        space = spaces.next()) {
7289     holes_size += space->Waste() + space->Available();
7290   }
7291   return holes_size;
7292 }
7293
7294
7295 GCTracer::GCTracer(Heap* heap,
7296                    const char* gc_reason,
7297                    const char* collector_reason)
7298     : start_time_(0.0),
7299       start_object_size_(0),
7300       start_memory_size_(0),
7301       gc_count_(0),
7302       full_gc_count_(0),
7303       allocated_since_last_gc_(0),
7304       spent_in_mutator_(0),
7305       promoted_objects_size_(0),
7306       nodes_died_in_new_space_(0),
7307       nodes_copied_in_new_space_(0),
7308       nodes_promoted_(0),
7309       heap_(heap),
7310       gc_reason_(gc_reason),
7311       collector_reason_(collector_reason) {
7312   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7313   start_time_ = OS::TimeCurrentMillis();
7314   start_object_size_ = heap_->SizeOfObjects();
7315   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7316
7317   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7318     scopes_[i] = 0;
7319   }
7320
7321   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7322
7323   allocated_since_last_gc_ =
7324       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7325
7326   if (heap_->last_gc_end_timestamp_ > 0) {
7327     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7328   }
7329
7330   steps_count_ = heap_->incremental_marking()->steps_count();
7331   steps_took_ = heap_->incremental_marking()->steps_took();
7332   longest_step_ = heap_->incremental_marking()->longest_step();
7333   steps_count_since_last_gc_ =
7334       heap_->incremental_marking()->steps_count_since_last_gc();
7335   steps_took_since_last_gc_ =
7336       heap_->incremental_marking()->steps_took_since_last_gc();
7337 }
7338
7339
7340 GCTracer::~GCTracer() {
7341   // Print ONE trace line iff a tracing flag is set.
7342   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7343
7344   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7345
7346   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7347   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7348
7349   double time = heap_->last_gc_end_timestamp_ - start_time_;
7350
7351   // Update cumulative GC statistics if required.
7352   if (FLAG_print_cumulative_gc_stat) {
7353     heap_->total_gc_time_ms_ += time;
7354     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7355     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7356                                      heap_->alive_after_last_gc_);
7357     if (!first_gc) {
7358       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7359                                    spent_in_mutator_);
7360     }
7361   } else if (FLAG_trace_gc_verbose) {
7362     heap_->total_gc_time_ms_ += time;
7363   }
7364
7365   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7366
7367   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7368
7369   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7370   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7371
7372   if (!FLAG_trace_gc_nvp) {
7373     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7374
7375     double end_memory_size_mb =
7376         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7377
7378     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7379            CollectorString(),
7380            static_cast<double>(start_object_size_) / MB,
7381            static_cast<double>(start_memory_size_) / MB,
7382            SizeOfHeapObjects(),
7383            end_memory_size_mb);
7384
7385     if (external_time > 0) PrintF("%d / ", external_time);
7386     PrintF("%.1f ms", time);
7387     if (steps_count_ > 0) {
7388       if (collector_ == SCAVENGER) {
7389         PrintF(" (+ %.1f ms in %d steps since last GC)",
7390                steps_took_since_last_gc_,
7391                steps_count_since_last_gc_);
7392       } else {
7393         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7394                    "biggest step %.1f ms)",
7395                steps_took_,
7396                steps_count_,
7397                longest_step_);
7398       }
7399     }
7400
7401     if (gc_reason_ != NULL) {
7402       PrintF(" [%s]", gc_reason_);
7403     }
7404
7405     if (collector_reason_ != NULL) {
7406       PrintF(" [%s]", collector_reason_);
7407     }
7408
7409     PrintF(".\n");
7410   } else {
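    // FLAG_trace_gc_nvp: emit machine-readable name=value pairs on one line.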
7411     PrintF("pause=%.1f ", time);
7412     PrintF("mutator=%.1f ", spent_in_mutator_);
7413     PrintF("gc=");
7414     switch (collector_) {
7415       case SCAVENGER:
7416         PrintF("s");
7417         break;
7418       case MARK_COMPACTOR:
7419         PrintF("ms");
7420         break;
7421       default:
7422         UNREACHABLE();
7423     }
7424     PrintF(" ");
7425
7426     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7427     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7428     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7429     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7430     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7431     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7432     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7433     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7434     PrintF("compaction_ptrs=%.1f ",
7435         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7436     PrintF("intracompaction_ptrs=%.1f ",
7437         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7438     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7439     PrintF("weakcollection_process=%.1f ",
7440         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7441     PrintF("weakcollection_clear=%.1f ",
7442         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7443
7444     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7445     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7446     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7447            in_free_list_or_wasted_before_gc_);
7448     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7449
7450     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7451     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7452     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7453     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7454     PrintF("nodes_promoted=%d ", nodes_promoted_);
7455
7456     if (collector_ == SCAVENGER) {
7457       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7458       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7459     } else {
7460       PrintF("stepscount=%d ", steps_count_);
7461       PrintF("stepstook=%.1f ", steps_took_);
7462       PrintF("longeststep=%.1f ", longest_step_);
7463     }
7464
7465     PrintF("\n");
7466   }
7467
7468   heap_->PrintShortHeapStatistics();
7469 }
7470
7471
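// Returns a human-readable name for the collector used in this GC cycle.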
7472 const char* GCTracer::CollectorString() {
7473   switch (collector_) {
7474     case SCAVENGER:
7475       return "Scavenge";
7476     case MARK_COMPACTOR:
7477       return "Mark-sweep";
7478   }
7479   return "Unknown GC";
7480 }
7481
7482
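// Computes the hash used to select a cache bucket for a (map, name) pair.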
7483 int KeyedLookupCache::Hash(Map* map, Name* name) {
7484   // Uses only lower 32 bits if pointers are larger.
7485   uintptr_t addr_hash =
7486       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7487   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7488 }
7489
7490
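// Probes every entry of the bucket for (map, name) and returns the cached
// field offset, or kNotFound if there is no match.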
7491 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7492   int index = (Hash(map, name) & kHashMask);
7493   for (int i = 0; i < kEntriesPerBucket; i++) {
7494     Key& key = keys_[index + i];
7495     if ((key.map == map) && key.name->Equals(name)) {
7496       return field_offsets_[index + i];
7497     }
7498   }
7499   return kNotFound;
7500 }
7501
7502
7503 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7504   if (!name->IsUniqueName()) {
7505     String* internalized_string;
7506     if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
7507             String::cast(name), &internalized_string)) {
7508       return;
7509     }
7510     name = internalized_string;
7511   }
7512   // This cache is cleared only between mark-compact passes, so we expect the
7513   // cache to contain only old-space names.
7514   ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));
7515
7516   int index = (Hash(map, name) & kHashMask);
7517   // After a GC there will be free slots, so we use them in order (this may
7518   // help to get the most frequently used one in position 0).
7519   for (int i = 0; i < kEntriesPerBucket; i++) {
7520     Key& key = keys_[index + i];
7521     Object* free_entry_indicator = NULL;
7522     if (key.map == free_entry_indicator) {
7523       key.map = map;
7524       key.name = name;
7525       field_offsets_[index + i] = field_offset;
7526       return;
7527     }
7528   }
7529   // No free entry found in this bucket, so we move them all down one and
7530   // put the new entry at position zero.
7531   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7532     Key& key = keys_[index + i];
7533     Key& key2 = keys_[index + i - 1];
7534     key = key2;
7535     field_offsets_[index + i] = field_offsets_[index + i - 1];
7536   }
7537
7538   // Write the new first entry.
7539   Key& key = keys_[index];
7540   key.map = map;
7541   key.name = name;
7542   field_offsets_[index] = field_offset;
7543 }
7544
7545
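// Invalidates all entries by clearing their map keys.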
7546 void KeyedLookupCache::Clear() {
7547   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7548 }
7549
7550
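// Invalidates all entries by clearing their source fields.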
7551 void DescriptorLookupCache::Clear() {
7552   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7553 }
7554
7555
7556 #ifdef DEBUG
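// With FLAG_gc_greedy, eagerly triggers a new-space GC unless we are
// bootstrapping or allocation failures are currently disallowed.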
7557 void Heap::GarbageCollectionGreedyCheck() {
7558   ASSERT(FLAG_gc_greedy);
7559   if (isolate_->bootstrapper()->IsActive()) return;
7560   if (disallow_allocation_failure()) return;
7561   CollectGarbage(NEW_SPACE);
7562 }
7563 #endif
7564
7565
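// Drops entries that were cleared during GC (replaced by the hole value) and
// moves strings that have left new space to the old-space list.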
7566 void ExternalStringTable::CleanUp() {
7567   int last = 0;
7568   for (int i = 0; i < new_space_strings_.length(); ++i) {
7569     if (new_space_strings_[i] == heap_->the_hole_value()) {
7570       continue;
7571     }
7572     ASSERT(new_space_strings_[i]->IsExternalString());
7573     if (heap_->InNewSpace(new_space_strings_[i])) {
7574       new_space_strings_[last++] = new_space_strings_[i];
7575     } else {
7576       old_space_strings_.Add(new_space_strings_[i]);
7577     }
7578   }
7579   new_space_strings_.Rewind(last);
7580   new_space_strings_.Trim();
7581
7582   last = 0;
7583   for (int i = 0; i < old_space_strings_.length(); ++i) {
7584     if (old_space_strings_[i] == heap_->the_hole_value()) {
7585       continue;
7586     }
7587     ASSERT(old_space_strings_[i]->IsExternalString());
7588     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7589     old_space_strings_[last++] = old_space_strings_[i];
7590   }
7591   old_space_strings_.Rewind(last);
7592   old_space_strings_.Trim();
7593 #ifdef VERIFY_HEAP
7594   if (FLAG_verify_heap) {
7595     Verify();
7596   }
7597 #endif
7598 }
7599
7600
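// Finalizes every remaining external string and releases both backing lists.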
7601 void ExternalStringTable::TearDown() {
7602   for (int i = 0; i < new_space_strings_.length(); ++i) {
7603     heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
7604   }
7605   new_space_strings_.Free();
7606   for (int i = 0; i < old_space_strings_.length(); ++i) {
7607     heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
7608   }
7609   old_space_strings_.Free();
7610 }
7611
7612
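// Defers releasing the chunk: it is pushed onto a singly-linked list that is
// drained later by FreeQueuedChunks.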
7613 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7614   chunk->set_next_chunk(chunks_queued_for_free_);
7615   chunks_queued_for_free_ = chunk;
7616 }
7617
7618
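// Releases all chunks queued by QueueMemoryChunkForFree after making sure the
// store buffer no longer references them.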
7619 void Heap::FreeQueuedChunks() {
7620   if (chunks_queued_for_free_ == NULL) return;
7621   MemoryChunk* next;
7622   MemoryChunk* chunk;
7623   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7624     next = chunk->next_chunk();
7625     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7626
7627     if (chunk->owner()->identity() == LO_SPACE) {
7628       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7629       // If FromAnyPointerAddress encounters a slot that belongs to a large
7630       // chunk queued for deletion, it will fail to find the chunk because
7631       // it searches the list of pages owned by the large object space, and
7632       // queued chunks have already been detached from that list.
7633       // To work around this we split the large chunk into normal kPageSize-
7634       // aligned pieces and initialize the size, owner and flags fields of
7635       // every piece. If FromAnyPointerAddress encounters a slot that belongs
7636       // to one of these smaller pieces, it will treat it as a slot on a normal Page.
7637       Address chunk_end = chunk->address() + chunk->size();
7638       MemoryChunk* inner = MemoryChunk::FromAddress(
7639           chunk->address() + Page::kPageSize);
7640       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7641       while (inner <= inner_last) {
7642         // The size of a large chunk is always a multiple of
7643         // OS::AllocateAlignment(), so there is always enough
7644         // space for a fake MemoryChunk header.
7645         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7646         // Guard against overflow.
7647         if (area_end < inner->address()) area_end = chunk_end;
7648         inner->SetArea(inner->address(), area_end);
7649         inner->set_size(Page::kPageSize);
7650         inner->set_owner(lo_space());
7651         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7652         inner = MemoryChunk::FromAddress(
7653             inner->address() + Page::kPageSize);
7654       }
7655     }
7656   }
7657   isolate_->heap()->store_buffer()->Compact();
7658   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7659   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7660     next = chunk->next_chunk();
7661     isolate_->memory_allocator()->Free(chunk);
7662   }
7663   chunks_queued_for_free_ = NULL;
7664 }
7665
7666
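// Keeps the addresses of recently unmapped pages in a small ring buffer,
// tagged so that they can be recognized in crash dumps.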
7667 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7668   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7669   // Tag the page pointer to make it findable in the dump file.
7670   if (compacted) {
7671     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7672   } else {
7673     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7674   }
7675   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7676       reinterpret_cast<Address>(p);
7677   remembered_unmapped_pages_index_++;
7678   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7679 }
7680
7681
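// Resets the per-instance-type object counts and sizes; optionally also clears
// the snapshot remembered from the last checkpoint.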
7682 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7683   memset(object_counts_, 0, sizeof(object_counts_));
7684   memset(object_sizes_, 0, sizeof(object_sizes_));
7685   if (clear_last_time_stats) {
7686     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7687     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7688   }
7689 }
7690
7691
7692 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7693
7694
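// Applies the change in per-type object counts and sizes since the last
// checkpoint to the isolate's counters, then snapshots the current values and
// clears the working arrays.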
7695 void Heap::CheckpointObjectStats() {
7696   LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
7697   Counters* counters = isolate()->counters();
7698 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7699   counters->count_of_##name()->Increment(                                      \
7700       static_cast<int>(object_counts_[name]));                                 \
7701   counters->count_of_##name()->Decrement(                                      \
7702       static_cast<int>(object_counts_last_time_[name]));                       \
7703   counters->size_of_##name()->Increment(                                       \
7704       static_cast<int>(object_sizes_[name]));                                  \
7705   counters->size_of_##name()->Decrement(                                       \
7706       static_cast<int>(object_sizes_last_time_[name]));
7707   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7708 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7709   int index;
7710 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7711   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7712   counters->count_of_CODE_TYPE_##name()->Increment(       \
7713       static_cast<int>(object_counts_[index]));           \
7714   counters->count_of_CODE_TYPE_##name()->Decrement(       \
7715       static_cast<int>(object_counts_last_time_[index])); \
7716   counters->size_of_CODE_TYPE_##name()->Increment(        \
7717       static_cast<int>(object_sizes_[index]));            \
7718   counters->size_of_CODE_TYPE_##name()->Decrement(        \
7719       static_cast<int>(object_sizes_last_time_[index]));
7720   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7721 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7722 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7723   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
7724   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
7725       static_cast<int>(object_counts_[index]));           \
7726   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
7727       static_cast<int>(object_counts_last_time_[index])); \
7728   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
7729       static_cast<int>(object_sizes_[index]));            \
7730   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
7731       static_cast<int>(object_sizes_last_time_[index]));
7732   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7733 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7734 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
7735   index =                                                                     \
7736       FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
7737   counters->count_of_CODE_AGE_##name()->Increment(                            \
7738       static_cast<int>(object_counts_[index]));                               \
7739   counters->count_of_CODE_AGE_##name()->Decrement(                            \
7740       static_cast<int>(object_counts_last_time_[index]));                     \
7741   counters->size_of_CODE_AGE_##name()->Increment(                             \
7742       static_cast<int>(object_sizes_[index]));                                \
7743   counters->size_of_CODE_AGE_##name()->Decrement(                             \
7744       static_cast<int>(object_sizes_last_time_[index]));
7745   CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
7746 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7747
7748   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
7749   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
7750   ClearObjectStats();
7751 }
7752
7753 } }  // namespace v8::internal