1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "isolate-inl.h"
42 #include "mark-compact.h"
43 #include "natives.h"
44 #include "objects-visiting.h"
45 #include "objects-visiting-inl.h"
46 #include "once.h"
47 #include "runtime-profiler.h"
48 #include "scopeinfo.h"
49 #include "snapshot.h"
50 #include "store-buffer.h"
51 #include "utils/random-number-generator.h"
52 #include "v8conversions.h"
53 #include "v8threads.h"
54 #include "v8utils.h"
55 #include "vm-state-inl.h"
56 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
57 #include "regexp-macro-assembler.h"
58 #include "arm/regexp-macro-assembler-arm.h"
59 #endif
60 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
61 #include "regexp-macro-assembler.h"
62 #include "mips/regexp-macro-assembler-mips.h"
63 #endif
64
65 namespace v8 {
66 namespace internal {
67
68
69 Heap::Heap()
70     : isolate_(NULL),
71       code_range_size_(kIs64BitArch ? 512 * MB : 0),
72 // semispace_size_ should be a power of 2 and old_generation_size_ should be
73 // a multiple of Page::kPageSize.
74       reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
75       max_semispace_size_(8 * (kPointerSize / 4)  * MB),
76       initial_semispace_size_(Page::kPageSize),
77       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
78       max_executable_size_(256ul * (kPointerSize / 4) * MB),
79 // Variables set based on semispace_size_ and old_generation_size_ in
80 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
81 // Will be 4 * reserved_semispace_size_ to ensure that young
82 // generation can be aligned to its size.
83       maximum_committed_(0),
84       survived_since_last_expansion_(0),
85       sweep_generation_(0),
86       always_allocate_scope_depth_(0),
87       linear_allocation_scope_depth_(0),
88       contexts_disposed_(0),
89       global_ic_age_(0),
90       flush_monomorphic_ics_(false),
91       scan_on_scavenge_pages_(0),
92       new_space_(this),
93       old_pointer_space_(NULL),
94       old_data_space_(NULL),
95       code_space_(NULL),
96       map_space_(NULL),
97       cell_space_(NULL),
98       property_cell_space_(NULL),
99       lo_space_(NULL),
100       gc_state_(NOT_IN_GC),
101       gc_post_processing_depth_(0),
102       ms_count_(0),
103       gc_count_(0),
104       remembered_unmapped_pages_index_(0),
105       unflattened_strings_length_(0),
106 #ifdef DEBUG
107       allocation_timeout_(0),
108       disallow_allocation_failure_(false),
109 #endif  // DEBUG
110       new_space_high_promotion_mode_active_(false),
111       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
112       size_of_old_gen_at_last_old_space_gc_(0),
113       external_allocation_limit_(0),
114       amount_of_external_allocated_memory_(0),
115       amount_of_external_allocated_memory_at_last_global_gc_(0),
116       old_gen_exhausted_(false),
117       inline_allocation_disabled_(false),
118       store_buffer_rebuilder_(store_buffer()),
119       hidden_string_(NULL),
120       gc_safe_size_of_old_object_(NULL),
121       total_regexp_code_generated_(0),
122       tracer_(NULL),
123       young_survivors_after_last_gc_(0),
124       high_survival_rate_period_length_(0),
125       low_survival_rate_period_length_(0),
126       survival_rate_(0),
127       previous_survival_rate_trend_(Heap::STABLE),
128       survival_rate_trend_(Heap::STABLE),
129       max_gc_pause_(0.0),
130       total_gc_time_ms_(0.0),
131       max_alive_after_gc_(0),
132       min_in_mutator_(kMaxInt),
133       alive_after_last_gc_(0),
134       last_gc_end_timestamp_(0.0),
135       marking_time_(0.0),
136       sweeping_time_(0.0),
137       mark_compact_collector_(this),
138       store_buffer_(this),
139       marking_(this),
140       incremental_marking_(this),
141       number_idle_notifications_(0),
142       last_idle_notification_gc_count_(0),
143       last_idle_notification_gc_count_init_(false),
144       mark_sweeps_since_idle_round_started_(0),
145       gc_count_at_last_idle_gc_(0),
146       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
147       full_codegen_bytes_generated_(0),
148       crankshaft_codegen_bytes_generated_(0),
149       gcs_since_last_deopt_(0),
150 #ifdef VERIFY_HEAP
151       no_weak_object_verification_scope_depth_(0),
152 #endif
153       allocation_sites_scratchpad_length(0),
154       promotion_queue_(this),
155       configured_(false),
156       external_string_table_(this),
157       chunks_queued_for_free_(NULL),
158       relocation_mutex_(NULL) {
159   // Allow build-time customization of the max semispace size. Building
160   // V8 with snapshots and a non-default max semispace size is much
161   // easier if you can define it as part of the build environment.
162 #if defined(V8_MAX_SEMISPACE_SIZE)
163   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
164 #endif
165
166   // Ensure old_generation_size_ is a multiple of kPageSize.
167   ASSERT(MB >= Page::kPageSize);
168
169   intptr_t max_virtual = OS::MaxVirtualMemory();
170
171   if (max_virtual > 0) {
172     if (code_range_size_ > 0) {
173       // Reserve no more than 1/8 of the memory for the code range.
174       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
175     }
176   }
177
178   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
179   native_contexts_list_ = NULL;
180   array_buffers_list_ = Smi::FromInt(0);
181   allocation_sites_list_ = Smi::FromInt(0);
182   // Put a dummy entry in the remembered pages so we can find the list in the
183   // minidump even if there are no real unmapped pages.
184   RememberUnmappedPage(NULL, false);
185
186   ClearObjectStats(true);
187 }
188
189
190 intptr_t Heap::Capacity() {
191   if (!HasBeenSetUp()) return 0;
192
193   return new_space_.Capacity() +
194       old_pointer_space_->Capacity() +
195       old_data_space_->Capacity() +
196       code_space_->Capacity() +
197       map_space_->Capacity() +
198       cell_space_->Capacity() +
199       property_cell_space_->Capacity();
200 }
201
202
203 intptr_t Heap::CommittedMemory() {
204   if (!HasBeenSetUp()) return 0;
205
206   return new_space_.CommittedMemory() +
207       old_pointer_space_->CommittedMemory() +
208       old_data_space_->CommittedMemory() +
209       code_space_->CommittedMemory() +
210       map_space_->CommittedMemory() +
211       cell_space_->CommittedMemory() +
212       property_cell_space_->CommittedMemory() +
213       lo_space_->Size();
214 }
215
216
217 size_t Heap::CommittedPhysicalMemory() {
218   if (!HasBeenSetUp()) return 0;
219
220   return new_space_.CommittedPhysicalMemory() +
221       old_pointer_space_->CommittedPhysicalMemory() +
222       old_data_space_->CommittedPhysicalMemory() +
223       code_space_->CommittedPhysicalMemory() +
224       map_space_->CommittedPhysicalMemory() +
225       cell_space_->CommittedPhysicalMemory() +
226       property_cell_space_->CommittedPhysicalMemory() +
227       lo_space_->CommittedPhysicalMemory();
228 }
229
230
231 intptr_t Heap::CommittedMemoryExecutable() {
232   if (!HasBeenSetUp()) return 0;
233
234   return isolate()->memory_allocator()->SizeExecutable();
235 }
236
237
238 void Heap::UpdateMaximumCommitted() {
239   if (!HasBeenSetUp()) return;
240
241   intptr_t current_committed_memory = CommittedMemory();
242   if (current_committed_memory > maximum_committed_) {
243     maximum_committed_ = current_committed_memory;
244   }
245 }
246
247
248 intptr_t Heap::Available() {
249   if (!HasBeenSetUp()) return 0;
250
251   return new_space_.Available() +
252       old_pointer_space_->Available() +
253       old_data_space_->Available() +
254       code_space_->Available() +
255       map_space_->Available() +
256       cell_space_->Available() +
257       property_cell_space_->Available();
258 }
259
260
261 bool Heap::HasBeenSetUp() {
262   return old_pointer_space_ != NULL &&
263          old_data_space_ != NULL &&
264          code_space_ != NULL &&
265          map_space_ != NULL &&
266          cell_space_ != NULL &&
267          property_cell_space_ != NULL &&
268          lo_space_ != NULL;
269 }
270
271
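// GC-safe size computation for old-space objects: if |object| carries an
// intrusive mark, use the size recorded by the intrusive marker; otherwise
// derive the size from the object's map.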
272 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
273   if (IntrusiveMarking::IsMarked(object)) {
274     return IntrusiveMarking::SizeOfMarkedObject(object);
275   }
276   return object->SizeFromMap(object->map());
277 }
278
279
280 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
281                                               const char** reason) {
282   // Is global GC requested?
283   if (space != NEW_SPACE) {
284     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
285     *reason = "GC in old space requested";
286     return MARK_COMPACTOR;
287   }
288
289   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
290     *reason = "GC in old space forced by flags";
291     return MARK_COMPACTOR;
292   }
293
294   // Is enough data promoted to justify a global GC?
295   if (OldGenerationAllocationLimitReached()) {
296     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
297     *reason = "promotion limit reached";
298     return MARK_COMPACTOR;
299   }
300
301   // Have allocation in OLD and LO failed?
302   if (old_gen_exhausted_) {
303     isolate_->counters()->
304         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
305     *reason = "old generations exhausted";
306     return MARK_COMPACTOR;
307   }
308
309   // Is there enough space left in OLD to guarantee that a scavenge can
310   // succeed?
311   //
312   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
313   // for object promotion. It counts only the bytes that the memory
314   // allocator has not yet allocated from the OS and assigned to any space,
315   // and does not count available bytes already in the old space or code
316   // space.  Undercounting is safe---we may get an unrequested full GC when
317   // a scavenge would have succeeded.
318   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
319     isolate_->counters()->
320         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
321     *reason = "scavenge might not succeed";
322     return MARK_COMPACTOR;
323   }
324
325   // Default
326   *reason = NULL;
327   return SCAVENGER;
328 }
329
330
331 // TODO(1238405): Combine the infrastructure for --heap-stats and
332 // --log-gc to avoid the complicated preprocessor and flag testing.
333 void Heap::ReportStatisticsBeforeGC() {
334   // Heap::ReportHeapStatistics will also log NewSpace statistics when
335   // --log-gc is set.  The following logic is used to avoid
336   // double logging.
337 #ifdef DEBUG
338   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
339   if (FLAG_heap_stats) {
340     ReportHeapStatistics("Before GC");
341   } else if (FLAG_log_gc) {
342     new_space_.ReportStatistics();
343   }
344   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
345 #else
346   if (FLAG_log_gc) {
347     new_space_.CollectStatistics();
348     new_space_.ReportStatistics();
349     new_space_.ClearHistograms();
350   }
351 #endif  // DEBUG
352 }
353
354
355 void Heap::PrintShortHeapStatistics() {
356   if (!FLAG_trace_gc_verbose) return;
357   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB\n",
359            isolate_->memory_allocator()->Size() / KB,
360            isolate_->memory_allocator()->Available() / KB);
361   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
362                ", available: %6" V8_PTR_PREFIX "d KB"
363                ", committed: %6" V8_PTR_PREFIX "d KB\n",
364            new_space_.Size() / KB,
365            new_space_.Available() / KB,
366            new_space_.CommittedMemory() / KB);
367   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
368                ", available: %6" V8_PTR_PREFIX "d KB"
369                ", committed: %6" V8_PTR_PREFIX "d KB\n",
370            old_pointer_space_->SizeOfObjects() / KB,
371            old_pointer_space_->Available() / KB,
372            old_pointer_space_->CommittedMemory() / KB);
373   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
374                ", available: %6" V8_PTR_PREFIX "d KB"
375                ", committed: %6" V8_PTR_PREFIX "d KB\n",
376            old_data_space_->SizeOfObjects() / KB,
377            old_data_space_->Available() / KB,
378            old_data_space_->CommittedMemory() / KB);
379   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
380                ", available: %6" V8_PTR_PREFIX "d KB"
381                ", committed: %6" V8_PTR_PREFIX "d KB\n",
382            code_space_->SizeOfObjects() / KB,
383            code_space_->Available() / KB,
384            code_space_->CommittedMemory() / KB);
385   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
386                ", available: %6" V8_PTR_PREFIX "d KB"
387                ", committed: %6" V8_PTR_PREFIX "d KB\n",
388            map_space_->SizeOfObjects() / KB,
389            map_space_->Available() / KB,
390            map_space_->CommittedMemory() / KB);
391   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
392                ", available: %6" V8_PTR_PREFIX "d KB"
393                ", committed: %6" V8_PTR_PREFIX "d KB\n",
394            cell_space_->SizeOfObjects() / KB,
395            cell_space_->Available() / KB,
396            cell_space_->CommittedMemory() / KB);
397   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
398                ", available: %6" V8_PTR_PREFIX "d KB"
399                ", committed: %6" V8_PTR_PREFIX "d KB\n",
400            property_cell_space_->SizeOfObjects() / KB,
401            property_cell_space_->Available() / KB,
402            property_cell_space_->CommittedMemory() / KB);
403   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
404                ", available: %6" V8_PTR_PREFIX "d KB"
405                ", committed: %6" V8_PTR_PREFIX "d KB\n",
406            lo_space_->SizeOfObjects() / KB,
407            lo_space_->Available() / KB,
408            lo_space_->CommittedMemory() / KB);
409   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
410                ", available: %6" V8_PTR_PREFIX "d KB"
411                ", committed: %6" V8_PTR_PREFIX "d KB\n",
412            this->SizeOfObjects() / KB,
413            this->Available() / KB,
414            this->CommittedMemory() / KB);
415   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
416            static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
417   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
418 }
419
420
421 // TODO(1238405): Combine the infrastructure for --heap-stats and
422 // --log-gc to avoid the complicated preprocessor and flag testing.
423 void Heap::ReportStatisticsAfterGC() {
424   // As before GC, we use some complicated logic to ensure that
425   // NewSpace statistics are logged exactly once when --log-gc is turned on.
426 #if defined(DEBUG)
427   if (FLAG_heap_stats) {
428     new_space_.CollectStatistics();
429     ReportHeapStatistics("After GC");
430   } else if (FLAG_log_gc) {
431     new_space_.ReportStatistics();
432   }
433 #else
434   if (FLAG_log_gc) new_space_.ReportStatistics();
435 #endif  // DEBUG
436 }
437
438
439 void Heap::GarbageCollectionPrologue() {
440   {  AllowHeapAllocation for_the_first_part_of_prologue;
441     ClearJSFunctionResultCaches();
442     gc_count_++;
443     unflattened_strings_length_ = 0;
444
445     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
446       mark_compact_collector()->EnableCodeFlushing(true);
447     }
448
449 #ifdef VERIFY_HEAP
450     if (FLAG_verify_heap) {
451       Verify();
452     }
453 #endif
454   }
455
456   UpdateMaximumCommitted();
457
458 #ifdef DEBUG
459   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
460
461   if (FLAG_gc_verbose) Print();
462
463   ReportStatisticsBeforeGC();
464 #endif  // DEBUG
465
466   store_buffer()->GCPrologue();
467
468   if (isolate()->concurrent_osr_enabled()) {
469     isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
470   }
471 }
472
473
474 intptr_t Heap::SizeOfObjects() {
475   intptr_t total = 0;
476   AllSpaces spaces(this);
477   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
478     total += space->SizeOfObjects();
479   }
480   return total;
481 }
482
483
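// Walks all code objects in the code space and clears inline caches of the
// given kind in full-codegen and optimized function code.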
484 void Heap::ClearAllICsByKind(Code::Kind kind) {
485   HeapObjectIterator it(code_space());
486
487   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
488     Code* code = Code::cast(object);
489     Code::Kind current_kind = code->kind();
490     if (current_kind == Code::FUNCTION ||
491         current_kind == Code::OPTIMIZED_FUNCTION) {
492       code->ClearInlineCaches(kind);
493     }
494   }
495 }
496
497
498 void Heap::RepairFreeListsAfterBoot() {
499   PagedSpaces spaces(this);
500   for (PagedSpace* space = spaces.next();
501        space != NULL;
502        space = spaces.next()) {
503     space->RepairFreeListsAfterBoot();
504   }
505 }
506
507
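// Digests the allocation-memento counts gathered since the last GC into
// per-AllocationSite pretenuring decisions (only while allocation-site
// pretenuring and the high promotion mode are active). Uses the scratchpad
// unless it overflowed, in which case the full allocation sites list is
// walked, and requests deoptimization of marked code when a decision changes.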
508 void Heap::ProcessPretenuringFeedback() {
509   if (FLAG_allocation_site_pretenuring &&
510       new_space_high_promotion_mode_active_) {
511     int tenure_decisions = 0;
512     int dont_tenure_decisions = 0;
513     int allocation_mementos_found = 0;
514     int allocation_sites = 0;
515     int active_allocation_sites = 0;
516
517     // If the scratchpad overflowed, we have to iterate over the allocation
518     // sites list.
519     bool use_scratchpad =
520         allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize;
521
522     int i = 0;
523     Object* list_element = allocation_sites_list();
524     bool trigger_deoptimization = false;
525     while (use_scratchpad ?
526               i < allocation_sites_scratchpad_length :
527               list_element->IsAllocationSite()) {
528       AllocationSite* site = use_scratchpad ?
529         allocation_sites_scratchpad[i] : AllocationSite::cast(list_element);
530       allocation_mementos_found += site->memento_found_count();
531       if (site->memento_found_count() > 0) {
532         active_allocation_sites++;
533       }
534       if (site->DigestPretenuringFeedback()) trigger_deoptimization = true;
535       if (site->GetPretenureMode() == TENURED) {
536         tenure_decisions++;
537       } else {
538         dont_tenure_decisions++;
539       }
540       allocation_sites++;
541       if (use_scratchpad) {
542         i++;
543       } else {
544         list_element = site->weak_next();
545       }
546     }
547
548     if (trigger_deoptimization) isolate_->stack_guard()->DeoptMarkedCode();
549
550     allocation_sites_scratchpad_length = 0;
551
552     // TODO(mvstanton): Pretenure decisions are only made once for an allocation
553     // site. Find a sane way to decide about revisiting the decision later.
554
555     if (FLAG_trace_track_allocation_sites &&
556         (allocation_mementos_found > 0 ||
557          tenure_decisions > 0 ||
558          dont_tenure_decisions > 0)) {
559       PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
560              "#mementos, #tenure decisions, #donttenure decisions) "
561              "(%s, %d, %d, %d, %d, %d)\n",
562              use_scratchpad ? "use scratchpad" : "use list",
563              allocation_sites,
564              active_allocation_sites,
565              allocation_mementos_found,
566              tenure_decisions,
567              dont_tenure_decisions);
568     }
569   }
570 }
571
572
573 void Heap::GarbageCollectionEpilogue() {
574   store_buffer()->GCEpilogue();
575
576   // In release mode, we only zap the from space under heap verification.
577   if (Heap::ShouldZapGarbage()) {
578     ZapFromSpace();
579   }
580
581 #ifdef VERIFY_HEAP
582   if (FLAG_verify_heap) {
583     Verify();
584   }
585 #endif
586
587   AllowHeapAllocation for_the_rest_of_the_epilogue;
588
589 #ifdef DEBUG
590   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
591   if (FLAG_print_handles) PrintHandles();
592   if (FLAG_gc_verbose) Print();
593   if (FLAG_code_stats) ReportCodeStatistics("After GC");
594 #endif
595   if (FLAG_deopt_every_n_garbage_collections > 0) {
596     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
597       Deoptimizer::DeoptimizeAll(isolate());
598       gcs_since_last_deopt_ = 0;
599     }
600   }
601
602   UpdateMaximumCommitted();
603
604   isolate_->counters()->alive_after_last_gc()->Set(
605       static_cast<int>(SizeOfObjects()));
606
607   isolate_->counters()->string_table_capacity()->Set(
608       string_table()->Capacity());
609   isolate_->counters()->number_of_symbols()->Set(
610       string_table()->NumberOfElements());
611
612   if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
613     isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
614         static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
615             (crankshaft_codegen_bytes_generated_
616             + full_codegen_bytes_generated_)));
617   }
618
619   if (CommittedMemory() > 0) {
620     isolate_->counters()->external_fragmentation_total()->AddSample(
621         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
622
623     isolate_->counters()->heap_fraction_new_space()->
624         AddSample(static_cast<int>(
625             (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
626     isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
627         static_cast<int>(
628             (old_pointer_space()->CommittedMemory() * 100.0) /
629             CommittedMemory()));
630     isolate_->counters()->heap_fraction_old_data_space()->AddSample(
631         static_cast<int>(
632             (old_data_space()->CommittedMemory() * 100.0) /
633             CommittedMemory()));
634     isolate_->counters()->heap_fraction_code_space()->
635         AddSample(static_cast<int>(
636             (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
637     isolate_->counters()->heap_fraction_map_space()->AddSample(
638         static_cast<int>(
639             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
640     isolate_->counters()->heap_fraction_cell_space()->AddSample(
641         static_cast<int>(
642             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
643     isolate_->counters()->heap_fraction_property_cell_space()->
644         AddSample(static_cast<int>(
645             (property_cell_space()->CommittedMemory() * 100.0) /
646             CommittedMemory()));
647     isolate_->counters()->heap_fraction_lo_space()->
648         AddSample(static_cast<int>(
649             (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
650
651     isolate_->counters()->heap_sample_total_committed()->AddSample(
652         static_cast<int>(CommittedMemory() / KB));
653     isolate_->counters()->heap_sample_total_used()->AddSample(
654         static_cast<int>(SizeOfObjects() / KB));
655     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
656         static_cast<int>(map_space()->CommittedMemory() / KB));
657     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
658         static_cast<int>(cell_space()->CommittedMemory() / KB));
659     isolate_->counters()->
660         heap_sample_property_cell_space_committed()->
661             AddSample(static_cast<int>(
662                 property_cell_space()->CommittedMemory() / KB));
663     isolate_->counters()->heap_sample_code_space_committed()->AddSample(
664         static_cast<int>(code_space()->CommittedMemory() / KB));
665
666     isolate_->counters()->heap_sample_maximum_committed()->AddSample(
667         static_cast<int>(MaximumCommittedMemory() / KB));
668   }
669
670 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
671   isolate_->counters()->space##_bytes_available()->Set(                        \
672       static_cast<int>(space()->Available()));                                 \
673   isolate_->counters()->space##_bytes_committed()->Set(                        \
674       static_cast<int>(space()->CommittedMemory()));                           \
675   isolate_->counters()->space##_bytes_used()->Set(                             \
676       static_cast<int>(space()->SizeOfObjects()));
677 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
678   if (space()->CommittedMemory() > 0) {                                        \
679     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
680         static_cast<int>(100 -                                                 \
681             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
682   }
683 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
684   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
685   UPDATE_FRAGMENTATION_FOR_SPACE(space)
686
687   UPDATE_COUNTERS_FOR_SPACE(new_space)
688   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
689   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
690   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
691   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
692   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
693   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
694   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
695 #undef UPDATE_COUNTERS_FOR_SPACE
696 #undef UPDATE_FRAGMENTATION_FOR_SPACE
697 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
698
699 #if defined(DEBUG)
700   ReportStatisticsAfterGC();
701 #endif  // DEBUG
702 #ifdef ENABLE_DEBUGGER_SUPPORT
703   isolate_->debug()->AfterGarbageCollection();
704 #endif  // ENABLE_DEBUGGER_SUPPORT
705 }
706
707
708 void Heap::CollectAllGarbage(int flags,
709                              const char* gc_reason,
710                              const v8::GCCallbackFlags gc_callback_flags) {
711   // Since we are ignoring the return value, the exact choice of space does
712   // not matter, so long as we do not specify NEW_SPACE, which would not
713   // cause a full GC.
714   mark_compact_collector_.SetFlags(flags);
715   CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
716   mark_compact_collector_.SetFlags(kNoGCFlags);
717 }
718
719
720 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
721   // Since we are ignoring the return value, the exact choice of space does
722   // not matter, so long as we do not specify NEW_SPACE, which would not
723   // cause a full GC.
724   // Major GC would invoke weak handle callbacks on weakly reachable
725   // handles, but won't collect weakly reachable objects until next
726   // major GC.  Therefore if we collect aggressively and weak handle callback
727   // has been invoked, we rerun major GC to release objects which become
728   // garbage.
729   // Note: as weak callbacks can execute arbitrary code, we cannot
730   // hope that eventually there will be no weak callbacks invocations.
731   // Therefore stop recollecting after several attempts.
732   if (isolate()->concurrent_recompilation_enabled()) {
733     // The optimizing compiler may be unnecessarily holding on to memory.
734     DisallowHeapAllocation no_recursive_gc;
735     isolate()->optimizing_compiler_thread()->Flush();
736   }
737   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
738                                      kReduceMemoryFootprintMask);
739   isolate_->compilation_cache()->Clear();
740   const int kMaxNumberOfAttempts = 7;
741   const int kMinNumberOfAttempts = 2;
742   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
743     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
744         attempt + 1 >= kMinNumberOfAttempts) {
745       break;
746     }
747   }
748   mark_compact_collector()->SetFlags(kNoGCFlags);
749   new_space_.Shrink();
750   UncommitFromSpace();
751   incremental_marking()->UncommitMarkingDeque();
752 }
753
754
755 bool Heap::CollectGarbage(AllocationSpace space,
756                           GarbageCollector collector,
757                           const char* gc_reason,
758                           const char* collector_reason,
759                           const v8::GCCallbackFlags gc_callback_flags) {
760   // The VM is in the GC state until exiting this function.
761   VMState<GC> state(isolate_);
762
763 #ifdef DEBUG
764   // Reset the allocation timeout to the GC interval, but make sure to
765   // allow at least a few allocations after a collection. The reason
766   // for this is that we have a lot of allocation sequences and we
767   // assume that a garbage collection will allow the subsequent
768   // allocation attempts to go through.
769   allocation_timeout_ = Max(6, FLAG_gc_interval);
770 #endif
771
772   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
773     if (FLAG_trace_incremental_marking) {
774       PrintF("[IncrementalMarking] Scavenge during marking.\n");
775     }
776   }
777
778   if (collector == MARK_COMPACTOR &&
779       !mark_compact_collector()->abort_incremental_marking() &&
780       !incremental_marking()->IsStopped() &&
781       !incremental_marking()->should_hurry() &&
782       FLAG_incremental_marking_steps) {
783     // Make progress in incremental marking.
784     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
785     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
786                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
787     if (!incremental_marking()->IsComplete()) {
788       if (FLAG_trace_incremental_marking) {
789         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
790       }
791       collector = SCAVENGER;
792       collector_reason = "incremental marking delaying mark-sweep";
793     }
794   }
795
796   bool next_gc_likely_to_collect_more = false;
797
798   { GCTracer tracer(this, gc_reason, collector_reason);
799     ASSERT(AllowHeapAllocation::IsAllowed());
800     DisallowHeapAllocation no_allocation_during_gc;
801     GarbageCollectionPrologue();
802     // The GC count was incremented in the prologue.  Tell the tracer about
803     // it.
804     tracer.set_gc_count(gc_count_);
805
806     // Tell the tracer which collector we've selected.
807     tracer.set_collector(collector);
808
809     {
810       HistogramTimerScope histogram_timer_scope(
811           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
812                                    : isolate_->counters()->gc_compactor());
813       next_gc_likely_to_collect_more =
814           PerformGarbageCollection(collector, &tracer, gc_callback_flags);
815     }
816
817     GarbageCollectionEpilogue();
818   }
819
820   // Start incremental marking for the next cycle. The heap snapshot
821   // generator needs incremental marking to stay off after it aborted.
822   if (!mark_compact_collector()->abort_incremental_marking() &&
823       incremental_marking()->IsStopped() &&
824       incremental_marking()->WorthActivating() &&
825       NextGCIsLikelyToBeFull()) {
826     incremental_marking()->Start();
827   }
828
829   return next_gc_likely_to_collect_more;
830 }
831
832
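// Called when the embedder disposes a context: flushes queued concurrent
// recompilation tasks, forces monomorphic ICs to be flushed and ages inline
// caches.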
833 int Heap::NotifyContextDisposed() {
834   if (isolate()->concurrent_recompilation_enabled()) {
835     // Flush the queued recompilation tasks.
836     isolate()->optimizing_compiler_thread()->Flush();
837   }
838   flush_monomorphic_ics_ = true;
839   AgeInlineCaches();
840   return ++contexts_disposed_;
841 }
842
843
844 void Heap::PerformScavenge() {
845   GCTracer tracer(this, NULL, NULL);
846   if (incremental_marking()->IsStopped()) {
847     PerformGarbageCollection(SCAVENGER, &tracer);
848   } else {
849     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
850   }
851 }
852
853
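// Moves |len| elements of |array| from |src_index| to |dst_index|, replaying
// the store-buffer write barrier for destination slots that point into new
// space (when the array itself is outside new space) and notifying the
// incremental marker.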
854 void Heap::MoveElements(FixedArray* array,
855                         int dst_index,
856                         int src_index,
857                         int len) {
858   if (len == 0) return;
859
860   ASSERT(array->map() != fixed_cow_array_map());
861   Object** dst_objects = array->data_start() + dst_index;
862   OS::MemMove(dst_objects,
863               array->data_start() + src_index,
864               len * kPointerSize);
865   if (!InNewSpace(array)) {
866     for (int i = 0; i < len; i++) {
867       // TODO(hpayer): check store buffer for entries
868       if (InNewSpace(dst_objects[i])) {
869         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
870       }
871     }
872   }
873   incremental_marking()->RecordWrites(array);
874 }
875
876
877 #ifdef VERIFY_HEAP
878 // Helper class for verifying the string table.
879 class StringTableVerifier : public ObjectVisitor {
880  public:
881   void VisitPointers(Object** start, Object** end) {
882     // Visit all HeapObject pointers in [start, end).
883     for (Object** p = start; p < end; p++) {
884       if ((*p)->IsHeapObject()) {
885         // Check that the string is actually internalized.
886         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
887               (*p)->IsInternalizedString());
888       }
889     }
890   }
891 };
892
893
894 static void VerifyStringTable(Heap* heap) {
895   StringTableVerifier verifier;
896   heap->string_table()->IterateElements(&verifier);
897 }
898 #endif  // VERIFY_HEAP
899
900
901 static bool AbortIncrementalMarkingAndCollectGarbage(
902     Heap* heap,
903     AllocationSpace space,
904     const char* gc_reason = NULL) {
905   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
906   bool result = heap->CollectGarbage(space, gc_reason);
907   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
908   return result;
909 }
910
911
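// Reserves sizes[space] bytes in each space for the deserializer, collecting
// garbage and retrying until every reservation succeeds; aborts the process
// if the reservations still fail after a fixed number of attempts.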
912 void Heap::ReserveSpace(int* sizes, Address* locations_out) {
913   bool gc_performed = true;
914   int counter = 0;
915   static const int kThreshold = 20;
916   while (gc_performed && counter++ < kThreshold) {
917     gc_performed = false;
918     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
919     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
920       if (sizes[space] != 0) {
921         MaybeObject* allocation;
922         if (space == NEW_SPACE) {
923           allocation = new_space()->AllocateRaw(sizes[space]);
924         } else {
925           allocation = paged_space(space)->AllocateRaw(sizes[space]);
926         }
927         FreeListNode* node;
928         if (!allocation->To<FreeListNode>(&node)) {
929           if (space == NEW_SPACE) {
930             Heap::CollectGarbage(NEW_SPACE,
931                                  "failed to reserve space in the new space");
932           } else {
933             AbortIncrementalMarkingAndCollectGarbage(
934                 this,
935                 static_cast<AllocationSpace>(space),
936                 "failed to reserve space in paged space");
937           }
938           gc_performed = true;
939           break;
940         } else {
941           // Mark with a free list node, in case we have a GC before
942           // deserializing.
943           node->set_size(this, sizes[space]);
944           locations_out[space] = node->address();
945         }
946       }
947     }
948   }
949
950   if (gc_performed) {
951     // Failed to reserve the space after several attempts.
952     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
953   }
954 }
955
956
957 void Heap::EnsureFromSpaceIsCommitted() {
958   if (new_space_.CommitFromSpaceIfNeeded()) return;
959
960   // Committing memory to from space failed.
961   // Memory is exhausted and we will die.
962   V8::FatalProcessOutOfMemory("Committing semi space failed.");
963 }
964
965
966 void Heap::ClearJSFunctionResultCaches() {
967   if (isolate_->bootstrapper()->IsActive()) return;
968
969   Object* context = native_contexts_list_;
970   while (!context->IsUndefined()) {
971     // Get the caches for this context. GC can happen when the context
972     // is not fully initialized, so the caches can be undefined.
973     Object* caches_or_undefined =
974         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
975     if (!caches_or_undefined->IsUndefined()) {
976       FixedArray* caches = FixedArray::cast(caches_or_undefined);
977       // Clear the caches:
978       int length = caches->length();
979       for (int i = 0; i < length; i++) {
980         JSFunctionResultCache::cast(caches->get(i))->Clear();
981       }
982     }
983     // Get the next context:
984     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
985   }
986 }
987
988
989 void Heap::ClearNormalizedMapCaches() {
990   if (isolate_->bootstrapper()->IsActive() &&
991       !incremental_marking()->IsMarking()) {
992     return;
993   }
994
995   Object* context = native_contexts_list_;
996   while (!context->IsUndefined()) {
997     // GC can happen when the context is not fully initialized,
998     // so the cache can be undefined.
999     Object* cache =
1000         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
1001     if (!cache->IsUndefined()) {
1002       NormalizedMapCache::cast(cache)->Clear();
1003     }
1004     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
1005   }
1006 }
1007
1008
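// Recomputes the young-generation survival rate from the survivors of the
// last GC and classifies the trend (INCREASING, DECREASING or STABLE)
// relative to the previous rate.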
1009 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
1010   if (start_new_space_size == 0) return;
1011
1012   double survival_rate =
1013       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
1014       start_new_space_size;
1015
1016   if (survival_rate > kYoungSurvivalRateHighThreshold) {
1017     high_survival_rate_period_length_++;
1018   } else {
1019     high_survival_rate_period_length_ = 0;
1020   }
1021
1022   if (survival_rate < kYoungSurvivalRateLowThreshold) {
1023     low_survival_rate_period_length_++;
1024   } else {
1025     low_survival_rate_period_length_ = 0;
1026   }
1027
1028   double survival_rate_diff = survival_rate_ - survival_rate;
1029
1030   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
1031     set_survival_rate_trend(DECREASING);
1032   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
1033     set_survival_rate_trend(INCREASING);
1034   } else {
1035     set_survival_rate_trend(STABLE);
1036   }
1037
1038   survival_rate_ = survival_rate;
1039 }
1040
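// Runs the selected collector, invoking the registered GC prologue and
// epilogue callbacks and post-processing weak global handles. Returns true
// if the next GC is likely to collect more garbage.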
1041 bool Heap::PerformGarbageCollection(
1042     GarbageCollector collector,
1043     GCTracer* tracer,
1044     const v8::GCCallbackFlags gc_callback_flags) {
1045   bool next_gc_likely_to_collect_more = false;
1046
1047   if (collector != SCAVENGER) {
1048     PROFILE(isolate_, CodeMovingGCEvent());
1049   }
1050
1051 #ifdef VERIFY_HEAP
1052   if (FLAG_verify_heap) {
1053     VerifyStringTable(this);
1054   }
1055 #endif
1056
1057   GCType gc_type =
1058       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
1059
1060   {
1061     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1062     VMState<EXTERNAL> state(isolate_);
1063     HandleScope handle_scope(isolate_);
1064     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
1065   }
1066
1067   EnsureFromSpaceIsCommitted();
1068
1069   int start_new_space_size = Heap::new_space()->SizeAsInt();
1070
1071   if (IsHighSurvivalRate()) {
1072     // We speed up the incremental marker if it is running so that it
1073     // does not fall behind the rate of promotion, which would cause a
1074     // constantly growing old space.
1075     incremental_marking()->NotifyOfHighPromotionRate();
1076   }
1077
1078   if (collector == MARK_COMPACTOR) {
1079     // Perform mark-sweep with optional compaction.
1080     MarkCompact(tracer);
1081     sweep_generation_++;
1082
1083     UpdateSurvivalRateTrend(start_new_space_size);
1084
1085     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
1086
1087     old_generation_allocation_limit_ =
1088         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
1089
1090     old_gen_exhausted_ = false;
1091   } else {
1092     tracer_ = tracer;
1093     Scavenge();
1094     tracer_ = NULL;
1095
1096     UpdateSurvivalRateTrend(start_new_space_size);
1097   }
1098
1099   if (!new_space_high_promotion_mode_active_ &&
1100       new_space_.Capacity() == new_space_.MaximumCapacity() &&
1101       IsStableOrIncreasingSurvivalTrend() &&
1102       IsHighSurvivalRate()) {
1103     // Stable high survival rates even though young generation is at
1104     // maximum capacity indicate that most objects will be promoted.
1105     // To decrease scavenger pauses and final mark-sweep pauses, we
1106     // have to limit maximal capacity of the young generation.
1107     SetNewSpaceHighPromotionModeActive(true);
1108     if (FLAG_trace_gc) {
1109       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1110                new_space_.InitialCapacity() / MB);
1111     }
1112     // The high promotion mode is our indicator to turn on pretenuring. We have
1113     // to deoptimize all optimized code in global pretenuring mode and all
1114     // code which should be tenured in local pretenuring mode.
1115     if (FLAG_pretenuring) {
1116       if (FLAG_allocation_site_pretenuring) {
1117         ResetAllAllocationSitesDependentCode(NOT_TENURED);
1118       } else {
1119         isolate_->stack_guard()->FullDeopt();
1120       }
1121     }
1122   } else if (new_space_high_promotion_mode_active_ &&
1123       IsStableOrDecreasingSurvivalTrend() &&
1124       IsLowSurvivalRate()) {
1125     // Decreasing low survival rates might indicate that the above high
1126     // promotion mode is over and we should allow the young generation
1127     // to grow again.
1128     SetNewSpaceHighPromotionModeActive(false);
1129     if (FLAG_trace_gc) {
1130       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1131                new_space_.MaximumCapacity() / MB);
1132     }
1133     // Trigger deoptimization here to turn off global pretenuring as soon as
1134     // possible.
1135     if (FLAG_pretenuring && !FLAG_allocation_site_pretenuring) {
1136       isolate_->stack_guard()->FullDeopt();
1137     }
1138   }
1139
1140   if (new_space_high_promotion_mode_active_ &&
1141       new_space_.Capacity() > new_space_.InitialCapacity()) {
1142     new_space_.Shrink();
1143   }
1144
1145   isolate_->counters()->objs_since_last_young()->Set(0);
1146
1147   // Callbacks that fire after this point might trigger nested GCs and
1148   // restart incremental marking, so the assertion can't be moved down.
1149   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1150
1151   gc_post_processing_depth_++;
1152   { AllowHeapAllocation allow_allocation;
1153     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1154     next_gc_likely_to_collect_more =
1155         isolate_->global_handles()->PostGarbageCollectionProcessing(
1156             collector, tracer);
1157   }
1158   gc_post_processing_depth_--;
1159
1160   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1161
1162   // Update relocatables.
1163   Relocatable::PostGarbageCollectionProcessing(isolate_);
1164
1165   if (collector == MARK_COMPACTOR) {
1166     // Register the amount of external allocated memory.
1167     amount_of_external_allocated_memory_at_last_global_gc_ =
1168         amount_of_external_allocated_memory_;
1169   }
1170
1171   {
1172     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1173     VMState<EXTERNAL> state(isolate_);
1174     HandleScope handle_scope(isolate_);
1175     CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
1176   }
1177
1178 #ifdef VERIFY_HEAP
1179   if (FLAG_verify_heap) {
1180     VerifyStringTable(this);
1181   }
1182 #endif
1183
1184   return next_gc_likely_to_collect_more;
1185 }
1186
1187
1188 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1189   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1190     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1191       if (!gc_prologue_callbacks_[i].pass_isolate_) {
1192         v8::GCPrologueCallback callback =
1193             reinterpret_cast<v8::GCPrologueCallback>(
1194                 gc_prologue_callbacks_[i].callback);
1195         callback(gc_type, flags);
1196       } else {
1197         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1198         gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
1199       }
1200     }
1201   }
1202 }
1203
1204
1205 void Heap::CallGCEpilogueCallbacks(GCType gc_type,
1206                                    GCCallbackFlags gc_callback_flags) {
1207   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1208     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1209       if (!gc_epilogue_callbacks_[i].pass_isolate_) {
1210         v8::GCPrologueCallback callback =
1211             reinterpret_cast<v8::GCPrologueCallback>(
1212                 gc_epilogue_callbacks_[i].callback);
1213         callback(gc_type, gc_callback_flags);
1214       } else {
1215         v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
1216         gc_epilogue_callbacks_[i].callback(
1217             isolate, gc_type, gc_callback_flags);
1218       }
1219     }
1220   }
1221 }
1222
1223
1224 void Heap::MarkCompact(GCTracer* tracer) {
1225   gc_state_ = MARK_COMPACT;
1226   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1227
1228   uint64_t size_of_objects_before_gc = SizeOfObjects();
1229
1230   mark_compact_collector_.Prepare(tracer);
1231
1232   ms_count_++;
1233   tracer->set_full_gc_count(ms_count_);
1234
1235   MarkCompactPrologue();
1236
1237   mark_compact_collector_.CollectGarbage();
1238
1239   LOG(isolate_, ResourceEvent("markcompact", "end"));
1240
1241   gc_state_ = NOT_IN_GC;
1242
1243   isolate_->counters()->objs_since_last_full()->Set(0);
1244
1245   flush_monomorphic_ics_ = false;
1246
1247   if (FLAG_allocation_site_pretenuring) {
1248     EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
1249   }
1250 }
1251
1252
1253 void Heap::MarkCompactPrologue() {
1254   // At any old GC clear the keyed lookup cache to enable collection of unused
1255   // maps.
1256   isolate_->keyed_lookup_cache()->Clear();
1257   isolate_->context_slot_cache()->Clear();
1258   isolate_->descriptor_lookup_cache()->Clear();
1259   RegExpResultsCache::Clear(string_split_cache());
1260   RegExpResultsCache::Clear(regexp_multiple_cache());
1261
1262   isolate_->compilation_cache()->MarkCompactPrologue();
1263
1264   CompletelyClearInstanceofCache();
1265
1266   FlushNumberStringCache();
1267   if (FLAG_cleanup_code_caches_at_gc) {
1268     polymorphic_code_cache()->set_cache(undefined_value());
1269   }
1270
1271   ClearNormalizedMapCaches();
1272 }
1273
1274
1275 // Helper class for copying HeapObjects
1276 class ScavengeVisitor: public ObjectVisitor {
1277  public:
1278   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1279
1280   void VisitPointer(Object** p) { ScavengePointer(p); }
1281
1282   void VisitPointers(Object** start, Object** end) {
1283     // Copy all HeapObject pointers in [start, end)
1284     for (Object** p = start; p < end; p++) ScavengePointer(p);
1285   }
1286
1287  private:
1288   void ScavengePointer(Object** p) {
1289     Object* object = *p;
1290     if (!heap_->InNewSpace(object)) return;
1291     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1292                          reinterpret_cast<HeapObject*>(object));
1293   }
1294
1295   Heap* heap_;
1296 };
1297
1298
1299 #ifdef VERIFY_HEAP
1300 // Visitor class to verify pointers in code or data space do not point into
1301 // new space.
1302 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1303  public:
1304   explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
1305   void VisitPointers(Object** start, Object** end) {
1306     for (Object** current = start; current < end; current++) {
1307       if ((*current)->IsHeapObject()) {
1308         CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
1309       }
1310     }
1311   }
1312
1313  private:
1314   Heap* heap_;
1315 };
1316
1317
1318 static void VerifyNonPointerSpacePointers(Heap* heap) {
1319   // Verify that there are no pointers to new space in spaces where we
1320   // do not expect them.
1321   VerifyNonPointerSpacePointersVisitor v(heap);
1322   HeapObjectIterator code_it(heap->code_space());
1323   for (HeapObject* object = code_it.Next();
1324        object != NULL; object = code_it.Next())
1325     object->Iterate(&v);
1326
1327   // The old data space was normally swept conservatively so that the iterator
1328   // The old data space is normally swept conservatively, in which case the
1329   // iterator does not work, so we normally skip the next bit.
1330     HeapObjectIterator data_it(heap->old_data_space());
1331     for (HeapObject* object = data_it.Next();
1332          object != NULL; object = data_it.Next())
1333       object->Iterate(&v);
1334   }
1335 }
1336 #endif  // VERIFY_HEAP
1337
1338
1339 void Heap::CheckNewSpaceExpansionCriteria() {
1340   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1341       survived_since_last_expansion_ > new_space_.Capacity() &&
1342       !new_space_high_promotion_mode_active_) {
1343     // Grow the size of new space if there is room to grow, enough data
1344     // has survived scavenge since the last expansion and we are not in
1345     // high promotion mode.
1346     new_space_.Grow();
1347     survived_since_last_expansion_ = 0;
1348   }
1349 }
1350
1351
1352 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1353   return heap->InNewSpace(*p) &&
1354       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1355 }
1356
1357
1358 void Heap::ScavengeStoreBufferCallback(
1359     Heap* heap,
1360     MemoryChunk* page,
1361     StoreBufferEvent event) {
1362   heap->store_buffer_rebuilder_.Callback(page, event);
1363 }
1364
1365
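// Tracks how many store buffer entries each old-space page contributes while
// the store buffer is rebuilt during scavenge, and flips pages that exceed
// their budget back to scan-on-scavenge mode.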
1366 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1367   if (event == kStoreBufferStartScanningPagesEvent) {
1368     start_of_current_page_ = NULL;
1369     current_page_ = NULL;
1370   } else if (event == kStoreBufferScanningPageEvent) {
1371     if (current_page_ != NULL) {
1372       // If this page already overflowed the store buffer during this iteration.
1373       if (current_page_->scan_on_scavenge()) {
1374         // Then we should wipe out the entries that have been added for it.
1375         store_buffer_->SetTop(start_of_current_page_);
1376       } else if (store_buffer_->Top() - start_of_current_page_ >=
1377                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1378         // Did we find too many pointers in the previous page?  The heuristic is
1379         // that no page can take more than 1/5 the remaining slots in the store
1380         // buffer.
1381         current_page_->set_scan_on_scavenge(true);
1382         store_buffer_->SetTop(start_of_current_page_);
1383       } else {
1384         // In this case the page we scanned took a reasonable number of slots in
1385         // the store buffer.  It has now been rehabilitated and is no longer
1386         // marked scan_on_scavenge.
1387         ASSERT(!current_page_->scan_on_scavenge());
1388       }
1389     }
1390     start_of_current_page_ = store_buffer_->Top();
1391     current_page_ = page;
1392   } else if (event == kStoreBufferFullEvent) {
1393     // The current page overflowed the store buffer again.  Wipe out its entries
1394     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1395     // several times while scanning.
1396     if (current_page_ == NULL) {
1397       // Store Buffer overflowed while scanning promoted objects.  These are not
1398       // in any particular page, though they are likely to be clustered by the
1399       // allocation routines.
1400       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1401     } else {
1402       // Store Buffer overflowed while scanning a particular old space page for
1403       // pointers to new space.
1404       ASSERT(current_page_ == page);
1405       ASSERT(page != NULL);
1406       current_page_->set_scan_on_scavenge(true);
1407       ASSERT(start_of_current_page_ != store_buffer_->Top());
1408       store_buffer_->SetTop(start_of_current_page_);
1409     }
1410   } else {
1411     UNREACHABLE();
1412   }
1413 }
1414
1415
1416 void PromotionQueue::Initialize() {
1417   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1418   // entries (where each is a pair of intptr_t). This allows us to simplify
1419   // the test for when to switch pages.
1420   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1421          == 0);
1422   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1423   front_ = rear_ =
1424       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1425   emergency_stack_ = NULL;
1426   guard_ = false;
1427 }
1428
1429
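// Moves the head of the promotion queue (the entries between the queue rear
// and the end of its page) into an off-heap emergency stack so that the page
// can be reused for allocation.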
1430 void PromotionQueue::RelocateQueueHead() {
1431   ASSERT(emergency_stack_ == NULL);
1432
1433   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1434   intptr_t* head_start = rear_;
1435   intptr_t* head_end =
1436       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1437
1438   int entries_count =
1439       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1440
1441   emergency_stack_ = new List<Entry>(2 * entries_count);
1442
1443   while (head_start != head_end) {
1444     int size = static_cast<int>(*(head_start++));
1445     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1446     emergency_stack_->Add(Entry(obj, size));
1447   }
1448   rear_ = head_end;
1449 }
1450
1451
1452 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1453  public:
1454   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1455
1456   virtual Object* RetainAs(Object* object) {
1457     if (!heap_->InFromSpace(object)) {
1458       return object;
1459     }
1460
1461     MapWord map_word = HeapObject::cast(object)->map_word();
1462     if (map_word.IsForwardingAddress()) {
1463       return map_word.ToForwardingAddress();
1464     }
1465     return NULL;
1466   }
1467
1468  private:
1469   Heap* heap_;
1470 };
1471
1472
1473 void Heap::Scavenge() {
1474   RelocationLock relocation_lock(this);
1475
1476 #ifdef VERIFY_HEAP
1477   if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1478 #endif
1479
1480   gc_state_ = SCAVENGE;
1481
1482   // Implements Cheney's copying algorithm
1483   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1484
1485   // Clear descriptor cache.
1486   isolate_->descriptor_lookup_cache()->Clear();
1487
1488   // Used for updating survived_since_last_expansion_ at function end.
1489   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1490
1491   CheckNewSpaceExpansionCriteria();
1492
1493   SelectScavengingVisitorsTable();
1494
1495   incremental_marking()->PrepareForScavenge();
1496
1497   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1498   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1499
1500   // Flip the semispaces.  After flipping, to space is empty, from space has
1501   // live objects.
1502   new_space_.Flip();
1503   new_space_.ResetAllocationInfo();
1504
1505   // We need to sweep newly copied objects which can be either in the
1506   // to space or promoted to the old generation.  For to-space
1507   // objects, we treat the bottom of the to space as a queue.  Newly
1508   // copied and unswept objects lie between a 'front' mark and the
1509   // allocation pointer.
1510   //
1511   // Promoted objects can go into various old-generation spaces, and
1512   // can be allocated internally in the spaces (from the free list).
1513   // We treat the top of the to space as a queue of addresses of
1514   // promoted objects.  The addresses of newly promoted and unswept
1515   // objects lie between a 'front' mark and a 'rear' mark that is
1516   // updated as a side effect of promoting an object.
1517   //
1518   // There is guaranteed to be enough room at the top of the to space
1519   // for the addresses of promoted objects: every object promoted
1520   // frees up its size in bytes from the top of the new space, and
1521   // objects are at least one pointer in size.
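  //
  // Roughly, during the scavenge the to space looks like this (addresses
  // grow to the right):
  //
  //   ToSpaceStart()                                        ToSpaceEnd()
  //   | processed objects | unprocessed copies | free | promotion queue |
  //                       ^new_space_front     ^top()    [rear_, front_)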
1522   Address new_space_front = new_space_.ToSpaceStart();
1523   promotion_queue_.Initialize();
1524
1525 #ifdef DEBUG
1526   store_buffer()->Clean();
1527 #endif
1528
1529   ScavengeVisitor scavenge_visitor(this);
1530   // Copy roots.
1531   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1532
1533   // Copy objects reachable from the old generation.
1534   {
1535     StoreBufferRebuildScope scope(this,
1536                                   store_buffer(),
1537                                   &ScavengeStoreBufferCallback);
1538     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1539   }
1540
1541   // Copy objects reachable from simple cells by scavenging cell values
1542   // directly.
1543   HeapObjectIterator cell_iterator(cell_space_);
1544   for (HeapObject* heap_object = cell_iterator.Next();
1545        heap_object != NULL;
1546        heap_object = cell_iterator.Next()) {
1547     if (heap_object->IsCell()) {
1548       Cell* cell = Cell::cast(heap_object);
1549       Address value_address = cell->ValueAddress();
1550       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1551     }
1552   }
1553
1554   // Copy objects reachable from global property cells by scavenging global
1555   // property cell values directly.
1556   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1557   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1558        heap_object != NULL;
1559        heap_object = js_global_property_cell_iterator.Next()) {
1560     if (heap_object->IsPropertyCell()) {
1561       PropertyCell* cell = PropertyCell::cast(heap_object);
1562       Address value_address = cell->ValueAddress();
1563       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1564       Address type_address = cell->TypeAddress();
1565       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1566     }
1567   }
1568
1569   // Copy objects reachable from the code flushing candidates list.
1570   MarkCompactCollector* collector = mark_compact_collector();
1571   if (collector->is_code_flushing_enabled()) {
1572     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1573   }
1574
1575   // Scavenge objects reachable from the native contexts list directly.
1576   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1577
1578   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1579
1580   while (isolate()->global_handles()->IterateObjectGroups(
1581       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1582     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1583   }
1584   isolate()->global_handles()->RemoveObjectGroups();
1585   isolate()->global_handles()->RemoveImplicitRefGroups();
1586
1587   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1588       &IsUnscavengedHeapObject);
1589   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1590       &scavenge_visitor);
1591   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1592
1593   UpdateNewSpaceReferencesInExternalStringTable(
1594       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1595
1596   promotion_queue_.Destroy();
1597
1598   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1599
1600   ScavengeWeakObjectRetainer weak_object_retainer(this);
1601   ProcessWeakReferences(&weak_object_retainer);
1602
1603   ASSERT(new_space_front == new_space_.top());
1604
1605   // Set age mark.
1606   new_space_.set_age_mark(new_space_.top());
1607
1608   new_space_.LowerInlineAllocationLimit(
1609       new_space_.inline_allocation_limit_step());
1610
1611   // Update how much has survived scavenge.
1612   IncrementYoungSurvivorsCounter(static_cast<int>(
1613       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1614
1615   ProcessPretenuringFeedback();
1616
1617   LOG(isolate_, ResourceEvent("scavenge", "end"));
1618
1619   gc_state_ = NOT_IN_GC;
1620
1621   scavenges_since_last_idle_round_++;
1622 }
1623
1624
1625 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1626                                                                 Object** p) {
1627   MapWord first_word = HeapObject::cast(*p)->map_word();
1628
1629   if (!first_word.IsForwardingAddress()) {
1630     // An unreachable external string can be finalized.
1631     heap->FinalizeExternalString(String::cast(*p));
1632     return NULL;
1633   }
1634
1635   // String is still reachable.
1636   return String::cast(first_word.ToForwardingAddress());
1637 }
1638
1639
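// Runs updater_func over every new-space entry of the external string table.
// Entries whose string died (the updater returned NULL) are dropped, strings
// that were promoted are moved to the old-space list, and strings still in
// new space are compacted towards the front of the new-space list.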
1640 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1641     ExternalStringTableUpdaterCallback updater_func) {
1642 #ifdef VERIFY_HEAP
1643   if (FLAG_verify_heap) {
1644     external_string_table_.Verify();
1645   }
1646 #endif
1647
1648   if (external_string_table_.new_space_strings_.is_empty()) return;
1649
1650   Object** start = &external_string_table_.new_space_strings_[0];
1651   Object** end = start + external_string_table_.new_space_strings_.length();
1652   Object** last = start;
1653
1654   for (Object** p = start; p < end; ++p) {
1655     ASSERT(InFromSpace(*p));
1656     String* target = updater_func(this, p);
1657
1658     if (target == NULL) continue;
1659
1660     ASSERT(target->IsExternalString());
1661
1662     if (InNewSpace(target)) {
1663       // String is still in new space.  Update the table entry.
1664       *last = target;
1665       ++last;
1666     } else {
1667       // String got promoted.  Move it to the old string list.
1668       external_string_table_.AddOldString(target);
1669     }
1670   }
1671
1672   ASSERT(last <= end);
1673   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1674 }
1675
1676
1677 void Heap::UpdateReferencesInExternalStringTable(
1678     ExternalStringTableUpdaterCallback updater_func) {
1679
1680   // Update old space string references.
1681   if (external_string_table_.old_space_strings_.length() > 0) {
1682     Object** start = &external_string_table_.old_space_strings_[0];
1683     Object** end = start + external_string_table_.old_space_strings_.length();
1684     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1685   }
1686
1687   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1688 }
1689
1690
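// WeakListVisitor<T> describes, for each weak-list element type T, how to
// read and write the element's weak "next" link (WeakNext, SetWeakNext,
// WeakNextOffset) and what to do with elements that survive
// (VisitLiveObject) or die (VisitPhantomObject) while the list is traversed.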
1691 template <class T>
1692 struct WeakListVisitor;
1693
1694
1695 template <class T>
1696 static Object* VisitWeakList(Heap* heap,
1697                              Object* list,
1698                              WeakObjectRetainer* retainer,
1699                              bool record_slots) {
1700   Object* undefined = heap->undefined_value();
1701   Object* head = undefined;
1702   T* tail = NULL;
1703   MarkCompactCollector* collector = heap->mark_compact_collector();
1704   while (list != undefined) {
1705     // Check whether to keep the candidate in the list.
1706     T* candidate = reinterpret_cast<T*>(list);
1707     Object* retained = retainer->RetainAs(list);
1708     if (retained != NULL) {
1709       if (head == undefined) {
1710         // First element in the list.
1711         head = retained;
1712       } else {
1713         // Subsequent elements in the list.
1714         ASSERT(tail != NULL);
1715         WeakListVisitor<T>::SetWeakNext(tail, retained);
1716         if (record_slots) {
1717           Object** next_slot =
1718             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1719           collector->RecordSlot(next_slot, next_slot, retained);
1720         }
1721       }
1722       // Retained object is new tail.
1723       ASSERT(!retained->IsUndefined());
1724       candidate = reinterpret_cast<T*>(retained);
1725       tail = candidate;
1726
1727
1728       // tail is a live object; visit it.
1729       WeakListVisitor<T>::VisitLiveObject(
1730           heap, tail, retainer, record_slots);
1731     } else {
1732       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1733     }
1734
1735     // Move to next element in the list.
1736     list = WeakListVisitor<T>::WeakNext(candidate);
1737   }
1738
1739   // Terminate the list if there are one or more elements.
1740   if (tail != NULL) {
1741     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1742   }
1743   return head;
1744 }
1745
1746
1747 template<>
1748 struct WeakListVisitor<JSFunction> {
1749   static void SetWeakNext(JSFunction* function, Object* next) {
1750     function->set_next_function_link(next);
1751   }
1752
1753   static Object* WeakNext(JSFunction* function) {
1754     return function->next_function_link();
1755   }
1756
1757   static int WeakNextOffset() {
1758     return JSFunction::kNextFunctionLinkOffset;
1759   }
1760
1761   static void VisitLiveObject(Heap*, JSFunction*,
1762                               WeakObjectRetainer*, bool) {
1763   }
1764
1765   static void VisitPhantomObject(Heap*, JSFunction*) {
1766   }
1767 };
1768
1769
1770 template<>
1771 struct WeakListVisitor<Code> {
1772   static void SetWeakNext(Code* code, Object* next) {
1773     code->set_next_code_link(next);
1774   }
1775
1776   static Object* WeakNext(Code* code) {
1777     return code->next_code_link();
1778   }
1779
1780   static int WeakNextOffset() {
1781     return Code::kNextCodeLinkOffset;
1782   }
1783
1784   static void VisitLiveObject(Heap*, Code*,
1785                               WeakObjectRetainer*, bool) {
1786   }
1787
1788   static void VisitPhantomObject(Heap*, Code*) {
1789   }
1790 };
1791
1792
1793 template<>
1794 struct WeakListVisitor<Context> {
1795   static void SetWeakNext(Context* context, Object* next) {
1796     context->set(Context::NEXT_CONTEXT_LINK,
1797                  next,
1798                  UPDATE_WRITE_BARRIER);
1799   }
1800
1801   static Object* WeakNext(Context* context) {
1802     return context->get(Context::NEXT_CONTEXT_LINK);
1803   }
1804
1805   static void VisitLiveObject(Heap* heap,
1806                               Context* context,
1807                               WeakObjectRetainer* retainer,
1808                               bool record_slots) {
1809     // Process the three weak lists linked off the context.
1810     DoWeakList<JSFunction>(heap, context, retainer, record_slots,
1811         Context::OPTIMIZED_FUNCTIONS_LIST);
1812     DoWeakList<Code>(heap, context, retainer, record_slots,
1813         Context::OPTIMIZED_CODE_LIST);
1814     DoWeakList<Code>(heap, context, retainer, record_slots,
1815         Context::DEOPTIMIZED_CODE_LIST);
1816   }
1817
1818   template<class T>
1819   static void DoWeakList(Heap* heap,
1820                          Context* context,
1821                          WeakObjectRetainer* retainer,
1822                          bool record_slots,
1823                          int index) {
1824     // Visit the weak list, removing dead intermediate elements.
1825     Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
1826         record_slots);
1827
1828     // Update the list head.
1829     context->set(index, list_head, UPDATE_WRITE_BARRIER);
1830
1831     if (record_slots) {
1832       // Record the updated slot if necessary.
1833       Object** head_slot = HeapObject::RawField(
1834           context, FixedArray::SizeFor(index));
1835       heap->mark_compact_collector()->RecordSlot(
1836           head_slot, head_slot, list_head);
1837     }
1838   }
1839
1840   static void VisitPhantomObject(Heap*, Context*) {
1841   }
1842
1843   static int WeakNextOffset() {
1844     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1845   }
1846 };
1847
1848
1849 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1850   // We don't record weak slots during marking or scavenges.
1851   // Instead we do it once when we complete the mark-compact cycle.  Note
1852   // that the write barrier has no effect if we are already in the middle of
1853   // a compacting mark-sweep cycle, so we have to record slots manually.
1854   bool record_slots =
1855       gc_state() == MARK_COMPACT &&
1856       mark_compact_collector()->is_compacting();
1857   ProcessArrayBuffers(retainer, record_slots);
1858   ProcessNativeContexts(retainer, record_slots);
1859   // TODO(mvstanton): AllocationSites only need to be processed during
1860   // MARK_COMPACT, as they live in old space. Verify and address.
1861   ProcessAllocationSites(retainer, record_slots);
1862 }
1863
1864 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1865                                  bool record_slots) {
1866   Object* head =
1867       VisitWeakList<Context>(
1868           this, native_contexts_list(), retainer, record_slots);
1869   // Update the head of the list of contexts.
1870   native_contexts_list_ = head;
1871 }
1872
1873
1874 template<>
1875 struct WeakListVisitor<JSArrayBufferView> {
1876   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1877     obj->set_weak_next(next);
1878   }
1879
1880   static Object* WeakNext(JSArrayBufferView* obj) {
1881     return obj->weak_next();
1882   }
1883
1884   static void VisitLiveObject(Heap*,
1885                               JSArrayBufferView* obj,
1886                               WeakObjectRetainer* retainer,
1887                               bool record_slots) {}
1888
1889   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1890
1891   static int WeakNextOffset() {
1892     return JSArrayBufferView::kWeakNextOffset;
1893   }
1894 };
1895
1896
1897 template<>
1898 struct WeakListVisitor<JSArrayBuffer> {
1899   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1900     obj->set_weak_next(next);
1901   }
1902
1903   static Object* WeakNext(JSArrayBuffer* obj) {
1904     return obj->weak_next();
1905   }
1906
1907   static void VisitLiveObject(Heap* heap,
1908                               JSArrayBuffer* array_buffer,
1909                               WeakObjectRetainer* retainer,
1910                               bool record_slots) {
1911     Object* typed_array_obj =
1912         VisitWeakList<JSArrayBufferView>(
1913             heap,
1914             array_buffer->weak_first_view(),
1915             retainer, record_slots);
1916     array_buffer->set_weak_first_view(typed_array_obj);
1917     if (typed_array_obj != heap->undefined_value() && record_slots) {
1918       Object** slot = HeapObject::RawField(
1919           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1920       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1921     }
1922   }
1923
1924   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1925     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1926   }
1927
1928   static int WeakNextOffset() {
1929     return JSArrayBuffer::kWeakNextOffset;
1930   }
1931 };
1932
1933
1934 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1935                                bool record_slots) {
1936   Object* array_buffer_obj =
1937       VisitWeakList<JSArrayBuffer>(this,
1938                                    array_buffers_list(),
1939                                    retainer, record_slots);
1940   set_array_buffers_list(array_buffer_obj);
1941 }
1942
1943
1944 void Heap::TearDownArrayBuffers() {
1945   Object* undefined = undefined_value();
1946   for (Object* o = array_buffers_list(); o != undefined;) {
1947     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1948     Runtime::FreeArrayBuffer(isolate(), buffer);
1949     o = buffer->weak_next();
1950   }
1951   array_buffers_list_ = undefined;
1952 }
1953
1954
1955 template<>
1956 struct WeakListVisitor<AllocationSite> {
1957   static void SetWeakNext(AllocationSite* obj, Object* next) {
1958     obj->set_weak_next(next);
1959   }
1960
1961   static Object* WeakNext(AllocationSite* obj) {
1962     return obj->weak_next();
1963   }
1964
1965   static void VisitLiveObject(Heap* heap,
1966                               AllocationSite* site,
1967                               WeakObjectRetainer* retainer,
1968                               bool record_slots) {}
1969
1970   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1971
1972   static int WeakNextOffset() {
1973     return AllocationSite::kWeakNextOffset;
1974   }
1975 };
1976
1977
1978 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1979                                   bool record_slots) {
1980   Object* allocation_site_obj =
1981       VisitWeakList<AllocationSite>(this,
1982                                     allocation_sites_list(),
1983                                     retainer, record_slots);
1984   set_allocation_sites_list(allocation_site_obj);
1985 }
1986
1987
1988 void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
1989   DisallowHeapAllocation no_allocation_scope;
1990   Object* cur = allocation_sites_list();
1991   bool marked = false;
1992   while (cur->IsAllocationSite()) {
1993     AllocationSite* casted = AllocationSite::cast(cur);
1994     if (casted->GetPretenureMode() == flag) {
1995       casted->ResetPretenureDecision();
1996       bool got_marked = casted->dependent_code()->MarkCodeForDeoptimization(
1997           isolate_,
1998           DependentCode::kAllocationSiteTenuringChangedGroup);
1999       if (got_marked) marked = true;
2000     }
2001     cur = casted->weak_next();
2002   }
2003   if (marked) isolate_->stack_guard()->DeoptMarkedCode();
2004 }
2005
2006
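// If less than kOldSurvivalRateLowThreshold percent of the old generation
// survived this GC, the pretenuring decisions are suspect: reset every
// TENURED allocation site and deoptimize its dependent code so that the
// decisions can be re-evaluated.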
2007 void Heap::EvaluateOldSpaceLocalPretenuring(
2008     uint64_t size_of_objects_before_gc) {
2009   uint64_t size_of_objects_after_gc = SizeOfObjects();
2010   double old_generation_survival_rate =
2011       (static_cast<double>(size_of_objects_after_gc) * 100) /
2012           static_cast<double>(size_of_objects_before_gc);
2013
2014   if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
2015     // Too many objects died in the old generation; pretenuring of the wrong
2016     // allocation sites may be the cause.  We have to deopt all dependent
2017     // code registered in the allocation sites to re-evaluate our pretenuring
2018     // decisions.
2019     ResetAllAllocationSitesDependentCode(TENURED);
2020     if (FLAG_trace_pretenuring) {
2021       PrintF("Deopt all allocation sites dependent code due to low survival "
2022              "rate in the old generation %f\n", old_generation_survival_rate);
2023     }
2024   }
2025 }
2026
2027
2028 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
2029   DisallowHeapAllocation no_allocation;
2030   // All external strings are listed in the external string table.
2031
2032   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
2033    public:
2034     explicit ExternalStringTableVisitorAdapter(
2035         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
2036     virtual void VisitPointers(Object** start, Object** end) {
2037       for (Object** p = start; p < end; p++) {
2038         ASSERT((*p)->IsExternalString());
2039         visitor_->VisitExternalString(Utils::ToLocal(
2040             Handle<String>(String::cast(*p))));
2041       }
2042     }
2043    private:
2044     v8::ExternalResourceVisitor* visitor_;
2045   } external_string_table_visitor(visitor);
2046
2047   external_string_table_.Iterate(&external_string_table_visitor);
2048 }
2049
2050
2051 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
2052  public:
2053   static inline void VisitPointer(Heap* heap, Object** p) {
2054     Object* object = *p;
2055     if (!heap->InNewSpace(object)) return;
2056     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
2057                          reinterpret_cast<HeapObject*>(object));
2058   }
2059 };
2060
2061
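// Drains the scavenger's two work queues: newly copied objects in to space
// between new_space_front and the allocation top are iterated for pointers
// to evacuate, and promoted objects popped from the promotion queue are
// re-scanned for pointers back into the from space.  Returns the updated
// new_space_front.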
2062 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
2063                          Address new_space_front) {
2064   do {
2065     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
2066     // The addresses new_space_front and new_space_.top() define a
2067     // queue of unprocessed copied objects.  Process them until the
2068     // queue is empty.
2069     while (new_space_front != new_space_.top()) {
2070       if (!NewSpacePage::IsAtEnd(new_space_front)) {
2071         HeapObject* object = HeapObject::FromAddress(new_space_front);
2072         new_space_front +=
2073           NewSpaceScavenger::IterateBody(object->map(), object);
2074       } else {
2075         new_space_front =
2076             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
2077       }
2078     }
2079
2080     // Promote and process all the to-be-promoted objects.
2081     {
2082       StoreBufferRebuildScope scope(this,
2083                                     store_buffer(),
2084                                     &ScavengeStoreBufferCallback);
2085       while (!promotion_queue()->is_empty()) {
2086         HeapObject* target;
2087         int size;
2088         promotion_queue()->remove(&target, &size);
2089
2090         // A promoted object might already be partially visited during
2091         // old space pointer iteration.  Thus we search specifically for
2092         // pointers to the from semispace instead of looking for pointers
2093         // to new space.
2094         ASSERT(!target->IsMap());
2095         IterateAndMarkPointersToFromSpace(target->address(),
2096                                           target->address() + size,
2097                                           &ScavengeObject);
2098       }
2099     }
2100
2101     // Take another spin if there are now unswept objects in new space
2102     // (there are currently no more unswept promoted objects).
2103   } while (new_space_front != new_space_.top());
2104
2105   return new_space_front;
2106 }
2107
2108
2109 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2110 STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);
2111
2112
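// The callers allocate one extra pointer-sized word (already included in
// size).  If the raw address is misaligned, a one-word filler is placed at
// the start and the object begins one word later; otherwise the spare word
// at the end is turned into a filler.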
2113 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
2114                                               HeapObject* object,
2115                                               int size));
2116
2117 static HeapObject* EnsureDoubleAligned(Heap* heap,
2118                                        HeapObject* object,
2119                                        int size) {
2120   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
2121     heap->CreateFillerObjectAt(object->address(), kPointerSize);
2122     return HeapObject::FromAddress(object->address() + kPointerSize);
2123   } else {
2124     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
2125                                kPointerSize);
2126     return object;
2127   }
2128 }
2129
2130
2131 enum LoggingAndProfiling {
2132   LOGGING_AND_PROFILING_ENABLED,
2133   LOGGING_AND_PROFILING_DISABLED
2134 };
2135
2136
2137 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
2138
2139
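// Scavenging visitor specialized on two policies: marks_handling decides
// whether incremental-marking mark bits (and live-byte counts) are
// transferred to the copied object, and logging_and_profiling_mode decides
// whether the heap profiler, logger and CPU profiler are notified of moves.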
2140 template<MarksHandling marks_handling,
2141          LoggingAndProfiling logging_and_profiling_mode>
2142 class ScavengingVisitor : public StaticVisitorBase {
2143  public:
2144   static void Initialize() {
2145     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
2146     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
2147     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
2148     table_.Register(kVisitByteArray, &EvacuateByteArray);
2149     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
2150     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
2151     table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
2152     table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
2153
2154     table_.Register(kVisitNativeContext,
2155                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2156                         template VisitSpecialized<Context::kSize>);
2157
2158     table_.Register(kVisitConsString,
2159                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2160                         template VisitSpecialized<ConsString::kSize>);
2161
2162     table_.Register(kVisitSlicedString,
2163                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2164                         template VisitSpecialized<SlicedString::kSize>);
2165
2166     table_.Register(kVisitSymbol,
2167                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2168                         template VisitSpecialized<Symbol::kSize>);
2169
2170     table_.Register(kVisitSharedFunctionInfo,
2171                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2172                         template VisitSpecialized<SharedFunctionInfo::kSize>);
2173
2174     table_.Register(kVisitJSWeakMap,
2175                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2176                     Visit);
2177
2178     table_.Register(kVisitJSWeakSet,
2179                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2180                     Visit);
2181
2182     table_.Register(kVisitJSArrayBuffer,
2183                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2184                     Visit);
2185
2186     table_.Register(kVisitJSTypedArray,
2187                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2188                     Visit);
2189
2190     table_.Register(kVisitJSDataView,
2191                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2192                     Visit);
2193
2194     table_.Register(kVisitJSRegExp,
2195                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
2196                     Visit);
2197
2198     if (marks_handling == IGNORE_MARKS) {
2199       table_.Register(kVisitJSFunction,
2200                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
2201                           template VisitSpecialized<JSFunction::kSize>);
2202     } else {
2203       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2204     }
2205
2206     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2207                                    kVisitDataObject,
2208                                    kVisitDataObjectGeneric>();
2209
2210     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2211                                    kVisitJSObject,
2212                                    kVisitJSObjectGeneric>();
2213
2214     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2215                                    kVisitStruct,
2216                                    kVisitStructGeneric>();
2217   }
2218
2219   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2220     return &table_;
2221   }
2222
2223  private:
2224   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2225
2226   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2227     bool should_record = false;
2228 #ifdef DEBUG
2229     should_record = FLAG_heap_stats;
2230 #endif
2231     should_record = should_record || FLAG_log_gc;
2232     if (should_record) {
2233       if (heap->new_space()->Contains(obj)) {
2234         heap->new_space()->RecordAllocation(obj);
2235       } else {
2236         heap->new_space()->RecordPromotion(obj);
2237       }
2238     }
2239   }
2240
2241   // Helper function used by EvacuateObject to copy a source object to an
2242   // allocated target object and update the forwarding pointer in the
2243   // source object.
2244   INLINE(static void MigrateObject(Heap* heap,
2245                                    HeapObject* source,
2246                                    HeapObject* target,
2247                                    int size)) {
2248     // Copy the content of source to target.
2249     heap->CopyBlock(target->address(), source->address(), size);
2250
2251     // Set the forwarding address.
2252     source->set_map_word(MapWord::FromForwardingAddress(target));
2253
2254     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2255       // Update NewSpace stats if necessary.
2256       RecordCopiedObject(heap, target);
2257       Isolate* isolate = heap->isolate();
2258       HeapProfiler* heap_profiler = isolate->heap_profiler();
2259       if (heap_profiler->is_tracking_object_moves()) {
2260         heap_profiler->ObjectMoveEvent(source->address(), target->address(),
2261                                        size);
2262       }
2263       if (isolate->logger()->is_logging_code_events() ||
2264           isolate->cpu_profiler()->is_profiling()) {
2265         if (target->IsSharedFunctionInfo()) {
2266           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2267               source->address(), target->address()));
2268         }
2269       }
2270     }
2271
2272     if (marks_handling == TRANSFER_MARKS) {
2273       if (Marking::TransferColor(source, target)) {
2274         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2275       }
2276     }
2277   }
2278
2279
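  // Allocates a target for |object| either in the old generation (when the
  // object should be promoted) or in new space, copies it there via
  // MigrateObject, and enqueues promoted pointer-containing objects on the
  // promotion queue so that their fields are re-scanned later.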
2280   template<ObjectContents object_contents, int alignment>
2281   static inline void EvacuateObject(Map* map,
2282                                     HeapObject** slot,
2283                                     HeapObject* object,
2284                                     int object_size) {
2285     SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2286     SLOW_ASSERT(object->Size() == object_size);
2287
2288     int allocation_size = object_size;
2289     if (alignment != kObjectAlignment) {
2290       ASSERT(alignment == kDoubleAlignment);
2291       allocation_size += kPointerSize;
2292     }
2293
2294     Heap* heap = map->GetHeap();
2295     if (heap->ShouldBePromoted(object->address(), object_size)) {
2296       MaybeObject* maybe_result;
2297
2298       if (object_contents == DATA_OBJECT) {
2299         ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
2300         maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2301       } else {
2302         ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
2303         maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
2304       }
2305
2306       Object* result = NULL;  // Initialization to please compiler.
2307       if (maybe_result->ToObject(&result)) {
2308         HeapObject* target = HeapObject::cast(result);
2309
2310         if (alignment != kObjectAlignment) {
2311           target = EnsureDoubleAligned(heap, target, allocation_size);
2312         }
2313
2314         // Order is important: slot might be inside of the target if target
2315         // was allocated over a dead object and slot comes from the store
2316         // buffer.
2317         *slot = target;
2318         MigrateObject(heap, object, target, object_size);
2319
2320         if (object_contents == POINTER_OBJECT) {
2321           if (map->instance_type() == JS_FUNCTION_TYPE) {
2322             heap->promotion_queue()->insert(
2323                 target, JSFunction::kNonWeakFieldsEndOffset);
2324           } else {
2325             heap->promotion_queue()->insert(target, object_size);
2326           }
2327         }
2328
2329         heap->tracer()->increment_promoted_objects_size(object_size);
2330         return;
2331       }
2332     }
2333     ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
2334     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2335     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2336     Object* result = allocation->ToObjectUnchecked();
2337     HeapObject* target = HeapObject::cast(result);
2338
2339     if (alignment != kObjectAlignment) {
2340       target = EnsureDoubleAligned(heap, target, allocation_size);
2341     }
2342
2343     // Order is important: slot might be inside of the target if target
2344     // was allocated over a dead object and slot comes from the store
2345     // buffer.
2346     *slot = target;
2347     MigrateObject(heap, object, target, object_size);
2348     return;
2349   }
2350
2351
2352   static inline void EvacuateJSFunction(Map* map,
2353                                         HeapObject** slot,
2354                                         HeapObject* object) {
2355     ObjectEvacuationStrategy<POINTER_OBJECT>::
2356         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2357
2358     HeapObject* target = *slot;
2359     MarkBit mark_bit = Marking::MarkBitFrom(target);
2360     if (Marking::IsBlack(mark_bit)) {
2361       // This object is black and might not be rescanned by the marker.  We
2362       // should explicitly record the code entry slot for compaction because
2363       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2364       // miss it, as it is not HeapObject-tagged.
2365       Address code_entry_slot =
2366           target->address() + JSFunction::kCodeEntryOffset;
2367       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2368       map->GetHeap()->mark_compact_collector()->
2369           RecordCodeEntrySlot(code_entry_slot, code);
2370     }
2371   }
2372
2373
2374   static inline void EvacuateFixedArray(Map* map,
2375                                         HeapObject** slot,
2376                                         HeapObject* object) {
2377     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2378     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2379         map, slot, object, object_size);
2380   }
2381
2382
2383   static inline void EvacuateFixedDoubleArray(Map* map,
2384                                               HeapObject** slot,
2385                                               HeapObject* object) {
2386     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2387     int object_size = FixedDoubleArray::SizeFor(length);
2388     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2389         map, slot, object, object_size);
2390   }
2391
2392
2393   static inline void EvacuateFixedTypedArray(Map* map,
2394                                              HeapObject** slot,
2395                                              HeapObject* object) {
2396     int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
2397     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2398         map, slot, object, object_size);
2399   }
2400
2401
2402   static inline void EvacuateFixedFloat64Array(Map* map,
2403                                                HeapObject** slot,
2404                                                HeapObject* object) {
2405     int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
2406     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2407         map, slot, object, object_size);
2408   }
2409
2410
2411   static inline void EvacuateByteArray(Map* map,
2412                                        HeapObject** slot,
2413                                        HeapObject* object) {
2414     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2415     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2416         map, slot, object, object_size);
2417   }
2418
2419
2420   static inline void EvacuateSeqOneByteString(Map* map,
2421                                             HeapObject** slot,
2422                                             HeapObject* object) {
2423     int object_size = SeqOneByteString::cast(object)->
2424         SeqOneByteStringSize(map->instance_type());
2425     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2426         map, slot, object, object_size);
2427   }
2428
2429
2430   static inline void EvacuateSeqTwoByteString(Map* map,
2431                                               HeapObject** slot,
2432                                               HeapObject* object) {
2433     int object_size = SeqTwoByteString::cast(object)->
2434         SeqTwoByteStringSize(map->instance_type());
2435     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2436         map, slot, object, object_size);
2437   }
2438
2439
2440   static inline bool IsShortcutCandidate(int type) {
2441     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2442   }
2443
2444   static inline void EvacuateShortcutCandidate(Map* map,
2445                                                HeapObject** slot,
2446                                                HeapObject* object) {
2447     ASSERT(IsShortcutCandidate(map->instance_type()));
2448
2449     Heap* heap = map->GetHeap();
2450
2451     if (marks_handling == IGNORE_MARKS &&
2452         ConsString::cast(object)->unchecked_second() ==
2453         heap->empty_string()) {
2454       HeapObject* first =
2455           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2456
2457       *slot = first;
2458
2459       if (!heap->InNewSpace(first)) {
2460         object->set_map_word(MapWord::FromForwardingAddress(first));
2461         return;
2462       }
2463
2464       MapWord first_word = first->map_word();
2465       if (first_word.IsForwardingAddress()) {
2466         HeapObject* target = first_word.ToForwardingAddress();
2467
2468         *slot = target;
2469         object->set_map_word(MapWord::FromForwardingAddress(target));
2470         return;
2471       }
2472
2473       heap->DoScavengeObject(first->map(), slot, first);
2474       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2475       return;
2476     }
2477
2478     int object_size = ConsString::kSize;
2479     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2480         map, slot, object, object_size);
2481   }
2482
2483   template<ObjectContents object_contents>
2484   class ObjectEvacuationStrategy {
2485    public:
2486     template<int object_size>
2487     static inline void VisitSpecialized(Map* map,
2488                                         HeapObject** slot,
2489                                         HeapObject* object) {
2490       EvacuateObject<object_contents, kObjectAlignment>(
2491           map, slot, object, object_size);
2492     }
2493
2494     static inline void Visit(Map* map,
2495                              HeapObject** slot,
2496                              HeapObject* object) {
2497       int object_size = map->instance_size();
2498       EvacuateObject<object_contents, kObjectAlignment>(
2499           map, slot, object, object_size);
2500     }
2501   };
2502
2503   static VisitorDispatchTable<ScavengingCallback> table_;
2504 };
2505
2506
2507 template<MarksHandling marks_handling,
2508          LoggingAndProfiling logging_and_profiling_mode>
2509 VisitorDispatchTable<ScavengingCallback>
2510     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2511
2512
2513 static void InitializeScavengingVisitorsTables() {
2514   ScavengingVisitor<TRANSFER_MARKS,
2515                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2516   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2517   ScavengingVisitor<TRANSFER_MARKS,
2518                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2519   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2520 }
2521
2522
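// Picks the scavenging dispatch table for this collection: marks are
// transferred only while incremental marking is active, and the
// logging/profiling variant is selected only when logging or profiling is
// active.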
2523 void Heap::SelectScavengingVisitorsTable() {
2524   bool logging_and_profiling =
2525       isolate()->logger()->is_logging() ||
2526       isolate()->cpu_profiler()->is_profiling() ||
2527       (isolate()->heap_profiler() != NULL &&
2528        isolate()->heap_profiler()->is_tracking_object_moves());
2529
2530   if (!incremental_marking()->IsMarking()) {
2531     if (!logging_and_profiling) {
2532       scavenging_visitors_table_.CopyFrom(
2533           ScavengingVisitor<IGNORE_MARKS,
2534                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2535     } else {
2536       scavenging_visitors_table_.CopyFrom(
2537           ScavengingVisitor<IGNORE_MARKS,
2538                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2539     }
2540   } else {
2541     if (!logging_and_profiling) {
2542       scavenging_visitors_table_.CopyFrom(
2543           ScavengingVisitor<TRANSFER_MARKS,
2544                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2545     } else {
2546       scavenging_visitors_table_.CopyFrom(
2547           ScavengingVisitor<TRANSFER_MARKS,
2548                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2549     }
2550
2551     if (incremental_marking()->IsCompacting()) {
2552       // When compacting, forbid short-circuiting of cons strings.  The
2553       // scavenging code relies on the fact that a new space object can't
2554       // be evacuated into an evacuation candidate, but short-circuiting
2555       // violates this assumption.
2556       scavenging_visitors_table_.Register(
2557           StaticVisitorBase::kVisitShortcutCandidate,
2558           scavenging_visitors_table_.GetVisitorById(
2559               StaticVisitorBase::kVisitConsString));
2560     }
2561   }
2562 }
2563
2564
2565 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2566   SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
2567   MapWord first_word = object->map_word();
2568   SLOW_ASSERT(!first_word.IsForwardingAddress());
2569   Map* map = first_word.ToMap();
2570   map->GetHeap()->DoScavengeObject(map, p, object);
2571 }
2572
2573
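// Allocates a map with only the fields that are needed to bootstrap the other
// initial maps; the remaining fields (descriptors, code cache, prototype,
// back pointer, ...) are patched up later in CreateInitialMaps.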
2574 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2575                                       int instance_size) {
2576   Object* result;
2577   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2578   if (!maybe_result->ToObject(&result)) return maybe_result;
2579
2580   // Map::cast cannot be used due to uninitialized map field.
2581   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2582   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2583   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2584   reinterpret_cast<Map*>(result)->set_visitor_id(
2585         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2586   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2587   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2588   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2589   reinterpret_cast<Map*>(result)->set_bit_field(0);
2590   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2591   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2592                    Map::OwnsDescriptors::encode(true);
2593   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2594   return result;
2595 }
2596
2597
2598 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2599                                int instance_size,
2600                                ElementsKind elements_kind) {
2601   Object* result;
2602   MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
2603   if (!maybe_result->To(&result)) return maybe_result;
2604
2605   Map* map = reinterpret_cast<Map*>(result);
2606   map->set_map_no_write_barrier(meta_map());
2607   map->set_instance_type(instance_type);
2608   map->set_visitor_id(
2609       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2610   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2611   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2612   map->set_instance_size(instance_size);
2613   map->set_inobject_properties(0);
2614   map->set_pre_allocated_property_fields(0);
2615   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2616   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2617                           SKIP_WRITE_BARRIER);
2618   map->init_back_pointer(undefined_value());
2619   map->set_unused_property_fields(0);
2620   map->set_instance_descriptors(empty_descriptor_array());
2621   map->set_bit_field(0);
2622   map->set_bit_field2(1 << Map::kIsExtensible);
2623   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
2624                    Map::OwnsDescriptors::encode(true);
2625   map->set_bit_field3(bit_field3);
2626   map->set_elements_kind(elements_kind);
2627
2628   return map;
2629 }
2630
2631
2632 MaybeObject* Heap::AllocateCodeCache() {
2633   CodeCache* code_cache;
2634   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2635     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2636   }
2637   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2638   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2639   return code_cache;
2640 }
2641
2642
2643 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2644   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2645 }
2646
2647
2648 MaybeObject* Heap::AllocateAccessorPair() {
2649   AccessorPair* accessors;
2650   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2651     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2652   }
2653   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2654   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2655   accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
2656   return accessors;
2657 }
2658
2659
2660 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2661   TypeFeedbackInfo* info;
2662   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2663     if (!maybe_info->To(&info)) return maybe_info;
2664   }
2665   info->initialize_storage();
2666   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2667                                 SKIP_WRITE_BARRIER);
2668   return info;
2669 }
2670
2671
2672 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2673   AliasedArgumentsEntry* entry;
2674   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2675     if (!maybe_entry->To(&entry)) return maybe_entry;
2676   }
2677   entry->set_aliased_context_slot(aliased_context_slot);
2678   return entry;
2679 }
2680
2681
2682 const Heap::StringTypeTable Heap::string_type_table[] = {
2683 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2684   {type, size, k##camel_name##MapRootIndex},
2685   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2686 #undef STRING_TYPE_ELEMENT
2687 };
2688
2689
2690 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2691 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2692   {contents, k##name##RootIndex},
2693   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2694 #undef CONSTANT_STRING_ELEMENT
2695 };
2696
2697
2698 const Heap::StructTable Heap::struct_table[] = {
2699 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2700   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2701   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2702 #undef STRUCT_TABLE_ELEMENT
2703 };
2704
2705
2706 bool Heap::CreateInitialMaps() {
2707   Object* obj;
2708   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2709     if (!maybe_obj->ToObject(&obj)) return false;
2710   }
2711   // Map::cast cannot be used due to uninitialized map field.
2712   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2713   set_meta_map(new_meta_map);
2714   new_meta_map->set_map(new_meta_map);
2715
2716   { MaybeObject* maybe_obj =
2717         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2718     if (!maybe_obj->ToObject(&obj)) return false;
2719   }
2720   set_fixed_array_map(Map::cast(obj));
2721
2722   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2723     if (!maybe_obj->ToObject(&obj)) return false;
2724   }
2725   set_oddball_map(Map::cast(obj));
2726
2727   { MaybeObject* maybe_obj =
2728         AllocatePartialMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
2729     if (!maybe_obj->ToObject(&obj)) return false;
2730   }
2731   set_constant_pool_array_map(Map::cast(obj));
2732
2733   // Allocate the empty array.
2734   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2735     if (!maybe_obj->ToObject(&obj)) return false;
2736   }
2737   set_empty_fixed_array(FixedArray::cast(obj));
2738
2739   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2740     if (!maybe_obj->ToObject(&obj)) return false;
2741   }
2742   set_null_value(Oddball::cast(obj));
2743   Oddball::cast(obj)->set_kind(Oddball::kNull);
2744
2745   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2746     if (!maybe_obj->ToObject(&obj)) return false;
2747   }
2748   set_undefined_value(Oddball::cast(obj));
2749   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2750   ASSERT(!InNewSpace(undefined_value()));
2751
2752   // Allocate the empty descriptor array.
2753   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2754     if (!maybe_obj->ToObject(&obj)) return false;
2755   }
2756   set_empty_descriptor_array(DescriptorArray::cast(obj));
2757
2758   // Allocate the constant pool array.
2759   { MaybeObject* maybe_obj = AllocateEmptyConstantPoolArray();
2760     if (!maybe_obj->ToObject(&obj)) return false;
2761   }
2762   set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
2763
2764   // Fix the instance_descriptors for the existing maps.
2765   meta_map()->set_code_cache(empty_fixed_array());
2766   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2767   meta_map()->init_back_pointer(undefined_value());
2768   meta_map()->set_instance_descriptors(empty_descriptor_array());
2769
2770   fixed_array_map()->set_code_cache(empty_fixed_array());
2771   fixed_array_map()->set_dependent_code(
2772       DependentCode::cast(empty_fixed_array()));
2773   fixed_array_map()->init_back_pointer(undefined_value());
2774   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2775
2776   oddball_map()->set_code_cache(empty_fixed_array());
2777   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2778   oddball_map()->init_back_pointer(undefined_value());
2779   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2780
2781   constant_pool_array_map()->set_code_cache(empty_fixed_array());
2782   constant_pool_array_map()->set_dependent_code(
2783       DependentCode::cast(empty_fixed_array()));
2784   constant_pool_array_map()->init_back_pointer(undefined_value());
2785   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
2786
2787   // Fix prototype object for existing maps.
2788   meta_map()->set_prototype(null_value());
2789   meta_map()->set_constructor(null_value());
2790
2791   fixed_array_map()->set_prototype(null_value());
2792   fixed_array_map()->set_constructor(null_value());
2793
2794   oddball_map()->set_prototype(null_value());
2795   oddball_map()->set_constructor(null_value());
2796
2797   constant_pool_array_map()->set_prototype(null_value());
2798   constant_pool_array_map()->set_constructor(null_value());
2799
2800   { // Map allocation
2801 #define ALLOCATE_MAP(instance_type, size, field_name)                          \
2802     { Map* map;                                                                \
2803       if (!AllocateMap((instance_type), size)->To(&map)) return false;         \
2804       set_##field_name##_map(map);                                             \
2805     }
2806
2807 #define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                        \
2808     ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
2809
2810     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
2811     ASSERT(fixed_array_map() != fixed_cow_array_map());
2812
2813     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
2814     ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
2815     ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
2816     ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
2817
2818     for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2819       const StringTypeTable& entry = string_type_table[i];
2820       { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2821         if (!maybe_obj->ToObject(&obj)) return false;
2822       }
2823       roots_[entry.index] = Map::cast(obj);
2824     }
2825
2826     ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
2827     undetectable_string_map()->set_is_undetectable();
2828
2829     ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
2830     undetectable_ascii_string_map()->set_is_undetectable();
2831
2832     ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
2833     ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
2834     ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
2835
2836 #define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
2837     ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
2838         external_##type##_array)
2839
2840      TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
2841 #undef ALLOCATE_EXTERNAL_ARRAY_MAP
2842
2843 #define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
2844     ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
2845         fixed_##type##_array)
2846
2847      TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
2848 #undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
2849
2850     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, non_strict_arguments_elements)
2851
2852     ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
2853
2854     ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
2855     ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
2856     ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
2857     ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
2858
2859
2860     for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2861       const StructTable& entry = struct_table[i];
2862       Map* map;
2863       if (!AllocateMap(entry.type, entry.size)->To(&map))
2864         return false;
2865       roots_[entry.index] = map;
2866     }
2867
2868     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
2869
2870     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
2871     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
2872     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
2873     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
2874     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
2875     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
2876
2877     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
2878     native_context_map()->set_dictionary_map(true);
2879     native_context_map()->set_visitor_id(
2880         StaticVisitorBase::kVisitNativeContext);
2881
2882     ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
2883         shared_function_info)
2884
2885     ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
2886         message_object)
2887     ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
2888         external)
2889     external_map()->set_is_extensible(false);
2890 #undef ALLOCATE_VARSIZE_MAP
2891 #undef ALLOCATE_MAP
2892   }
2893
2894   { // Empty arrays
2895     { ByteArray* byte_array;
2896       if (!AllocateByteArray(0, TENURED)->To(&byte_array)) return false;
2897       set_empty_byte_array(byte_array);
2898     }
2899
2900 #define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)           \
2901     { ExternalArray* obj;                                                      \
2902       if (!AllocateEmptyExternalArray(kExternal##Type##Array)->To(&obj))       \
2903           return false;                                                        \
2904       set_empty_external_##type##_array(obj);                                  \
2905     }
2906
2907     TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
2908 #undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
2909   }
2910   ASSERT(!InNewSpace(empty_fixed_array()));
2911   return true;
2912 }
2913
2914
2915 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2916   // Statically ensure that it is safe to allocate heap numbers in paged
2917   // spaces.
2918   int size = HeapNumber::kSize;
2919   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
2920
2921   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2922
2923   Object* result;
2924   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2925     if (!maybe_result->ToObject(&result)) return maybe_result;
2926   }
2927
2928   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2929   HeapNumber::cast(result)->set_value(value);
2930   return result;
2931 }
2932
2933
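// Allocates a Cell in cell space and initializes it with the given value.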
2934 MaybeObject* Heap::AllocateCell(Object* value) {
2935   int size = Cell::kSize;
2936   STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
2937
2938   Object* result;
2939   { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2940     if (!maybe_result->ToObject(&result)) return maybe_result;
2941   }
2942   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2943   Cell::cast(result)->set_value(value);
2944   return result;
2945 }
2946
2947
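// Allocates a PropertyCell in property cell space, initialized with the hole
// value, an empty dependent code list and type None.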
2948 MaybeObject* Heap::AllocatePropertyCell() {
2949   int size = PropertyCell::kSize;
2950   STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
2951
2952   Object* result;
2953   MaybeObject* maybe_result =
2954       AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2955   if (!maybe_result->ToObject(&result)) return maybe_result;
2956
2957   HeapObject::cast(result)->set_map_no_write_barrier(
2958       global_property_cell_map());
2959   PropertyCell* cell = PropertyCell::cast(result);
2960   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2961                            SKIP_WRITE_BARRIER);
2962   cell->set_value(the_hole_value());
2963   cell->set_type(HeapType::None());
2964   return result;
2965 }
2966
2967
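// Allocates a Box struct wrapping |value|.  Note that |pretenure| is not
// forwarded to AllocateStruct here.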
2968 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2969   Box* result;
2970   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2971   if (!maybe_result->To(&result)) return maybe_result;
2972   result->set_value(value);
2973   return result;
2974 }
2975
2976
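// Allocates an AllocationSite in old pointer space and links it into the
// heap's weak list of allocation sites.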
2977 MaybeObject* Heap::AllocateAllocationSite() {
2978   AllocationSite* site;
2979   MaybeObject* maybe_result = Allocate(allocation_site_map(),
2980                                        OLD_POINTER_SPACE);
2981   if (!maybe_result->To(&site)) return maybe_result;
2982   site->Initialize();
2983
2984   // Link the new site into the weak list of allocation sites.
2985   site->set_weak_next(allocation_sites_list());
2986   set_allocation_sites_list(site);
2987   return site;
2988 }
2989
2990
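// Allocates an Oddball in old pointer space and initializes it with the given
// string representation, number value and kind.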
2991 MaybeObject* Heap::CreateOddball(const char* to_string,
2992                                  Object* to_number,
2993                                  byte kind) {
2994   Object* result;
2995   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2996     if (!maybe_result->ToObject(&result)) return maybe_result;
2997   }
2998   return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
2999 }
3000
3001
3002 bool Heap::CreateApiObjects() {
3003   Object* obj;
3004
3005   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3006     if (!maybe_obj->ToObject(&obj)) return false;
3007   }
3008   // Don't use Smi-only elements optimizations for objects with the neander
3009   // map. There are too many places where element values are set directly for
3010   // a single bottleneck to trap the Smi-only -> fast elements transition, and
3011   // there appears to be no benefit in optimizing this case.
3012   Map* new_neander_map = Map::cast(obj);
3013   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
3014   set_neander_map(new_neander_map);
3015
3016   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3017     if (!maybe_obj->ToObject(&obj)) return false;
3018   }
3019   Object* elements;
3020   { MaybeObject* maybe_elements = AllocateFixedArray(2);
3021     if (!maybe_elements->ToObject(&elements)) return false;
3022   }
3023   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3024   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3025   set_message_listeners(JSObject::cast(obj));
3026
3027   return true;
3028 }
3029
3030
3031 void Heap::CreateJSEntryStub() {
3032   JSEntryStub stub;
3033   set_js_entry_code(*stub.GetCode(isolate()));
3034 }
3035
3036
3037 void Heap::CreateJSConstructEntryStub() {
3038   JSConstructEntryStub stub;
3039   set_js_construct_entry_code(*stub.GetCode(isolate()));
3040 }
3041
3042
3043 void Heap::CreateFixedStubs() {
3044   // Here we create roots for fixed stubs. They are needed at GC
3045   // for cooking and uncooking (check out frames.cc).
3046   // This eliminates the need for doing a dictionary lookup in the
3047   // stub cache for these stubs.
3048   HandleScope scope(isolate());
3049   // gcc-4.4 has problems generating correct code for the following snippet:
3050   // {  JSEntryStub stub;
3051   //    js_entry_code_ = *stub.GetCode();
3052   // }
3053   // {  JSConstructEntryStub stub;
3054   //    js_construct_entry_code_ = *stub.GetCode();
3055   // }
3056   // To work around the problem, make separate functions without inlining.
3057   Heap::CreateJSEntryStub();
3058   Heap::CreateJSConstructEntryStub();
3059
3060   // Create stubs that should be there, so we don't unexpectedly have to
3061   // create them if we need them during the creation of another stub.
3062   // Stub creation mixes raw pointers and handles in an unsafe manner so
3063   // we cannot create stubs while we are creating stubs.
3064   CodeStub::GenerateStubsAheadOfTime(isolate());
3065 }
3066
3067
3068 void Heap::CreateStubsRequiringBuiltins() {
3069   HandleScope scope(isolate());
3070   CodeStub::GenerateStubsRequiringBuiltinsAheadOfTime(isolate());
3071 }
3072
3073
3074 bool Heap::CreateInitialObjects() {
3075   Object* obj;
3076
3077   // The -0 value must be set before NumberFromDouble works.
3078   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3079     if (!maybe_obj->ToObject(&obj)) return false;
3080   }
3081   set_minus_zero_value(HeapNumber::cast(obj));
3082   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3083
3084   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3085     if (!maybe_obj->ToObject(&obj)) return false;
3086   }
3087   set_nan_value(HeapNumber::cast(obj));
3088
3089   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3090     if (!maybe_obj->ToObject(&obj)) return false;
3091   }
3092   set_infinity_value(HeapNumber::cast(obj));
3093
3094   // The hole has not been created yet, but we want to put something
3095   // predictable in the gaps in the string table, so let's make that Smi zero.
3096   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3097
3098   // Allocate initial string table.
3099   { MaybeObject* maybe_obj =
3100         StringTable::Allocate(this, kInitialStringTableSize);
3101     if (!maybe_obj->ToObject(&obj)) return false;
3102   }
3103   // Don't use set_string_table() due to asserts.
3104   roots_[kStringTableRootIndex] = obj;
3105
3106   // Finish initializing oddballs after creating the string table.
3107   { MaybeObject* maybe_obj =
3108         undefined_value()->Initialize(this,
3109                                       "undefined",
3110                                       nan_value(),
3111                                       Oddball::kUndefined);
3112     if (!maybe_obj->ToObject(&obj)) return false;
3113   }
3114
3115   // Initialize the null_value.
3116   { MaybeObject* maybe_obj = null_value()->Initialize(
3117       this, "null", Smi::FromInt(0), Oddball::kNull);
3118     if (!maybe_obj->ToObject(&obj)) return false;
3119   }
3120
3121   { MaybeObject* maybe_obj = CreateOddball("true",
3122                                            Smi::FromInt(1),
3123                                            Oddball::kTrue);
3124     if (!maybe_obj->ToObject(&obj)) return false;
3125   }
3126   set_true_value(Oddball::cast(obj));
3127
3128   { MaybeObject* maybe_obj = CreateOddball("false",
3129                                            Smi::FromInt(0),
3130                                            Oddball::kFalse);
3131     if (!maybe_obj->ToObject(&obj)) return false;
3132   }
3133   set_false_value(Oddball::cast(obj));
3134
3135   { MaybeObject* maybe_obj = CreateOddball("hole",
3136                                            Smi::FromInt(-1),
3137                                            Oddball::kTheHole);
3138     if (!maybe_obj->ToObject(&obj)) return false;
3139   }
3140   set_the_hole_value(Oddball::cast(obj));
3141
3142   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3143                                            Smi::FromInt(-1),
3144                                            Oddball::kUninitialized);
3145     if (!maybe_obj->ToObject(&obj)) return false;
3146   }
3147   set_uninitialized_value(Oddball::cast(obj));
3148
3149   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3150                                            Smi::FromInt(-4),
3151                                            Oddball::kArgumentMarker);
3152     if (!maybe_obj->ToObject(&obj)) return false;
3153   }
3154   set_arguments_marker(Oddball::cast(obj));
3155
3156   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3157                                            Smi::FromInt(-2),
3158                                            Oddball::kOther);
3159     if (!maybe_obj->ToObject(&obj)) return false;
3160   }
3161   set_no_interceptor_result_sentinel(obj);
3162
3163   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3164                                            Smi::FromInt(-3),
3165                                            Oddball::kOther);
3166     if (!maybe_obj->ToObject(&obj)) return false;
3167   }
3168   set_termination_exception(obj);
3169
3170   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3171     { MaybeObject* maybe_obj =
3172           InternalizeUtf8String(constant_string_table[i].contents);
3173       if (!maybe_obj->ToObject(&obj)) return false;
3174     }
3175     roots_[constant_string_table[i].index] = String::cast(obj);
3176   }
3177
3178   // Allocate the hidden string which is used to identify the hidden properties
3179   // in JSObjects. The hash code has a special value so that it will not match
3180   // the empty string when searching for the property. It cannot be part of the
3181   // loop above because it needs to be allocated manually with the special
3182   // hash code in place. The hash code for the hidden_string is zero to ensure
3183   // that it will always be the first entry in property descriptors.
3184   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3185       OneByteVector("", 0), String::kEmptyStringHash);
3186     if (!maybe_obj->ToObject(&obj)) return false;
3187   }
3188   hidden_string_ = String::cast(obj);
3189
3190   // Allocate the code_stubs dictionary. The initial size is set to avoid
3191   // expanding the dictionary during bootstrapping.
3192   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3193     if (!maybe_obj->ToObject(&obj)) return false;
3194   }
3195   set_code_stubs(UnseededNumberDictionary::cast(obj));
3196
3197
3198   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3199   // is set to avoid expanding the dictionary during bootstrapping.
3200   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3201     if (!maybe_obj->ToObject(&obj)) return false;
3202   }
3203   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3204
3205   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3206     if (!maybe_obj->ToObject(&obj)) return false;
3207   }
3208   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3209
3210   set_instanceof_cache_function(Smi::FromInt(0));
3211   set_instanceof_cache_map(Smi::FromInt(0));
3212   set_instanceof_cache_answer(Smi::FromInt(0));
3213
3214   CreateFixedStubs();
3215
3216   // Allocate the dictionary of intrinsic function names.
3217   { MaybeObject* maybe_obj =
3218         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3219     if (!maybe_obj->ToObject(&obj)) return false;
3220   }
3221   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3222                                                                        obj);
3223     if (!maybe_obj->ToObject(&obj)) return false;
3224   }
3225   set_intrinsic_function_names(NameDictionary::cast(obj));
3226
3227   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3228     if (!maybe_obj->ToObject(&obj)) return false;
3229   }
3230   set_number_string_cache(FixedArray::cast(obj));
3231
3232   // Allocate cache for single character one byte strings.
3233   { MaybeObject* maybe_obj =
3234         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3235     if (!maybe_obj->ToObject(&obj)) return false;
3236   }
3237   set_single_character_string_cache(FixedArray::cast(obj));
3238
3239   // Allocate cache for string split.
3240   { MaybeObject* maybe_obj = AllocateFixedArray(
3241       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3242     if (!maybe_obj->ToObject(&obj)) return false;
3243   }
3244   set_string_split_cache(FixedArray::cast(obj));
3245
3246   { MaybeObject* maybe_obj = AllocateFixedArray(
3247       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3248     if (!maybe_obj->ToObject(&obj)) return false;
3249   }
3250   set_regexp_multiple_cache(FixedArray::cast(obj));
3251
3252   // Allocate cache for external strings pointing to native source code.
3253   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3254     if (!maybe_obj->ToObject(&obj)) return false;
3255   }
3256   set_natives_source_cache(FixedArray::cast(obj));
3257
3258   { MaybeObject* maybe_obj = AllocateCell(undefined_value());
3259     if (!maybe_obj->ToObject(&obj)) return false;
3260   }
3261   set_undefined_cell(Cell::cast(obj));
3262
3263   // Allocate object to hold object observation state.
3264   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3265     if (!maybe_obj->ToObject(&obj)) return false;
3266   }
3267   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3268     if (!maybe_obj->ToObject(&obj)) return false;
3269   }
3270   set_observation_state(JSObject::cast(obj));
3271
3272   { MaybeObject* maybe_obj = AllocateSymbol();
3273     if (!maybe_obj->ToObject(&obj)) return false;
3274   }
3275   Symbol::cast(obj)->set_is_private(true);
3276   set_frozen_symbol(Symbol::cast(obj));
3277
3278   { MaybeObject* maybe_obj = AllocateSymbol();
3279     if (!maybe_obj->ToObject(&obj)) return false;
3280   }
3281   Symbol::cast(obj)->set_is_private(true);
3282   set_elements_transition_symbol(Symbol::cast(obj));
3283
3284   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3285     if (!maybe_obj->ToObject(&obj)) return false;
3286   }
3287   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3288   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3289
3290   { MaybeObject* maybe_obj = AllocateSymbol();
3291     if (!maybe_obj->ToObject(&obj)) return false;
3292   }
3293   Symbol::cast(obj)->set_is_private(true);
3294   set_observed_symbol(Symbol::cast(obj));
3295
3296   // Handling of script id generation is in Factory::NewScript.
3297   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3298
3299   // Initialize keyed lookup cache.
3300   isolate_->keyed_lookup_cache()->Clear();
3301
3302   // Initialize context slot cache.
3303   isolate_->context_slot_cache()->Clear();
3304
3305   // Initialize descriptor cache.
3306   isolate_->descriptor_lookup_cache()->Clear();
3307
3308   // Initialize compilation cache.
3309   isolate_->compilation_cache()->Clear();
3310
3311   return true;
3312 }
3313
3314
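// Returns true for the small set of roots (caches, stack limits, the string
// table, deopt PC offsets) that may legitimately be updated after the heap
// has been initialized.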
3315 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3316   RootListIndex writable_roots[] = {
3317     kStoreBufferTopRootIndex,
3318     kStackLimitRootIndex,
3319     kNumberStringCacheRootIndex,
3320     kInstanceofCacheFunctionRootIndex,
3321     kInstanceofCacheMapRootIndex,
3322     kInstanceofCacheAnswerRootIndex,
3323     kCodeStubsRootIndex,
3324     kNonMonomorphicCacheRootIndex,
3325     kPolymorphicCodeCacheRootIndex,
3326     kLastScriptIdRootIndex,
3327     kEmptyScriptRootIndex,
3328     kRealStackLimitRootIndex,
3329     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3330     kConstructStubDeoptPCOffsetRootIndex,
3331     kGetterStubDeoptPCOffsetRootIndex,
3332     kSetterStubDeoptPCOffsetRootIndex,
3333     kStringTableRootIndex,
3334   };
3335
3336   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3337     if (root_index == writable_roots[i])
3338       return true;
3339   }
3340   return false;
3341 }
3342
3343
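// A root can be embedded as a constant if it is never written after
// initialization and its current value is not a new-space object.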
3344 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3345   return !RootCanBeWrittenAfterInitialization(root_index) &&
3346       !InNewSpace(roots_array_start()[root_index]);
3347 }
3348
3349
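// Looks up a previously cached result for (key_string, key_pattern).  Two
// slots derived from the string hash are probed: a primary slot aligned to
// kArrayEntriesPerCacheEntry and the entry immediately following it (both
// wrapped to kRegExpResultsCacheSize).  Smi zero signals a cache miss.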
3350 Object* RegExpResultsCache::Lookup(Heap* heap,
3351                                    String* key_string,
3352                                    Object* key_pattern,
3353                                    ResultsCacheType type) {
3354   FixedArray* cache;
3355   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3356   if (type == STRING_SPLIT_SUBSTRINGS) {
3357     ASSERT(key_pattern->IsString());
3358     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3359     cache = heap->string_split_cache();
3360   } else {
3361     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3362     ASSERT(key_pattern->IsFixedArray());
3363     cache = heap->regexp_multiple_cache();
3364   }
3365
3366   uint32_t hash = key_string->Hash();
3367   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3368       ~(kArrayEntriesPerCacheEntry - 1));
3369   if (cache->get(index + kStringOffset) == key_string &&
3370       cache->get(index + kPatternOffset) == key_pattern) {
3371     return cache->get(index + kArrayOffset);
3372   }
3373   index =
3374       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3375   if (cache->get(index + kStringOffset) == key_string &&
3376       cache->get(index + kPatternOffset) == key_pattern) {
3377     return cache->get(index + kArrayOffset);
3378   }
3379   return Smi::FromInt(0);
3380 }
3381
3382
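// Stores value_array in the cache under (key_string, key_pattern).  The
// primary slot is used if it is empty, otherwise the secondary slot; if both
// are occupied, the secondary entry is cleared and the primary slot is
// overwritten.  Short lists of split substrings are internalized, and the
// value array's map is switched to the copy-on-write fixed array map.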
3383 void RegExpResultsCache::Enter(Heap* heap,
3384                                String* key_string,
3385                                Object* key_pattern,
3386                                FixedArray* value_array,
3387                                ResultsCacheType type) {
3388   FixedArray* cache;
3389   if (!key_string->IsInternalizedString()) return;
3390   if (type == STRING_SPLIT_SUBSTRINGS) {
3391     ASSERT(key_pattern->IsString());
3392     if (!key_pattern->IsInternalizedString()) return;
3393     cache = heap->string_split_cache();
3394   } else {
3395     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3396     ASSERT(key_pattern->IsFixedArray());
3397     cache = heap->regexp_multiple_cache();
3398   }
3399
3400   uint32_t hash = key_string->Hash();
3401   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3402       ~(kArrayEntriesPerCacheEntry - 1));
3403   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3404     cache->set(index + kStringOffset, key_string);
3405     cache->set(index + kPatternOffset, key_pattern);
3406     cache->set(index + kArrayOffset, value_array);
3407   } else {
3408     uint32_t index2 =
3409         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3410     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3411       cache->set(index2 + kStringOffset, key_string);
3412       cache->set(index2 + kPatternOffset, key_pattern);
3413       cache->set(index2 + kArrayOffset, value_array);
3414     } else {
3415       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3416       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3417       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3418       cache->set(index + kStringOffset, key_string);
3419       cache->set(index + kPatternOffset, key_pattern);
3420       cache->set(index + kArrayOffset, value_array);
3421     }
3422   }
3423   // If the array is a reasonably short list of substrings, convert it into a
3424   // list of internalized strings.
3425   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3426     for (int i = 0; i < value_array->length(); i++) {
3427       String* str = String::cast(value_array->get(i));
3428       Object* internalized_str;
3429       MaybeObject* maybe_string = heap->InternalizeString(str);
3430       if (maybe_string->ToObject(&internalized_str)) {
3431         value_array->set(i, internalized_str);
3432       }
3433     }
3434   }
3435   // Convert backing store to a copy-on-write array.
3436   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3437 }
3438
3439
3440 void RegExpResultsCache::Clear(FixedArray* cache) {
3441   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3442     cache->set(i, Smi::FromInt(0));
3443   }
3444 }
3445
3446
3447 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3448   MaybeObject* maybe_obj =
3449       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3450   return maybe_obj;
3451 }
3452
3453
3454 int Heap::FullSizeNumberStringCacheLength() {
3455   // Compute the size of the number string cache based on the max newspace size.
3456   // The number string cache has a minimum size based on twice the initial cache
3457   // size to ensure that it is bigger after being made 'full size'.
3458   int number_string_cache_size = max_semispace_size_ / 512;
3459   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3460                                  Min(0x4000, number_string_cache_size));
3461   // There is a string and a number per entry so the length is twice the number
3462   // of entries.
3463   return number_string_cache_size * 2;
3464 }
3465
3466
3467 void Heap::AllocateFullSizeNumberStringCache() {
3468   // The idea is to have a small number string cache in the snapshot to keep
3469   // boot-time memory usage down.  If we already have to expand the number
3470   // string cache while creating the snapshot, then that plan did not work out.
3471   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3472   MaybeObject* maybe_obj =
3473       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3474   Object* new_cache;
3475   if (maybe_obj->ToObject(&new_cache)) {
3476     // We don't bother to repopulate the cache with entries from the old cache.
3477     // It will be repopulated soon enough with new strings.
3478     set_number_string_cache(FixedArray::cast(new_cache));
3479   }
3480   // If allocation fails then we just return without doing anything.  It is only
3481   // a cache, so best effort is OK here.
3482 }
3483
3484
3485 void Heap::FlushNumberStringCache() {
3486   // Flush the number to string cache.
3487   int len = number_string_cache()->length();
3488   for (int i = 0; i < len; i++) {
3489     number_string_cache()->set_undefined(i);
3490   }
3491 }
3492
3493
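// Hash functions for the number string cache: a double hashes to the xor of
// the upper and lower 32 bits of its bit pattern, a Smi hashes to its value.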
3494 static inline int double_get_hash(double d) {
3495   DoubleRepresentation rep(d);
3496   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3497 }
3498
3499
3500 static inline int smi_get_hash(Smi* smi) {
3501   return smi->value();
3502 }
3503
3504
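// The number string cache is a flat FixedArray of (number, string) pairs:
// entry i occupies slots 2 * i (the number used as key) and 2 * i + 1 (the
// cached string).  For example (illustrative only), a cache of length 8 has
// room for 4 such pairs and is indexed by hash & 3.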
3505 Object* Heap::GetNumberStringCache(Object* number) {
3506   int hash;
3507   int mask = (number_string_cache()->length() >> 1) - 1;
3508   if (number->IsSmi()) {
3509     hash = smi_get_hash(Smi::cast(number)) & mask;
3510   } else {
3511     hash = double_get_hash(number->Number()) & mask;
3512   }
3513   Object* key = number_string_cache()->get(hash * 2);
3514   if (key == number) {
3515     return String::cast(number_string_cache()->get(hash * 2 + 1));
3516   } else if (key->IsHeapNumber() &&
3517              number->IsHeapNumber() &&
3518              key->Number() == number->Number()) {
3519     return String::cast(number_string_cache()->get(hash * 2 + 1));
3520   }
3521   return undefined_value();
3522 }
3523
3524
3525 void Heap::SetNumberStringCache(Object* number, String* string) {
3526   int hash;
3527   int mask = (number_string_cache()->length() >> 1) - 1;
3528   if (number->IsSmi()) {
3529     hash = smi_get_hash(Smi::cast(number)) & mask;
3530   } else {
3531     hash = double_get_hash(number->Number()) & mask;
3532   }
3533   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3534       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3535     // The first time we have a hash collision, we move to the full sized
3536     // number string cache.
3537     AllocateFullSizeNumberStringCache();
3538     return;
3539   }
3540   number_string_cache()->set(hash * 2, number);
3541   number_string_cache()->set(hash * 2 + 1, string);
3542 }
3543
3544
3545 MaybeObject* Heap::NumberToString(Object* number,
3546                                   bool check_number_string_cache) {
3547   isolate_->counters()->number_to_string_runtime()->Increment();
3548   if (check_number_string_cache) {
3549     Object* cached = GetNumberStringCache(number);
3550     if (cached != undefined_value()) {
3551       return cached;
3552     }
3553   }
3554
3555   char arr[100];
3556   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3557   const char* str;
3558   if (number->IsSmi()) {
3559     int num = Smi::cast(number)->value();
3560     str = IntToCString(num, buffer);
3561   } else {
3562     double num = HeapNumber::cast(number)->value();
3563     str = DoubleToCString(num, buffer);
3564   }
3565
3566   Object* js_string;
3567
3568   // We tenure the allocated string since it is referenced from the
3569   // number-string cache which lives in the old space.
3570   MaybeObject* maybe_js_string =
3571       AllocateStringFromOneByte(CStrVector(str), TENURED);
3572   if (maybe_js_string->ToObject(&js_string)) {
3573     SetNumberStringCache(number, String::cast(js_string));
3574   }
3575   return maybe_js_string;
3576 }
3577
3578
3579 MaybeObject* Heap::Uint32ToString(uint32_t value,
3580                                   bool check_number_string_cache) {
3581   Object* number;
3582   MaybeObject* maybe = NumberFromUint32(value);
3583   if (!maybe->To<Object>(&number)) return maybe;
3584   return NumberToString(number, check_number_string_cache);
3585 }
3586
3587
3588 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3589   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3590 }
3591
3592
3593 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3594     ExternalArrayType array_type) {
3595   switch (array_type) {
3596 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3597     case kExternal##Type##Array:                                              \
3598       return kExternal##Type##ArrayMapRootIndex;
3599
3600     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3601 #undef ARRAY_TYPE_TO_ROOT_INDEX
3602
3603     default:
3604       UNREACHABLE();
3605       return kUndefinedValueRootIndex;
3606   }
3607 }
3608
3609
3610 Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
3611   return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
3612 }
3613
3614
3615 Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
3616     ExternalArrayType array_type) {
3617   switch (array_type) {
3618 #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
3619     case kExternal##Type##Array:                                              \
3620       return kFixed##Type##ArrayMapRootIndex;
3621
3622     TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
3623 #undef ARRAY_TYPE_TO_ROOT_INDEX
3624
3625     default:
3626       UNREACHABLE();
3627       return kUndefinedValueRootIndex;
3628   }
3629 }
3630
3631
3632 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3633     ElementsKind elementsKind) {
3634   switch (elementsKind) {
3635 #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
3636     case EXTERNAL_##TYPE##_ELEMENTS:                                          \
3637       return kEmptyExternal##Type##ArrayRootIndex;
3638
3639     TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
3640 #undef ELEMENT_KIND_TO_ROOT_INDEX
3641
3642     default:
3643       UNREACHABLE();
3644       return kUndefinedValueRootIndex;
3645   }
3646 }
3647
3648
3649 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3650   return ExternalArray::cast(
3651       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3652 }
3653
3654
3655 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3656   // We need to distinguish the minus zero value and this cannot be
3657   // done after conversion to int. Doing this by comparing bit
3658   // patterns is faster than using fpclassify() et al.
3659   if (IsMinusZero(value)) {
3660     return AllocateHeapNumber(-0.0, pretenure);
3661   }
3662
3663   int int_value = FastD2I(value);
3664   if (value == int_value && Smi::IsValid(int_value)) {
3665     return Smi::FromInt(int_value);
3666   }
3667
3668   // Materialize the value in the heap.
3669   return AllocateHeapNumber(value, pretenure);
3670 }
3671
3672
3673 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3674   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3675   STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
3676   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3677   Foreign* result;
3678   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3679   if (!maybe_result->To(&result)) return maybe_result;
3680   result->set_foreign_address(address);
3681   return result;
3682 }
3683
3684
3685 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3686   SharedFunctionInfo* share;
3687   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3688   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3689
3690   // Set pointer fields.
3691   share->set_name(name);
3692   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3693   share->set_code(illegal);
3694   share->set_optimized_code_map(Smi::FromInt(0));
3695   share->set_scope_info(ScopeInfo::Empty(isolate_));
3696   Code* construct_stub =
3697       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3698   share->set_construct_stub(construct_stub);
3699   share->set_instance_class_name(Object_string());
3700   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3701   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3702   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3703   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3704   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3705   share->set_ast_node_count(0);
3706   share->set_counters(0);
3707
3708   // Set integer fields (smi or int, depending on the architecture).
3709   share->set_length(0);
3710   share->set_formal_parameter_count(0);
3711   share->set_expected_nof_properties(0);
3712   share->set_num_literals(0);
3713   share->set_start_position_and_type(0);
3714   share->set_end_position(0);
3715   share->set_function_token_position(0);
3716   // All compiler hints default to false or 0.
3717   share->set_compiler_hints(0);
3718   share->set_opt_count_and_bailout_reason(0);
3719
3720   return share;
3721 }
3722
3723
3724 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3725                                            JSArray* arguments,
3726                                            int start_position,
3727                                            int end_position,
3728                                            Object* script,
3729                                            Object* stack_trace,
3730                                            Object* stack_frames) {
3731   Object* result;
3732   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3733     if (!maybe_result->ToObject(&result)) return maybe_result;
3734   }
3735   JSMessageObject* message = JSMessageObject::cast(result);
3736   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3737   message->initialize_elements();
3738   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3739   message->set_type(type);
3740   message->set_arguments(arguments);
3741   message->set_start_position(start_position);
3742   message->set_end_position(end_position);
3743   message->set_script(script);
3744   message->set_stack_trace(stack_trace);
3745   message->set_stack_frames(stack_frames);
3746   return result;
3747 }
3748
3749
3750 MaybeObject* Heap::AllocateExternalStringFromAscii(
3751     const ExternalAsciiString::Resource* resource) {
3752   size_t length = resource->length();
3753   if (length > static_cast<size_t>(String::kMaxLength)) {
3754     isolate()->context()->mark_out_of_memory();
3755     return Failure::OutOfMemoryException(0x5);
3756   }
3757
3758   Map* map = external_ascii_string_map();
3759   Object* result;
3760   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3761     if (!maybe_result->ToObject(&result)) return maybe_result;
3762   }
3763
3764   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3765   external_string->set_length(static_cast<int>(length));
3766   external_string->set_hash_field(String::kEmptyHashField);
3767   external_string->set_resource(resource);
3768
3769   return result;
3770 }
3771
3772
3773 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3774     const ExternalTwoByteString::Resource* resource) {
3775   size_t length = resource->length();
3776   if (length > static_cast<size_t>(String::kMaxLength)) {
3777     isolate()->context()->mark_out_of_memory();
3778     return Failure::OutOfMemoryException(0x6);
3779   }
3780
3781   // For small strings we check whether the resource contains only
3782   // one-byte characters.  If so, we use a different string map.
3783   static const size_t kOneByteCheckLengthLimit = 32;
3784   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3785       String::IsOneByte(resource->data(), static_cast<int>(length));
3786   Map* map = is_one_byte ?
3787       external_string_with_one_byte_data_map() : external_string_map();
3788   Object* result;
3789   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3790     if (!maybe_result->ToObject(&result)) return maybe_result;
3791   }
3792
3793   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3794   external_string->set_length(static_cast<int>(length));
3795   external_string->set_hash_field(String::kEmptyHashField);
3796   external_string->set_resource(resource);
3797
3798   return result;
3799 }
3800
3801
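// Returns a one-character string for the given code unit.  One-byte codes are
// served from (and added to) the single character string cache; other codes
// get a freshly allocated two-byte string.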
3802 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3803   if (code <= String::kMaxOneByteCharCode) {
3804     Object* value = single_character_string_cache()->get(code);
3805     if (value != undefined_value()) return value;
3806
3807     uint8_t buffer[1];
3808     buffer[0] = static_cast<uint8_t>(code);
3809     Object* result;
3810     OneByteStringKey key(Vector<const uint8_t>(buffer, 1), HashSeed());
3811     MaybeObject* maybe_result = InternalizeStringWithKey(&key);
3812
3813     if (!maybe_result->ToObject(&result)) return maybe_result;
3814     single_character_string_cache()->set(code, result);
3815     return result;
3816   }
3817
3818   SeqTwoByteString* result;
3819   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3820     if (!maybe_result->To<SeqTwoByteString>(&result)) return maybe_result;
3821   }
3822   result->SeqTwoByteStringSet(0, code);
3823   return result;
3824 }
3825
3826
3827 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3828   if (length < 0 || length > ByteArray::kMaxLength) {
3829     return Failure::OutOfMemoryException(0x7);
3830   }
3831   int size = ByteArray::SizeFor(length);
3832   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3833   Object* result;
3834   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3835     if (!maybe_result->ToObject(&result)) return maybe_result;
3836   }
3837
3838   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3839       byte_array_map());
3840   reinterpret_cast<ByteArray*>(result)->set_length(length);
3841   return result;
3842 }
3843
3844
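// Overwrites the range [addr, addr + size) with a filler object: a one- or
// two-pointer filler for the two smallest sizes, otherwise a FreeSpace object
// that records its own size.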
3845 void Heap::CreateFillerObjectAt(Address addr, int size) {
3846   if (size == 0) return;
3847   HeapObject* filler = HeapObject::FromAddress(addr);
3848   if (size == kPointerSize) {
3849     filler->set_map_no_write_barrier(one_pointer_filler_map());
3850   } else if (size == 2 * kPointerSize) {
3851     filler->set_map_no_write_barrier(two_pointer_filler_map());
3852   } else {
3853     filler->set_map_no_write_barrier(free_space_map());
3854     FreeSpace::cast(filler)->set_size(size);
3855   }
3856 }
3857
3858
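// Allocates an ExternalArray header object; the element storage itself lives
// outside the heap and is referenced via |external_pointer|.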
3859 MaybeObject* Heap::AllocateExternalArray(int length,
3860                                          ExternalArrayType array_type,
3861                                          void* external_pointer,
3862                                          PretenureFlag pretenure) {
3863   int size = ExternalArray::kAlignedSize;
3864   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3865   Object* result;
3866   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3867     if (!maybe_result->ToObject(&result)) return maybe_result;
3868   }
3869
3870   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
3871       MapForExternalArrayType(array_type));
3872   reinterpret_cast<ExternalArray*>(result)->set_length(length);
3873   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
3874       external_pointer);
3875
3876   return result;
3877 }
3878
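// Translates an ExternalArrayType into the per-element size in bytes and the
// ElementsKind used by the corresponding fixed typed array.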
3879 static void ForFixedTypedArray(ExternalArrayType array_type,
3880                                int* element_size,
3881                                ElementsKind* element_kind) {
3882   switch (array_type) {
3883 #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
3884     case kExternal##Type##Array:                                              \
3885       *element_size = size;                                                   \
3886       *element_kind = TYPE##_ELEMENTS;                                        \
3887       return;
3888
3889     TYPED_ARRAYS(TYPED_ARRAY_CASE)
3890 #undef TYPED_ARRAY_CASE
3891
3892     default:
3893       *element_size = 0;  // Bogus
3894       *element_kind = UINT8_ELEMENTS;  // Bogus
3895       UNREACHABLE();
3896   }
3897 }
3898
3899
3900 MaybeObject* Heap::AllocateFixedTypedArray(int length,
3901                                            ExternalArrayType array_type,
3902                                            PretenureFlag pretenure) {
3903   int element_size;
3904   ElementsKind elements_kind;
3905   ForFixedTypedArray(array_type, &element_size, &elements_kind);
3906   int size = OBJECT_POINTER_ALIGN(
3907       length * element_size + FixedTypedArrayBase::kDataOffset);
3908 #ifndef V8_HOST_ARCH_64_BIT
3909   if (array_type == kExternalFloat64Array) {
3910     size += kPointerSize;
3911   }
3912 #endif
3913   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
3914
3915   HeapObject* object;
3916   MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
3917   if (!maybe_object->To(&object)) return maybe_object;
3918
3919   if (array_type == kExternalFloat64Array) {
3920     object = EnsureDoubleAligned(this, object, size);
3921   }
3922
3923   FixedTypedArrayBase* elements =
3924       reinterpret_cast<FixedTypedArrayBase*>(object);
3925   elements->set_map(MapForFixedTypedArray(array_type));
3926   elements->set_length(length);
3927   return elements;
3928 }
3929
3930
3931 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
3932                               Code::Flags flags,
3933                               Handle<Object> self_reference,
3934                               bool immovable,
3935                               bool crankshafted,
3936                               int prologue_offset) {
3937   // Allocate ByteArray before the Code object, so that we do not risk
3938   // leaving an uninitialized Code object (and breaking the heap).
3939   ByteArray* reloc_info;
3940   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
3941   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
3942
3943   // Compute size.
3944   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
3945   int obj_size = Code::SizeFor(body_size);
3946   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
3947   MaybeObject* maybe_result;
3948   // Large code objects and code objects which should stay at a fixed address
3949   // are allocated in large object space.
3950   HeapObject* result;
3951   bool force_lo_space = obj_size > code_space()->AreaSize();
3952   if (force_lo_space) {
3953     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3954   } else {
3955     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
3956   }
3957   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3958
3959   if (immovable && !force_lo_space &&
3960       // Objects on the first page of each space are never moved.
3961       !code_space_->FirstPage()->Contains(result->address())) {
3962     // Discard the first code allocation, which was on a page where it could be
3963     // moved.
3964     CreateFillerObjectAt(result->address(), obj_size);
3965     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3966     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
3967   }
3968
3969   // Initialize the code object.
3970   result->set_map_no_write_barrier(code_map());
3971   Code* code = Code::cast(result);
3972   ASSERT(!isolate_->code_range()->exists() ||
3973       isolate_->code_range()->contains(code->address()));
3974   code->set_instruction_size(desc.instr_size);
3975   code->set_relocation_info(reloc_info);
3976   code->set_flags(flags);
3977   code->set_raw_kind_specific_flags1(0);
3978   code->set_raw_kind_specific_flags2(0);
3979   if (code->is_call_stub() || code->is_keyed_call_stub()) {
3980     code->set_check_type(RECEIVER_MAP_CHECK);
3981   }
3982   code->set_is_crankshafted(crankshafted);
3983   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
3984   code->set_raw_type_feedback_info(undefined_value());
3985   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
3986   code->set_gc_metadata(Smi::FromInt(0));
3987   code->set_ic_age(global_ic_age_);
3988   code->set_prologue_offset(prologue_offset);
3989   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
3990     code->set_marked_for_deoptimization(false);
3991   }
3992   code->set_constant_pool(empty_constant_pool_array());
3993
3994 #ifdef ENABLE_DEBUGGER_SUPPORT
3995   if (code->kind() == Code::FUNCTION) {
3996     code->set_has_debug_break_slots(
3997         isolate_->debugger()->IsDebuggerActive());
3998   }
3999 #endif
4000
4001   // Allow self references to the created code object by patching the handle to
4002   // point to the newly allocated Code object.
4003   if (!self_reference.is_null()) {
4004     *(self_reference.location()) = code;
4005   }
4006   // Migrate generated code.
4007   // The generated code can contain Object** values (typically from handles)
4008   // that are dereferenced during the copy to point directly to the actual heap
4009   // objects. These pointers can include references to the code object itself,
4010   // through the self_reference parameter.
4011   code->CopyFrom(desc);
4012
4013 #ifdef VERIFY_HEAP
4014   if (FLAG_verify_heap) {
4015     code->Verify();
4016   }
4017 #endif
4018   return code;
4019 }
4020
4021
4022 MaybeObject* Heap::CopyCode(Code* code) {
4023   // Allocate an object the same size as the code object.
4024   int obj_size = code->Size();
4025   MaybeObject* maybe_result;
4026   if (obj_size > code_space()->AreaSize()) {
4027     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4028   } else {
4029     maybe_result = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
4030   }
4031
4032   Object* result;
4033   if (!maybe_result->ToObject(&result)) return maybe_result;
4034
4035   // Copy code object.
4036   Address old_addr = code->address();
4037   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4038   CopyBlock(new_addr, old_addr, obj_size);
4039   // Relocate the copy.
4040   Code* new_code = Code::cast(result);
4041   ASSERT(!isolate_->code_range()->exists() ||
4042       isolate_->code_range()->contains(code->address()));
4043   new_code->Relocate(new_addr - old_addr);
4044   return new_code;
4045 }
4046
4047
4048 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4049   // Allocate ByteArray before the Code object, so that we do not risk
4050   // leaving an uninitialized Code object (and breaking the heap).
4051   Object* reloc_info_array;
4052   { MaybeObject* maybe_reloc_info_array =
4053         AllocateByteArray(reloc_info.length(), TENURED);
4054     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4055       return maybe_reloc_info_array;
4056     }
4057   }
4058
4059   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4060
4061   int new_obj_size = Code::SizeFor(new_body_size);
4062
4063   Address old_addr = code->address();
4064
4065   size_t relocation_offset =
4066       static_cast<size_t>(code->instruction_end() - old_addr);
4067
4068   MaybeObject* maybe_result;
4069   if (new_obj_size > code_space()->AreaSize()) {
4070     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4071   } else {
4072     maybe_result = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
4073   }
4074
4075   Object* result;
4076   if (!maybe_result->ToObject(&result)) return maybe_result;
4077
4078   // Copy code object.
4079   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4080
4081   // Copy header and instructions.
4082   CopyBytes(new_addr, old_addr, relocation_offset);
4083
4084   Code* new_code = Code::cast(result);
4085   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4086
4087   // Copy patched rinfo.
4088   CopyBytes(new_code->relocation_start(),
4089             reloc_info.start(),
4090             static_cast<size_t>(reloc_info.length()));
4091
4092   // Relocate the copy.
4093   ASSERT(!isolate_->code_range()->exists() ||
4094       isolate_->code_range()->contains(code->address()));
4095   new_code->Relocate(new_addr - old_addr);
4096
4097 #ifdef VERIFY_HEAP
4098   if (FLAG_verify_heap) {
4099     code->Verify();
4100   }
4101 #endif
4102   return new_code;
4103 }
4104
4105
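// Initializes an AllocationMemento to point at |allocation_site| and, when
// allocation-site pretenuring is enabled, bumps the site's memento create
// count.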
4106 void Heap::InitializeAllocationMemento(AllocationMemento* memento,
4107                                        AllocationSite* allocation_site) {
4108   memento->set_map_no_write_barrier(allocation_memento_map());
4109   ASSERT(allocation_site->map() == allocation_site_map());
4110   memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
4111   if (FLAG_allocation_site_pretenuring) {
4112     allocation_site->IncrementMementoCreateCount();
4113   }
4114 }
4115
4116
4117 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4118     Handle<AllocationSite> allocation_site) {
4119   ASSERT(gc_state_ == NOT_IN_GC);
4120   ASSERT(map->instance_type() != MAP_TYPE);
4121   // If allocation failures are disallowed, we may allocate in a different
4122   // space when new space is full and the object is not a large object.
4123   AllocationSpace retry_space =
4124       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4125   int size = map->instance_size() + AllocationMemento::kSize;
4126   Object* result;
4127   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4128   if (!maybe_result->ToObject(&result)) return maybe_result;
4129   // No need for write barrier since object is white and map is in old space.
4130   HeapObject::cast(result)->set_map_no_write_barrier(map);
4131   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4132       reinterpret_cast<Address>(result) + map->instance_size());
4133   InitializeAllocationMemento(alloc_memento, *allocation_site);
4134   return result;
4135 }
4136
4137
4138 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4139   ASSERT(gc_state_ == NOT_IN_GC);
4140   ASSERT(map->instance_type() != MAP_TYPE);
4141   // If allocation failures are disallowed, we may allocate in a different
4142   // space when new space is full and the object is not a large object.
4143   AllocationSpace retry_space =
4144       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4145   int size = map->instance_size();
4146   Object* result;
4147   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4148   if (!maybe_result->ToObject(&result)) return maybe_result;
4149   // No need for write barrier since object is white and map is in old space.
4150   HeapObject::cast(result)->set_map_no_write_barrier(map);
4151   return result;
4152 }
4153
4154
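// Initializes a freshly allocated JSFunction: empty properties and elements,
// the shared info and its code, the given prototype, an undefined context and
// next-function link, and empty literals.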
4155 void Heap::InitializeFunction(JSFunction* function,
4156                               SharedFunctionInfo* shared,
4157                               Object* prototype) {
4158   ASSERT(!prototype->IsMap());
4159   function->initialize_properties();
4160   function->initialize_elements();
4161   function->set_shared(shared);
4162   function->set_code(shared->code());
4163   function->set_prototype_or_initial_map(prototype);
4164   function->set_context(undefined_value());
4165   function->set_literals_or_bindings(empty_fixed_array());
4166   function->set_next_function_link(undefined_value());
4167 }
4168
4169
4170 MaybeObject* Heap::AllocateFunction(Map* function_map,
4171                                     SharedFunctionInfo* shared,
4172                                     Object* prototype,
4173                                     PretenureFlag pretenure) {
4174   AllocationSpace space =
4175       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4176   Object* result;
4177   { MaybeObject* maybe_result = Allocate(function_map, space);
4178     if (!maybe_result->ToObject(&result)) return maybe_result;
4179   }
4180   InitializeFunction(JSFunction::cast(result), shared, prototype);
4181   return result;
4182 }
4183
4184
4185 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4186   // To get fast allocation and map sharing for arguments objects we
4187   // allocate them based on an arguments boilerplate.
4188
4189   JSObject* boilerplate;
4190   int arguments_object_size;
4191   bool strict_mode_callee = callee->IsJSFunction() &&
4192       !JSFunction::cast(callee)->shared()->is_classic_mode();
4193   if (strict_mode_callee) {
4194     boilerplate =
4195         isolate()->context()->native_context()->
4196             strict_mode_arguments_boilerplate();
4197     arguments_object_size = kArgumentsObjectSizeStrict;
4198   } else {
4199     boilerplate =
4200         isolate()->context()->native_context()->arguments_boilerplate();
4201     arguments_object_size = kArgumentsObjectSize;
4202   }
4203
4204   // Check that the size of the boilerplate matches our
4205   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4206   // on the size being a known constant.
4207   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4208
4209   // Do the allocation.
4210   Object* result;
4211   { MaybeObject* maybe_result =
4212         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4213     if (!maybe_result->ToObject(&result)) return maybe_result;
4214   }
4215
4216   // Copy the content. The arguments boilerplate doesn't have any
4217   // fields that point to new space so it's safe to skip the write
4218   // barrier here.
4219   CopyBlock(HeapObject::cast(result)->address(),
4220             boilerplate->address(),
4221             JSObject::kHeaderSize);
4222
4223   // Set the length property.
4224   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4225                                                 Smi::FromInt(length),
4226                                                 SKIP_WRITE_BARRIER);
4227   // Set the callee property for non-strict mode arguments object only.
4228   if (!strict_mode_callee) {
4229     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4230                                                   callee);
4231   }
4232
4233   // Check the state of the object
4234   ASSERT(JSObject::cast(result)->HasFastProperties());
4235   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4236
4237   return result;
4238 }
4239
4240
4241 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4242                                      FixedArray* properties,
4243                                      Map* map) {
4244   obj->set_properties(properties);
4245   obj->initialize_elements();
4246   // TODO(1240798): Initialize the object's body using valid initial values
4247   // according to the object's initial map.  For example, if the map's
4248   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4249   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4250   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4251   // verification code has to cope with (temporarily) invalid objects.  See
4252   // for example, JSArray::JSArrayVerify().
4253   Object* filler;
4254   // We cannot always fill with one_pointer_filler_map because objects
4255   // created from API functions expect their internal fields to be initialized
4256   // with undefined_value.
4257   // Pre-allocated fields need to be initialized with undefined_value as well
4258   // so that object accesses before the constructor completes (e.g. in the
4259   // debugger) will not cause a crash.
4260   if (map->constructor()->IsJSFunction() &&
4261       JSFunction::cast(map->constructor())->shared()->
4262           IsInobjectSlackTrackingInProgress()) {
4263     // We might want to shrink the object later.
4264     ASSERT(obj->GetInternalFieldCount() == 0);
4265     filler = Heap::one_pointer_filler_map();
4266   } else {
4267     filler = Heap::undefined_value();
4268   }
4269   obj->InitializeBody(map, Heap::undefined_value(), filler);
4270 }
4271
4272
4273 MaybeObject* Heap::AllocateJSObjectFromMap(
4274     Map* map, PretenureFlag pretenure, bool allocate_properties) {
4275   // JSFunctions should be allocated using AllocateFunction to be
4276   // properly initialized.
4277   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4278
4279   // Both types of global objects should be allocated using
4280   // AllocateGlobalObject to be properly initialized.
4281   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4282   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4283
4284   // Allocate the backing storage for the properties.
4285   FixedArray* properties;
4286   if (allocate_properties) {
4287     int prop_size = map->InitialPropertiesLength();
4288     ASSERT(prop_size >= 0);
4289     { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4290       if (!maybe_properties->To(&properties)) return maybe_properties;
4291     }
4292   } else {
4293     properties = empty_fixed_array();
4294   }
4295
4296   // Allocate the JSObject.
4297   int size = map->instance_size();
4298   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4299   Object* obj;
4300   MaybeObject* maybe_obj = Allocate(map, space);
4301   if (!maybe_obj->To(&obj)) return maybe_obj;
4302
4303   // Initialize the JSObject.
4304   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4305   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4306          JSObject::cast(obj)->HasExternalArrayElements());
4307   return obj;
4308 }
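// Illustrative caller pattern for the allocators in this file (a sketch with
// hypothetical local names, not code taken from a real call site): every
// allocator returns a MaybeObject* that is either the new object or a
// retry-after-GC failure, and the caller must unwrap it before use.
//
//   Object* obj;
//   { MaybeObject* maybe_obj =
//         heap->AllocateJSObjectFromMap(map, NOT_TENURED, true);
//     if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // Propagate failure.
//   }
//   JSObject* js_obj = JSObject::cast(obj);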
4309
4310
4311 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4312     Map* map, Handle<AllocationSite> allocation_site) {
4313   // JSFunctions should be allocated using AllocateFunction to be
4314   // properly initialized.
4315   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4316
4317   // Both types of global objects should be allocated using
4318   // AllocateGlobalObject to be properly initialized.
4319   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4320   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4321
4322   // Allocate the backing storage for the properties.
4323   int prop_size = map->InitialPropertiesLength();
4324   ASSERT(prop_size >= 0);
4325   FixedArray* properties;
4326   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4327     if (!maybe_properties->To(&properties)) return maybe_properties;
4328   }
4329
4330   // Allocate the JSObject.
4331   int size = map->instance_size();
4332   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4333   Object* obj;
4334   MaybeObject* maybe_obj =
4335       AllocateWithAllocationSite(map, space, allocation_site);
4336   if (!maybe_obj->To(&obj)) return maybe_obj;
4337
4338   // Initialize the JSObject.
4339   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4340   ASSERT(JSObject::cast(obj)->HasFastElements());
4341   return obj;
4342 }
4343
4344
4345 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4346                                     PretenureFlag pretenure) {
4347   ASSERT(constructor->has_initial_map());
4348   // Allocate the object based on the constructor's initial map.
4349   MaybeObject* result = AllocateJSObjectFromMap(
4350       constructor->initial_map(), pretenure);
4351 #ifdef DEBUG
4352   // Make sure result is NOT a global object if valid.
4353   Object* non_failure;
4354   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4355 #endif
4356   return result;
4357 }
4358
4359
4360 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4361     Handle<AllocationSite> allocation_site) {
4362   ASSERT(constructor->has_initial_map());
4363   // Allocate the object based on the constructor's initial map, or on the
4364   // elements kind advice recorded in the allocation site.
4365   Map* initial_map = constructor->initial_map();
4366
4367   ElementsKind to_kind = allocation_site->GetElementsKind();
4368   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4369   if (to_kind != initial_map->elements_kind()) {
4370     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4371     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4372     // Possibly alter the mode, since we found an updated elements kind
4373     // in the allocation site.
4374     mode = AllocationSite::GetMode(to_kind);
4375   }
4376
4377   MaybeObject* result;
4378   if (mode == TRACK_ALLOCATION_SITE) {
4379     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4380         allocation_site);
4381   } else {
4382     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4383   }
4384 #ifdef DEBUG
4385   // Make sure result is NOT a global object if valid.
4386   Object* non_failure;
4387   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4388 #endif
4389   return result;
4390 }
4391
4392
4393 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4394   // Allocate a fresh map. Modules do not have a prototype.
4395   Map* map;
4396   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4397   if (!maybe_map->To(&map)) return maybe_map;
4398   // Allocate the object based on the map.
4399   JSModule* module;
4400   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4401   if (!maybe_module->To(&module)) return maybe_module;
4402   module->set_context(context);
4403   module->set_scope_info(scope_info);
4404   return module;
4405 }
4406
4407
4408 MaybeObject* Heap::AllocateJSArrayAndStorage(
4409     ElementsKind elements_kind,
4410     int length,
4411     int capacity,
4412     ArrayStorageAllocationMode mode,
4413     PretenureFlag pretenure) {
4414   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4415   JSArray* array;
4416   if (!maybe_array->To(&array)) return maybe_array;
4417
4418   // TODO(mvstanton): this body of code is duplicated from AllocateJSArrayStorage
4419   // below for performance reasons.
4420   ASSERT(capacity >= length);
4421
4422   if (capacity == 0) {
4423     array->set_length(Smi::FromInt(0));
4424     array->set_elements(empty_fixed_array());
4425     return array;
4426   }
4427
4428   FixedArrayBase* elms;
4429   MaybeObject* maybe_elms = NULL;
4430   if (IsFastDoubleElementsKind(elements_kind)) {
4431     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4432       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4433     } else {
4434       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4435       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4436     }
4437   } else {
4438     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4439     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4440       maybe_elms = AllocateUninitializedFixedArray(capacity);
4441     } else {
4442       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4443       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4444     }
4445   }
4446   if (!maybe_elms->To(&elms)) return maybe_elms;
4447
4448   array->set_elements(elms);
4449   array->set_length(Smi::FromInt(length));
4450   return array;
4451 }
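// A minimal sketch of how the allocator above might be used to create an
// array with pre-reserved, hole-initialized backing storage; the receiver
// 'heap' and the chosen elements kind are illustrative assumptions.
//
//   JSArray* array;
//   { MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(
//         FAST_HOLEY_ELEMENTS, 0, 16, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
//         NOT_TENURED);
//     if (!maybe_array->To(&array)) return maybe_array;
//   }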
4452
4453
4454 MaybeObject* Heap::AllocateJSArrayStorage(
4455     JSArray* array,
4456     int length,
4457     int capacity,
4458     ArrayStorageAllocationMode mode) {
4459   ASSERT(capacity >= length);
4460
4461   if (capacity == 0) {
4462     array->set_length(Smi::FromInt(0));
4463     array->set_elements(empty_fixed_array());
4464     return array;
4465   }
4466
4467   FixedArrayBase* elms;
4468   MaybeObject* maybe_elms = NULL;
4469   ElementsKind elements_kind = array->GetElementsKind();
4470   if (IsFastDoubleElementsKind(elements_kind)) {
4471     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4472       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4473     } else {
4474       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4475       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4476     }
4477   } else {
4478     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4479     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4480       maybe_elms = AllocateUninitializedFixedArray(capacity);
4481     } else {
4482       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4483       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4484     }
4485   }
4486   if (!maybe_elms->To(&elms)) return maybe_elms;
4487
4488   array->set_elements(elms);
4489   array->set_length(Smi::FromInt(length));
4490   return array;
4491 }
4492
4493
4494 MaybeObject* Heap::AllocateJSArrayWithElements(
4495     FixedArrayBase* elements,
4496     ElementsKind elements_kind,
4497     int length,
4498     PretenureFlag pretenure) {
4499   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4500   JSArray* array;
4501   if (!maybe_array->To(&array)) return maybe_array;
4502
4503   array->set_elements(elements);
4504   array->set_length(Smi::FromInt(length));
4505   array->ValidateElements();
4506   return array;
4507 }
4508
4509
4510 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4511   // Allocate map.
4512   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4513   // maps. Will probably depend on the identity of the handler object, too.
4514   Map* map;
4515   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4516   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4517   map->set_prototype(prototype);
4518
4519   // Allocate the proxy object.
4520   JSProxy* result;
4521   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4522   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4523   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4524   result->set_handler(handler);
4525   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4526   return result;
4527 }
4528
4529
4530 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4531                                            Object* call_trap,
4532                                            Object* construct_trap,
4533                                            Object* prototype) {
4534   // Allocate map.
4535   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4536   // maps. Will probably depend on the identity of the handler object, too.
4537   Map* map;
4538   MaybeObject* maybe_map_obj =
4539       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4540   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4541   map->set_prototype(prototype);
4542
4543   // Allocate the proxy object.
4544   JSFunctionProxy* result;
4545   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4546   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4547   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4548   result->set_handler(handler);
4549   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4550   result->set_call_trap(call_trap);
4551   result->set_construct_trap(construct_trap);
4552   return result;
4553 }
4554
4555
4556 MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4557   // Never used to copy functions.  If functions need to be copied we
4558   // have to be careful to clear the literals array.
4559   SLOW_ASSERT(!source->IsJSFunction());
4560
4561   // Make the clone.
4562   Map* map = source->map();
4563   int object_size = map->instance_size();
4564   Object* clone;
4565
4566   ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
4567
4568   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4569
4570   // If we're forced to always allocate, we use the general allocation
4571   // functions which may leave us with an object in old space.
4572   if (always_allocate()) {
4573     { MaybeObject* maybe_clone =
4574           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4575       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4576     }
4577     Address clone_address = HeapObject::cast(clone)->address();
4578     CopyBlock(clone_address,
4579               source->address(),
4580               object_size);
4581     // Update write barrier for all fields that lie beyond the header.
4582     RecordWrites(clone_address,
4583                  JSObject::kHeaderSize,
4584                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4585   } else {
4586     wb_mode = SKIP_WRITE_BARRIER;
4587
4588     { int adjusted_object_size = site != NULL
4589           ? object_size + AllocationMemento::kSize
4590           : object_size;
4591       MaybeObject* maybe_clone =
4592           AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
4593       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4594     }
4595     SLOW_ASSERT(InNewSpace(clone));
4596     // Since we know the clone is allocated in new space, we can copy
4597     // the contents without worrying about updating the write barrier.
4598     CopyBlock(HeapObject::cast(clone)->address(),
4599               source->address(),
4600               object_size);
4601
4602     if (site != NULL) {
4603       AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4604           reinterpret_cast<Address>(clone) + object_size);
4605       InitializeAllocationMemento(alloc_memento, site);
4606     }
4607   }
4608
4609   SLOW_ASSERT(
4610       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4611   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4612   FixedArray* properties = FixedArray::cast(source->properties());
4613   // Update elements if necessary.
4614   if (elements->length() > 0) {
4615     Object* elem;
4616     { MaybeObject* maybe_elem;
4617       if (elements->map() == fixed_cow_array_map()) {
4618         maybe_elem = FixedArray::cast(elements);
4619       } else if (source->HasFastDoubleElements()) {
4620         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4621       } else {
4622         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4623       }
4624       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4625     }
4626     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4627   }
4628   // Update properties if necessary.
4629   if (properties->length() > 0) {
4630     Object* prop;
4631     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4632       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4633     }
4634     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4635   }
4636   // Return the new clone.
4637   return clone;
4638 }
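// When an AllocationSite is supplied on the new-space path above, the memento
// is placed directly behind the cloned object, so the raw allocation has the
// layout
//
//   | cloned JSObject (object_size bytes) | AllocationMemento |
//
// which is why adjusted_object_size is object_size + AllocationMemento::kSize.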
4639
4640
4641 MaybeObject* Heap::ReinitializeJSReceiver(
4642     JSReceiver* object, InstanceType type, int size) {
4643   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4644
4645   // Allocate fresh map.
4646   // TODO(rossberg): Once we optimize proxies, cache these maps.
4647   Map* map;
4648   MaybeObject* maybe = AllocateMap(type, size);
4649   if (!maybe->To<Map>(&map)) return maybe;
4650
4651   // Check that the receiver has at least the size of the fresh object.
4652   int size_difference = object->map()->instance_size() - map->instance_size();
4653   ASSERT(size_difference >= 0);
4654
4655   map->set_prototype(object->map()->prototype());
4656
4657   // Allocate the backing storage for the properties.
4658   int prop_size = map->unused_property_fields() - map->inobject_properties();
4659   Object* properties;
4660   maybe = AllocateFixedArray(prop_size, TENURED);
4661   if (!maybe->ToObject(&properties)) return maybe;
4662
4663   // Functions require some allocation, which might fail here.
4664   SharedFunctionInfo* shared = NULL;
4665   if (type == JS_FUNCTION_TYPE) {
4666     String* name;
4667     OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
4668                          HashSeed());
4669     maybe = InternalizeStringWithKey(&key);
4670     if (!maybe->To<String>(&name)) return maybe;
4671     maybe = AllocateSharedFunctionInfo(name);
4672     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4673   }
4674
4675   // Because of possible retries of this function after failure,
4676   // we must NOT fail after this point, where we have changed the type!
4677
4678   // Reset the map for the object.
4679   object->set_map(map);
4680   JSObject* jsobj = JSObject::cast(object);
4681
4682   // Reinitialize the object from the constructor map.
4683   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
4684
4685   // Functions require some minimal initialization.
4686   if (type == JS_FUNCTION_TYPE) {
4687     map->set_function_with_prototype(true);
4688     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
4689     JSFunction::cast(object)->set_context(
4690         isolate()->context()->native_context());
4691   }
4692
4693   // Put in filler if the new object is smaller than the old.
4694   if (size_difference > 0) {
4695     CreateFillerObjectAt(
4696         object->address() + map->instance_size(), size_difference);
4697   }
4698
4699   return object;
4700 }
4701
4702
4703 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
4704                                              JSGlobalProxy* object) {
4705   ASSERT(constructor->has_initial_map());
4706   Map* map = constructor->initial_map();
4707
4708   // Check that the already allocated object has the same size and type as
4709   // objects allocated using the constructor.
4710   ASSERT(map->instance_size() == object->map()->instance_size());
4711   ASSERT(map->instance_type() == object->map()->instance_type());
4712
4713   // Allocate the backing storage for the properties.
4714   int prop_size = map->unused_property_fields() - map->inobject_properties();
4715   Object* properties;
4716   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
4717     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4718   }
4719
4720   // Reset the map for the object.
4721   object->set_map(constructor->initial_map());
4722
4723   // Reinitialize the object from the constructor map.
4724   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
4725   return object;
4726 }
4727
4728
4729 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
4730                                              PretenureFlag pretenure) {
4731   int length = string.length();
4732   if (length == 1) {
4733     return Heap::LookupSingleCharacterStringFromCode(string[0]);
4734   }
4735   Object* result;
4736   { MaybeObject* maybe_result =
4737         AllocateRawOneByteString(string.length(), pretenure);
4738     if (!maybe_result->ToObject(&result)) return maybe_result;
4739   }
4740
4741   // Copy the characters into the new object.
4742   CopyChars(SeqOneByteString::cast(result)->GetChars(),
4743             string.start(),
4744             length);
4745   return result;
4746 }
4747
4748
4749 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
4750                                               int non_ascii_start,
4751                                               PretenureFlag pretenure) {
4752   // Continue counting the number of characters in the UTF-8 string, starting
4753   // from the first non-ascii character or word.
4754   Access<UnicodeCache::Utf8Decoder>
4755       decoder(isolate_->unicode_cache()->utf8_decoder());
4756   decoder->Reset(string.start() + non_ascii_start,
4757                  string.length() - non_ascii_start);
4758   int utf16_length = decoder->Utf16Length();
4759   ASSERT(utf16_length > 0);
4760   // Allocate string.
4761   Object* result;
4762   {
4763     int chars = non_ascii_start + utf16_length;
4764     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
4765     if (!maybe_result->ToObject(&result)) return maybe_result;
4766   }
4767   // Convert and copy the characters into the new object.
4768   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
4769   // Copy ascii portion.
4770   uint16_t* data = twobyte->GetChars();
4771   if (non_ascii_start != 0) {
4772     const char* ascii_data = string.start();
4773     for (int i = 0; i < non_ascii_start; i++) {
4774       *data++ = *ascii_data++;
4775     }
4776   }
4777   // Now write the remainder.
4778   decoder->WriteUtf16(data, utf16_length);
4779   return result;
4780 }
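// Worked example for the slow UTF-8 path above: for the input "a\xC3\xA9"
// ("a" followed by U+00E9), non_ascii_start is 1 and the decoder reports a
// utf16_length of 1, so a two-byte string of 2 characters is allocated; the
// leading ASCII byte is copied directly and the decoder writes the remaining
// UTF-16 code unit.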
4781
4782
4783 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
4784                                              PretenureFlag pretenure) {
4785   // Check if the string can be represented as a one-byte string.
4786   Object* result;
4787   int length = string.length();
4788   const uc16* start = string.start();
4789
4790   if (String::IsOneByte(start, length)) {
4791     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
4792     if (!maybe_result->ToObject(&result)) return maybe_result;
4793     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
4794   } else {  // It's not a one byte string.
4795     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
4796     if (!maybe_result->ToObject(&result)) return maybe_result;
4797     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
4798   }
4799   return result;
4800 }
4801
4802
4803 Map* Heap::InternalizedStringMapForString(String* string) {
4804   // If the string is in new space it cannot be used as an internalized string.
4805   if (InNewSpace(string)) return NULL;
4806
4807   // Find the corresponding internalized string map for strings.
4808   switch (string->map()->instance_type()) {
4809     case STRING_TYPE: return internalized_string_map();
4810     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
4811     case CONS_STRING_TYPE: return cons_internalized_string_map();
4812     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
4813     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
4814     case EXTERNAL_ASCII_STRING_TYPE:
4815       return external_ascii_internalized_string_map();
4816     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4817       return external_internalized_string_with_one_byte_data_map();
4818     case SHORT_EXTERNAL_STRING_TYPE:
4819       return short_external_internalized_string_map();
4820     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
4821       return short_external_ascii_internalized_string_map();
4822     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
4823       return short_external_internalized_string_with_one_byte_data_map();
4824     default: return NULL;  // No match found.
4825   }
4826 }
4827
4828
4829 static inline void WriteOneByteData(Vector<const char> vector,
4830                                     uint8_t* chars,
4831                                     int len) {
4832   // Only works for ascii.
4833   ASSERT(vector.length() == len);
4834   OS::MemCopy(chars, vector.start(), len);
4835 }
4836
4837 static inline void WriteTwoByteData(Vector<const char> vector,
4838                                     uint16_t* chars,
4839                                     int len) {
4840   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
4841   unsigned stream_length = vector.length();
4842   while (stream_length != 0) {
4843     unsigned consumed = 0;
4844     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
4845     ASSERT(c != unibrow::Utf8::kBadChar);
4846     ASSERT(consumed <= stream_length);
4847     stream_length -= consumed;
4848     stream += consumed;
4849     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
4850       len -= 2;
4851       if (len < 0) break;
4852       *chars++ = unibrow::Utf16::LeadSurrogate(c);
4853       *chars++ = unibrow::Utf16::TrailSurrogate(c);
4854     } else {
4855       len -= 1;
4856       if (len < 0) break;
4857       *chars++ = c;
4858     }
4859   }
4860   ASSERT(stream_length == 0);
4861   ASSERT(len == 0);
4862 }
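// Worked example for the decoding loop above: a supplementary character such
// as U+1F600 arrives as four UTF-8 bytes, decodes to a code point above
// unibrow::Utf16::kMaxNonSurrogateCharCode, and is therefore written as a
// lead/trail surrogate pair, consuming two of the remaining 'len' UTF-16
// code units.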
4863
4864
4865 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
4866   ASSERT(s->length() == len);
4867   String::WriteToFlat(s, chars, 0, len);
4868 }
4869
4870
4871 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
4872   ASSERT(s->length() == len);
4873   String::WriteToFlat(s, chars, 0, len);
4874 }
4875
4876
4877 template<bool is_one_byte, typename T>
4878 MaybeObject* Heap::AllocateInternalizedStringImpl(
4879     T t, int chars, uint32_t hash_field) {
4880   ASSERT(chars >= 0);
4881   // Compute map and object size.
4882   int size;
4883   Map* map;
4884
4885   if (is_one_byte) {
4886     if (chars > SeqOneByteString::kMaxLength) {
4887       return Failure::OutOfMemoryException(0x9);
4888     }
4889     map = ascii_internalized_string_map();
4890     size = SeqOneByteString::SizeFor(chars);
4891   } else {
4892     if (chars > SeqTwoByteString::kMaxLength) {
4893       return Failure::OutOfMemoryException(0xa);
4894     }
4895     map = internalized_string_map();
4896     size = SeqTwoByteString::SizeFor(chars);
4897   }
4898   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
4899
4900   // Allocate string.
4901   Object* result;
4902   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4903     if (!maybe_result->ToObject(&result)) return maybe_result;
4904   }
4905
4906   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
4907   // Set length and hash fields of the allocated string.
4908   String* answer = String::cast(result);
4909   answer->set_length(chars);
4910   answer->set_hash_field(hash_field);
4911
4912   ASSERT_EQ(size, answer->Size());
4913
4914   if (is_one_byte) {
4915     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
4916   } else {
4917     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
4918   }
4919   return answer;
4920 }
4921
4922
4923 // Need explicit instantiations.
4924 template
4925 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
4926 template
4927 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
4928     String*, int, uint32_t);
4929 template
4930 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
4931     Vector<const char>, int, uint32_t);
4932
4933
4934 MaybeObject* Heap::AllocateRawOneByteString(int length,
4935                                             PretenureFlag pretenure) {
4936   if (length < 0 || length > SeqOneByteString::kMaxLength) {
4937     return Failure::OutOfMemoryException(0xb);
4938   }
4939   int size = SeqOneByteString::SizeFor(length);
4940   ASSERT(size <= SeqOneByteString::kMaxSize);
4941   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4942
4943   Object* result;
4944   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4945     if (!maybe_result->ToObject(&result)) return maybe_result;
4946   }
4947
4948   // Partially initialize the object.
4949   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
4950   String::cast(result)->set_length(length);
4951   String::cast(result)->set_hash_field(String::kEmptyHashField);
4952   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4953
4954   return result;
4955 }
4956
4957
4958 MaybeObject* Heap::AllocateRawTwoByteString(int length,
4959                                             PretenureFlag pretenure) {
4960   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
4961     return Failure::OutOfMemoryException(0xc);
4962   }
4963   int size = SeqTwoByteString::SizeFor(length);
4964   ASSERT(size <= SeqTwoByteString::kMaxSize);
4965   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4966
4967   Object* result;
4968   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4969     if (!maybe_result->ToObject(&result)) return maybe_result;
4970   }
4971
4972   // Partially initialize the object.
4973   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
4974   String::cast(result)->set_length(length);
4975   String::cast(result)->set_hash_field(String::kEmptyHashField);
4976   ASSERT_EQ(size, HeapObject::cast(result)->Size());
4977   return result;
4978 }
4979
4980
4981 MaybeObject* Heap::AllocateJSArray(
4982     ElementsKind elements_kind,
4983     PretenureFlag pretenure) {
4984   Context* native_context = isolate()->context()->native_context();
4985   JSFunction* array_function = native_context->array_function();
4986   Map* map = array_function->initial_map();
4987   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
4988   if (transition_map != NULL) map = transition_map;
4989   return AllocateJSObjectFromMap(map, pretenure);
4990 }
4991
4992
4993 MaybeObject* Heap::AllocateEmptyFixedArray() {
4994   int size = FixedArray::SizeFor(0);
4995   Object* result;
4996   { MaybeObject* maybe_result =
4997         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
4998     if (!maybe_result->ToObject(&result)) return maybe_result;
4999   }
5000   // Initialize the object.
5001   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5002       fixed_array_map());
5003   reinterpret_cast<FixedArray*>(result)->set_length(0);
5004   return result;
5005 }
5006
5007
5008 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5009   return AllocateExternalArray(0, array_type, NULL, TENURED);
5010 }
5011
5012
5013 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5014   int len = src->length();
5015   Object* obj;
5016   { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5017     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5018   }
5019   if (InNewSpace(obj)) {
5020     HeapObject* dst = HeapObject::cast(obj);
5021     dst->set_map_no_write_barrier(map);
5022     CopyBlock(dst->address() + kPointerSize,
5023               src->address() + kPointerSize,
5024               FixedArray::SizeFor(len) - kPointerSize);
5025     return obj;
5026   }
5027   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5028   FixedArray* result = FixedArray::cast(obj);
5029   result->set_length(len);
5030
5031   // Copy the content
5032   DisallowHeapAllocation no_gc;
5033   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5034   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5035   return result;
5036 }
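// Note on the two copy paths above: when the destination lands in new space
// the body can be block-copied, because pointers stored in new-space objects
// never need to be recorded in the store buffer; when it lands in old space
// the elements are copied one by one with the write barrier mode returned by
// GetWriteBarrierMode(), so that any old-to-new pointers are recorded.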
5037
5038
5039 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5040                                                Map* map) {
5041   int len = src->length();
5042   Object* obj;
5043   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5044     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5045   }
5046   HeapObject* dst = HeapObject::cast(obj);
5047   dst->set_map_no_write_barrier(map);
5048   CopyBlock(
5049       dst->address() + FixedDoubleArray::kLengthOffset,
5050       src->address() + FixedDoubleArray::kLengthOffset,
5051       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5052   return obj;
5053 }
5054
5055
5056 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5057                                                 Map* map) {
5058   int int64_entries = src->count_of_int64_entries();
5059   int ptr_entries = src->count_of_ptr_entries();
5060   int int32_entries = src->count_of_int32_entries();
5061   Object* obj;
5062   { MaybeObject* maybe_obj =
5063         AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5064     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5065   }
5066   HeapObject* dst = HeapObject::cast(obj);
5067   dst->set_map_no_write_barrier(map);
5068   CopyBlock(
5069       dst->address() + ConstantPoolArray::kLengthOffset,
5070       src->address() + ConstantPoolArray::kLengthOffset,
5071       ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5072           - ConstantPoolArray::kLengthOffset);
5073   return obj;
5074 }
5075
5076
5077 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5078   if (length < 0 || length > FixedArray::kMaxLength) {
5079     return Failure::OutOfMemoryException(0xe);
5080   }
5081   int size = FixedArray::SizeFor(length);
5082   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5083
5084   return AllocateRaw(size, space, OLD_POINTER_SPACE);
5085 }
5086
5087
5088 MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5089                                                 PretenureFlag pretenure,
5090                                                 Object* filler) {
5091   ASSERT(length >= 0);
5092   ASSERT(empty_fixed_array()->IsFixedArray());
5093   if (length == 0) return empty_fixed_array();
5094
5095   ASSERT(!InNewSpace(filler));
5096   Object* result;
5097   { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5098     if (!maybe_result->ToObject(&result)) return maybe_result;
5099   }
5100
5101   HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5102   FixedArray* array = FixedArray::cast(result);
5103   array->set_length(length);
5104   MemsetPointer(array->data_start(), filler, length);
5105   return array;
5106 }
5107
5108
5109 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5110   return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5111 }
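// Minimal usage sketch for the fixed-array allocators above (the receiver
// 'heap' and the stored value are hypothetical):
//
//   Object* storage;
//   { MaybeObject* maybe = heap->AllocateFixedArray(3, TENURED);
//     if (!maybe->ToObject(&storage)) return maybe;
//   }
//   FixedArray::cast(storage)->set(0, Smi::FromInt(42));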
5112
5113
5114 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5115                                                PretenureFlag pretenure) {
5116   return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5117 }
5118
5119
5120 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5121   if (length == 0) return empty_fixed_array();
5122
5123   Object* obj;
5124   { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5125     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5126   }
5127
5128   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5129       fixed_array_map());
5130   FixedArray::cast(obj)->set_length(length);
5131   return obj;
5132 }
5133
5134
5135 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5136   int size = FixedDoubleArray::SizeFor(0);
5137   Object* result;
5138   { MaybeObject* maybe_result =
5139         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5140     if (!maybe_result->ToObject(&result)) return maybe_result;
5141   }
5142   // Initialize the object.
5143   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5144       fixed_double_array_map());
5145   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5146   return result;
5147 }
5148
5149
5150 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5151     int length,
5152     PretenureFlag pretenure) {
5153   if (length == 0) return empty_fixed_array();
5154
5155   Object* elements_object;
5156   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5157   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5158   FixedDoubleArray* elements =
5159       reinterpret_cast<FixedDoubleArray*>(elements_object);
5160
5161   elements->set_map_no_write_barrier(fixed_double_array_map());
5162   elements->set_length(length);
5163   return elements;
5164 }
5165
5166
5167 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5168     int length,
5169     PretenureFlag pretenure) {
5170   if (length == 0) return empty_fixed_array();
5171
5172   Object* elements_object;
5173   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5174   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5175   FixedDoubleArray* elements =
5176       reinterpret_cast<FixedDoubleArray*>(elements_object);
5177
5178   for (int i = 0; i < length; ++i) {
5179     elements->set_the_hole(i);
5180   }
5181
5182   elements->set_map_no_write_barrier(fixed_double_array_map());
5183   elements->set_length(length);
5184   return elements;
5185 }
5186
5187
5188 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5189                                                PretenureFlag pretenure) {
5190   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5191     return Failure::OutOfMemoryException(0xf);
5192   }
5193   int size = FixedDoubleArray::SizeFor(length);
5194 #ifndef V8_HOST_ARCH_64_BIT
5195   size += kPointerSize;
5196 #endif
5197   AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5198
5199   HeapObject* object;
5200   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5201     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5202   }
5203
5204   return EnsureDoubleAligned(this, object, size);
5205 }
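// The extra kPointerSize reserved on 32-bit hosts above gives
// EnsureDoubleAligned enough slack to shift the object onto an 8-byte
// boundary, filling the unused word with a one-pointer filler, so that the
// double fields of the array are naturally aligned.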
5206
5207
5208 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5209                                              int number_of_ptr_entries,
5210                                              int number_of_int32_entries) {
5211   ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5212          number_of_int32_entries > 0);
5213   int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5214                                         number_of_ptr_entries,
5215                                         number_of_int32_entries);
5216 #ifndef V8_HOST_ARCH_64_BIT
5217   size += kPointerSize;
5218 #endif
5219   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5220
5221   HeapObject* object;
5222   { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5223     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5224   }
5225   object = EnsureDoubleAligned(this, object, size);
5226   HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5227
5228   ConstantPoolArray* constant_pool =
5229       reinterpret_cast<ConstantPoolArray*>(object);
5230   constant_pool->SetEntryCounts(number_of_int64_entries,
5231                                 number_of_ptr_entries,
5232                                 number_of_int32_entries);
5233   if (number_of_ptr_entries > 0) {
5234     MemsetPointer(
5235         HeapObject::RawField(
5236             constant_pool,
5237             constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5238         undefined_value(),
5239         number_of_ptr_entries);
5240   }
5241   return constant_pool;
5242 }
5243
5244
5245 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
5246   int size = ConstantPoolArray::SizeFor(0, 0, 0);
5247   Object* result;
5248   { MaybeObject* maybe_result =
5249         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5250     if (!maybe_result->ToObject(&result)) return maybe_result;
5251   }
5252   HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
5253   ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
5254   return result;
5255 }
5256
5257
5258 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5259   Object* result;
5260   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5261     if (!maybe_result->ToObject(&result)) return maybe_result;
5262   }
5263   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5264       hash_table_map());
5265   ASSERT(result->IsHashTable());
5266   return result;
5267 }
5268
5269
5270 MaybeObject* Heap::AllocateSymbol() {
5271   // Statically ensure that it is safe to allocate symbols in paged spaces.
5272   STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
5273
5274   Object* result;
5275   MaybeObject* maybe =
5276       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5277   if (!maybe->ToObject(&result)) return maybe;
5278
5279   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5280
5281   // Generate a random hash value.
5282   int hash;
5283   int attempts = 0;
5284   do {
5285     hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5286     attempts++;
5287   } while (hash == 0 && attempts < 30);
5288   if (hash == 0) hash = 1;  // never return 0
5289
5290   Symbol::cast(result)->set_hash_field(
5291       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5292   Symbol::cast(result)->set_name(undefined_value());
5293   Symbol::cast(result)->set_flags(Smi::FromInt(0));
5294
5295   ASSERT(!Symbol::cast(result)->is_private());
5296   return result;
5297 }
5298
5299
5300 MaybeObject* Heap::AllocatePrivateSymbol() {
5301   MaybeObject* maybe = AllocateSymbol();
5302   Symbol* symbol;
5303   if (!maybe->To(&symbol)) return maybe;
5304   symbol->set_is_private(true);
5305   return symbol;
5306 }
5307
5308
5309 MaybeObject* Heap::AllocateNativeContext() {
5310   Object* result;
5311   { MaybeObject* maybe_result =
5312         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5313     if (!maybe_result->ToObject(&result)) return maybe_result;
5314   }
5315   Context* context = reinterpret_cast<Context*>(result);
5316   context->set_map_no_write_barrier(native_context_map());
5317   context->set_js_array_maps(undefined_value());
5318   ASSERT(context->IsNativeContext());
5319   ASSERT(result->IsContext());
5320   return result;
5321 }
5322
5323
5324 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5325                                          ScopeInfo* scope_info) {
5326   Object* result;
5327   { MaybeObject* maybe_result =
5328         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5329     if (!maybe_result->ToObject(&result)) return maybe_result;
5330   }
5331   Context* context = reinterpret_cast<Context*>(result);
5332   context->set_map_no_write_barrier(global_context_map());
5333   context->set_closure(function);
5334   context->set_previous(function->context());
5335   context->set_extension(scope_info);
5336   context->set_global_object(function->context()->global_object());
5337   ASSERT(context->IsGlobalContext());
5338   ASSERT(result->IsContext());
5339   return context;
5340 }
5341
5342
5343 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5344   Object* result;
5345   { MaybeObject* maybe_result =
5346         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5347     if (!maybe_result->ToObject(&result)) return maybe_result;
5348   }
5349   Context* context = reinterpret_cast<Context*>(result);
5350   context->set_map_no_write_barrier(module_context_map());
5351   // Instance link will be set later.
5352   context->set_extension(Smi::FromInt(0));
5353   return context;
5354 }
5355
5356
5357 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5358   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5359   Object* result;
5360   { MaybeObject* maybe_result = AllocateFixedArray(length);
5361     if (!maybe_result->ToObject(&result)) return maybe_result;
5362   }
5363   Context* context = reinterpret_cast<Context*>(result);
5364   context->set_map_no_write_barrier(function_context_map());
5365   context->set_closure(function);
5366   context->set_previous(function->context());
5367   context->set_extension(Smi::FromInt(0));
5368   context->set_global_object(function->context()->global_object());
5369   return context;
5370 }
5371
5372
5373 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5374                                         Context* previous,
5375                                         String* name,
5376                                         Object* thrown_object) {
5377   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5378   Object* result;
5379   { MaybeObject* maybe_result =
5380         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5381     if (!maybe_result->ToObject(&result)) return maybe_result;
5382   }
5383   Context* context = reinterpret_cast<Context*>(result);
5384   context->set_map_no_write_barrier(catch_context_map());
5385   context->set_closure(function);
5386   context->set_previous(previous);
5387   context->set_extension(name);
5388   context->set_global_object(previous->global_object());
5389   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5390   return context;
5391 }
5392
5393
5394 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5395                                        Context* previous,
5396                                        JSReceiver* extension) {
5397   Object* result;
5398   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5399     if (!maybe_result->ToObject(&result)) return maybe_result;
5400   }
5401   Context* context = reinterpret_cast<Context*>(result);
5402   context->set_map_no_write_barrier(with_context_map());
5403   context->set_closure(function);
5404   context->set_previous(previous);
5405   context->set_extension(extension);
5406   context->set_global_object(previous->global_object());
5407   return context;
5408 }
5409
5410
5411 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5412                                         Context* previous,
5413                                         ScopeInfo* scope_info) {
5414   Object* result;
5415   { MaybeObject* maybe_result =
5416         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5417     if (!maybe_result->ToObject(&result)) return maybe_result;
5418   }
5419   Context* context = reinterpret_cast<Context*>(result);
5420   context->set_map_no_write_barrier(block_context_map());
5421   context->set_closure(function);
5422   context->set_previous(previous);
5423   context->set_extension(scope_info);
5424   context->set_global_object(previous->global_object());
5425   return context;
5426 }
5427
5428
5429 MaybeObject* Heap::AllocateScopeInfo(int length) {
5430   FixedArray* scope_info;
5431   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5432   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5433   scope_info->set_map_no_write_barrier(scope_info_map());
5434   return scope_info;
5435 }
5436
5437
5438 MaybeObject* Heap::AllocateExternal(void* value) {
5439   Foreign* foreign;
5440   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5441     if (!maybe_result->To(&foreign)) return maybe_result;
5442   }
5443   JSObject* external;
5444   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5445     if (!maybe_result->To(&external)) return maybe_result;
5446   }
5447   external->SetInternalField(0, foreign);
5448   return external;
5449 }
5450
5451
5452 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5453   Map* map;
5454   switch (type) {
5455 #define MAKE_CASE(NAME, Name, name) \
5456     case NAME##_TYPE: map = name##_map(); break;
5457 STRUCT_LIST(MAKE_CASE)
5458 #undef MAKE_CASE
5459     default:
5460       UNREACHABLE();
5461       return Failure::InternalError();
5462   }
5463   int size = map->instance_size();
5464   AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5465   Object* result;
5466   { MaybeObject* maybe_result = Allocate(map, space);
5467     if (!maybe_result->ToObject(&result)) return maybe_result;
5468   }
5469   Struct::cast(result)->InitializeBody(size);
5470   return result;
5471 }
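// As an illustration of the macro expansion above, a STRUCT_LIST entry of the
// form (SCRIPT, Script, script) expands to
//
//   case SCRIPT_TYPE: map = script_map(); break;
//
// pairing each struct type with its canonical map.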
5472
5473
5474 bool Heap::IsHeapIterable() {
5475   return (!old_pointer_space()->was_swept_conservatively() &&
5476           !old_data_space()->was_swept_conservatively());
5477 }
5478
5479
5480 void Heap::EnsureHeapIsIterable() {
5481   ASSERT(AllowHeapAllocation::IsAllowed());
5482   if (!IsHeapIterable()) {
5483     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5484   }
5485   ASSERT(IsHeapIterable());
5486 }
5487
5488
5489 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5490   incremental_marking()->Step(step_size,
5491                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5492
5493   if (incremental_marking()->IsComplete()) {
5494     bool uncommit = false;
5495     if (gc_count_at_last_idle_gc_ == gc_count_) {
5496       // No GC since the last full GC, the mutator is probably not active.
5497       isolate_->compilation_cache()->Clear();
5498       uncommit = true;
5499     }
5500     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5501     mark_sweeps_since_idle_round_started_++;
5502     gc_count_at_last_idle_gc_ = gc_count_;
5503     if (uncommit) {
5504       new_space_.Shrink();
5505       UncommitFromSpace();
5506     }
5507   }
5508 }
5509
5510
5511 bool Heap::IdleNotification(int hint) {
5512   // Hints greater than this value indicate that
5513   // the embedder is requesting a lot of GC work.
5514   const int kMaxHint = 1000;
5515   const int kMinHintForIncrementalMarking = 10;
5516   // Minimal hint that allows to do full GC.
5517   const int kMinHintForFullGC = 100;
5518   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5519   // The size factor is in range [5..250]. The numbers here are chosen from
5520   // experiments. If you changes them, make sure to test with
5521   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5522   intptr_t step_size =
5523       size_factor * IncrementalMarking::kAllocatedThreshold;
5524
5525   if (contexts_disposed_ > 0) {
5526     contexts_disposed_ = 0;
5527     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5528     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5529         incremental_marking()->IsStopped()) {
5530       HistogramTimerScope scope(isolate_->counters()->gc_context());
5531       CollectAllGarbage(kReduceMemoryFootprintMask,
5532                         "idle notification: contexts disposed");
5533     } else {
5534       AdvanceIdleIncrementalMarking(step_size);
5535     }
5536
5537     // After context disposal there is likely a lot of garbage remaining, reset
5538     // the idle notification counters in order to trigger more incremental GCs
5539     // on subsequent idle notifications.
5540     StartIdleRound();
5541     return false;
5542   }
5543
5544   if (!FLAG_incremental_marking || Serializer::enabled()) {
5545     return IdleGlobalGC();
5546   }
5547
5548   // By doing small chunks of GC work in each IdleNotification,
5549   // perform a round of incremental GCs and after that wait until
5550   // the mutator creates enough garbage to justify a new round.
5551   // An incremental GC progresses as follows:
5552   // 1. many incremental marking steps,
5553   // 2. one old space mark-sweep-compact,
5554   // 3. many lazy sweep steps.
5555   // Use mark-sweep-compact events to count incremental GCs in a round.
5556
5557   if (incremental_marking()->IsStopped()) {
5558     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5559         !IsSweepingComplete() &&
5560         !AdvanceSweepers(static_cast<int>(step_size))) {
5561       return false;
5562     }
5563   }
5564
5565   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5566     if (EnoughGarbageSinceLastIdleRound()) {
5567       StartIdleRound();
5568     } else {
5569       return true;
5570     }
5571   }
5572
5573   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5574                               mark_sweeps_since_idle_round_started_;
5575
5576   if (incremental_marking()->IsStopped()) {
5577     // If there are no more than two GCs left in this idle round and we are
5578     // allowed to do a full GC, then make those GCs full in order to compact
5579     // the code space.
5580     // TODO(ulan): Once we enable code compaction for incremental marking,
5581     // we can get rid of this special case and always start incremental marking.
5582     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5583       CollectAllGarbage(kReduceMemoryFootprintMask,
5584                         "idle notification: finalize idle round");
5585       mark_sweeps_since_idle_round_started_++;
5586     } else if (hint > kMinHintForIncrementalMarking) {
5587       incremental_marking()->Start();
5588     }
5589   }
5590   if (!incremental_marking()->IsStopped() &&
5591       hint > kMinHintForIncrementalMarking) {
5592     AdvanceIdleIncrementalMarking(step_size);
5593   }
5594
5595   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5596     FinishIdleRound();
5597     return true;
5598   }
5599
5600   return false;
5601 }
5602
5603
5604 bool Heap::IdleGlobalGC() {
5605   static const int kIdlesBeforeScavenge = 4;
5606   static const int kIdlesBeforeMarkSweep = 7;
5607   static const int kIdlesBeforeMarkCompact = 8;
5608   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5609   static const unsigned int kGCsBetweenCleanup = 4;
5610
5611   if (!last_idle_notification_gc_count_init_) {
5612     last_idle_notification_gc_count_ = gc_count_;
5613     last_idle_notification_gc_count_init_ = true;
5614   }
5615
5616   bool uncommit = true;
5617   bool finished = false;
5618
5619   // Reset the number of idle notifications received when a number of
5620   // GCs have taken place. This allows another round of cleanup based
5621   // on idle notifications if enough work has been carried out to
5622   // provoke a number of garbage collections.
5623   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5624     number_idle_notifications_ =
5625         Min(number_idle_notifications_ + 1, kMaxIdleCount);
5626   } else {
5627     number_idle_notifications_ = 0;
5628     last_idle_notification_gc_count_ = gc_count_;
5629   }
5630
5631   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5632     CollectGarbage(NEW_SPACE, "idle notification");
5633     new_space_.Shrink();
5634     last_idle_notification_gc_count_ = gc_count_;
5635   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5636     // Before doing the mark-sweep collections we clear the
5637     // compilation cache to avoid hanging on to source code and
5638     // generated code for cached functions.
5639     isolate_->compilation_cache()->Clear();
5640
5641     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5642     new_space_.Shrink();
5643     last_idle_notification_gc_count_ = gc_count_;
5644
5645   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5646     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5647     new_space_.Shrink();
5648     last_idle_notification_gc_count_ = gc_count_;
5649     number_idle_notifications_ = 0;
5650     finished = true;
5651   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5652     // If we have received more than kIdlesBeforeMarkCompact idle
5653     // notifications we do not perform any cleanup because we don't
5654     // expect to gain much by doing so.
5655     finished = true;
5656   }
5657
5658   if (uncommit) UncommitFromSpace();
5659
5660   return finished;
5661 }
5662
5663
5664 #ifdef DEBUG
5665
5666 void Heap::Print() {
5667   if (!HasBeenSetUp()) return;
5668   isolate()->PrintStack(stdout);
5669   AllSpaces spaces(this);
5670   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5671     space->Print();
5672   }
5673 }
5674
5675
5676 void Heap::ReportCodeStatistics(const char* title) {
5677   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5678   PagedSpace::ResetCodeStatistics(isolate());
5679   // We do not look for code in new space, map space, or old space.  If code
5680   // somehow ends up in those spaces, we would miss it here.
5681   code_space_->CollectCodeStatistics();
5682   lo_space_->CollectCodeStatistics();
5683   PagedSpace::ReportCodeStatistics(isolate());
5684 }
5685
5686
5687 // This function expects that NewSpace's allocated objects histogram is
5688 // populated (via a call to CollectStatistics or else as a side effect of a
5689 // just-completed scavenge collection).
5690 void Heap::ReportHeapStatistics(const char* title) {
5691   USE(title);
5692   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5693          title, gc_count_);
5694   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5695          old_generation_allocation_limit_);
5696
5697   PrintF("\n");
5698   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5699   isolate_->global_handles()->PrintStats();
5700   PrintF("\n");
5701
5702   PrintF("Heap statistics : ");
5703   isolate_->memory_allocator()->ReportStatistics();
5704   PrintF("To space : ");
5705   new_space_.ReportStatistics();
5706   PrintF("Old pointer space : ");
5707   old_pointer_space_->ReportStatistics();
5708   PrintF("Old data space : ");
5709   old_data_space_->ReportStatistics();
5710   PrintF("Code space : ");
5711   code_space_->ReportStatistics();
5712   PrintF("Map space : ");
5713   map_space_->ReportStatistics();
5714   PrintF("Cell space : ");
5715   cell_space_->ReportStatistics();
5716   PrintF("PropertyCell space : ");
5717   property_cell_space_->ReportStatistics();
5718   PrintF("Large object space : ");
5719   lo_space_->ReportStatistics();
5720   PrintF(">>>>>> ========================================= >>>>>>\n");
5721 }
5722
5723 #endif  // DEBUG
5724
5725 bool Heap::Contains(HeapObject* value) {
5726   return Contains(value->address());
5727 }
5728
5729
5730 bool Heap::Contains(Address addr) {
5731   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5732   return HasBeenSetUp() &&
5733     (new_space_.ToSpaceContains(addr) ||
5734      old_pointer_space_->Contains(addr) ||
5735      old_data_space_->Contains(addr) ||
5736      code_space_->Contains(addr) ||
5737      map_space_->Contains(addr) ||
5738      cell_space_->Contains(addr) ||
5739      property_cell_space_->Contains(addr) ||
5740      lo_space_->SlowContains(addr));
5741 }
5742
5743
5744 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
5745   return InSpace(value->address(), space);
5746 }
5747
5748
5749 bool Heap::InSpace(Address addr, AllocationSpace space) {
5750   if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
5751   if (!HasBeenSetUp()) return false;
5752
5753   switch (space) {
5754     case NEW_SPACE:
5755       return new_space_.ToSpaceContains(addr);
5756     case OLD_POINTER_SPACE:
5757       return old_pointer_space_->Contains(addr);
5758     case OLD_DATA_SPACE:
5759       return old_data_space_->Contains(addr);
5760     case CODE_SPACE:
5761       return code_space_->Contains(addr);
5762     case MAP_SPACE:
5763       return map_space_->Contains(addr);
5764     case CELL_SPACE:
5765       return cell_space_->Contains(addr);
5766     case PROPERTY_CELL_SPACE:
5767       return property_cell_space_->Contains(addr);
5768     case LO_SPACE:
5769       return lo_space_->SlowContains(addr);
5770   }
5771
5772   return false;
5773 }
5774
5775
5776 #ifdef VERIFY_HEAP
5777 void Heap::Verify() {
5778   CHECK(HasBeenSetUp());
5779
5780   store_buffer()->Verify();
5781
5782   VerifyPointersVisitor visitor;
5783   IterateRoots(&visitor, VISIT_ONLY_STRONG);
5784
5785   new_space_.Verify();
5786
5787   old_pointer_space_->Verify(&visitor);
5788   map_space_->Verify(&visitor);
5789
5790   VerifyPointersVisitor no_dirty_regions_visitor;
5791   old_data_space_->Verify(&no_dirty_regions_visitor);
5792   code_space_->Verify(&no_dirty_regions_visitor);
5793   cell_space_->Verify(&no_dirty_regions_visitor);
5794   property_cell_space_->Verify(&no_dirty_regions_visitor);
5795
5796   lo_space_->Verify();
5797 }
5798 #endif
5799
5800
5801 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
5802   Utf8StringKey key(string, HashSeed());
5803   return InternalizeStringWithKey(&key);
5804 }
5805
5806
5807 MaybeObject* Heap::InternalizeString(String* string) {
5808   if (string->IsInternalizedString()) return string;
5809   Object* result = NULL;
5810   Object* new_table;
5811   { MaybeObject* maybe_new_table =
5812         string_table()->LookupString(string, &result);
5813     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5814   }
5815   // Can't use set_string_table because StringTable::cast knows that
5816   // StringTable is a singleton and checks for identity.
5817   roots_[kStringTableRootIndex] = new_table;
5818   ASSERT(result != NULL);
5819   return result;
5820 }
5821
5822
5823 bool Heap::InternalizeStringIfExists(String* string, String** result) {
5824   if (string->IsInternalizedString()) {
5825     *result = string;
5826     return true;
5827   }
5828   return string_table()->LookupStringIfExists(string, result);
5829 }
5830
5831
5832 MaybeObject* Heap::InternalizeStringWithKey(HashTableKey* key) {
5833   Object* result = NULL;
5834   Object* new_table;
5835   { MaybeObject* maybe_new_table =
5836         string_table()->LookupKey(key, &result);
5837     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
5838   }
5839   // Can't use set_string_table because StringTable::cast knows that
5840   // StringTable is a singleton and checks for identity.
5841   roots_[kStringTableRootIndex] = new_table;
5842   ASSERT(result != NULL);
5843   return result;
5844 }
5845
5846
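// Overwrites every word of from-space with a recognizable zap value, so that
// stale pointers into from-space are easy to spot while debugging.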
5847 void Heap::ZapFromSpace() {
5848   NewSpacePageIterator it(new_space_.FromSpaceStart(),
5849                           new_space_.FromSpaceEnd());
5850   while (it.has_next()) {
5851     NewSpacePage* page = it.next();
5852     for (Address cursor = page->area_start(), limit = page->area_end();
5853          cursor < limit;
5854          cursor += kPointerSize) {
5855       Memory::Address_at(cursor) = kFromSpaceZapValue;
5856     }
5857   }
5858 }
5859
5860
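// Visits every pointer-sized slot in [start, end). Slots that still point
// into from-space are handed to |callback|, which updates them; if the
// updated value is still in new space, the slot is re-entered into the store
// buffer directly.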
5861 void Heap::IterateAndMarkPointersToFromSpace(Address start,
5862                                              Address end,
5863                                              ObjectSlotCallback callback) {
5864   Address slot_address = start;
5865
5866   // We are not collecting slots on new space objects during mutation,
5867   // so we have to scan for pointers to evacuation candidates when we
5868   // promote objects. But we should not record any slots in non-black
5869   // objects. Grey objects' slots would be rescanned anyway, and white
5870   // objects might not survive until the end of the collection, so
5871   // recording their slots would violate the invariant.
5872   bool record_slots = false;
5873   if (incremental_marking()->IsCompacting()) {
5874     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
5875     record_slots = Marking::IsBlack(mark_bit);
5876   }
5877
5878   while (slot_address < end) {
5879     Object** slot = reinterpret_cast<Object**>(slot_address);
5880     Object* object = *slot;
5881     // If the store buffer becomes overfull we mark pages as being exempt from
5882     // the store buffer.  These pages are scanned to find pointers that point
5883     // to the new space.  In that case we may hit newly promoted objects and
5884     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
5885     if (object->IsHeapObject()) {
5886       if (Heap::InFromSpace(object)) {
5887         callback(reinterpret_cast<HeapObject**>(slot),
5888                  HeapObject::cast(object));
5889         Object* new_object = *slot;
5890         if (InNewSpace(new_object)) {
5891           SLOW_ASSERT(Heap::InToSpace(new_object));
5892           SLOW_ASSERT(new_object->IsHeapObject());
5893           store_buffer_.EnterDirectlyIntoStoreBuffer(
5894               reinterpret_cast<Address>(slot));
5895         }
5896         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
5897       } else if (record_slots &&
5898                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
5899         mark_compact_collector()->RecordSlot(slot, slot, object);
5900       }
5901     }
5902     slot_address += kPointerSize;
5903   }
5904 }
5905
5906
5907 #ifdef DEBUG
5908 typedef bool (*CheckStoreBufferFilter)(Object** addr);
5909
5910
5911 bool IsAMapPointerAddress(Object** addr) {
5912   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
5913   int mod = a % Map::kSize;
5914   return mod >= Map::kPointerFieldsBeginOffset &&
5915          mod < Map::kPointerFieldsEndOffset;
5916 }
5917
5918
5919 bool EverythingsAPointer(Object** addr) {
5920   return true;
5921 }
5922
5923
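// Walks the slots in [current, limit) and checks that every pointer into new
// space is present in the (sorted) store buffer.  Free-space fillers are
// skipped, as is the current linear allocation area
// [special_garbage_start, special_garbage_end), which may contain junk.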
5924 static void CheckStoreBuffer(Heap* heap,
5925                              Object** current,
5926                              Object** limit,
5927                              Object**** store_buffer_position,
5928                              Object*** store_buffer_top,
5929                              CheckStoreBufferFilter filter,
5930                              Address special_garbage_start,
5931                              Address special_garbage_end) {
5932   Map* free_space_map = heap->free_space_map();
5933   for ( ; current < limit; current++) {
5934     Object* o = *current;
5935     Address current_address = reinterpret_cast<Address>(current);
5936     // Skip free space.
5937     if (o == free_space_map) {
5939       FreeSpace* free_space =
5940           FreeSpace::cast(HeapObject::FromAddress(current_address));
5941       int skip = free_space->Size();
5942       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
5943       ASSERT(skip > 0);
5944       current_address += skip - kPointerSize;
5945       current = reinterpret_cast<Object**>(current_address);
5946       continue;
5947     }
5948     // Skip the current linear allocation space between top and limit which is
5949     // unmarked with the free space map, but can contain junk.
5950     if (current_address == special_garbage_start &&
5951         special_garbage_end != special_garbage_start) {
5952       current_address = special_garbage_end - kPointerSize;
5953       current = reinterpret_cast<Object**>(current_address);
5954       continue;
5955     }
5956     if (!(*filter)(current)) continue;
5957     ASSERT(current_address < special_garbage_start ||
5958            current_address >= special_garbage_end);
5959     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
5960     // We have to check that the pointer does not point into new space
5961     // without trying to cast it to a heap object since the hash field of
5962     // a string can contain values like 1 and 3 which are tagged null
5963     // pointers.
5964     if (!heap->InNewSpace(o)) continue;
5965     while (**store_buffer_position < current &&
5966            *store_buffer_position < store_buffer_top) {
5967       (*store_buffer_position)++;
5968     }
5969     if (**store_buffer_position != current ||
5970         *store_buffer_position == store_buffer_top) {
5971       Object** obj_start = current;
5972       while (!(*obj_start)->IsMap()) obj_start--;
5973       UNREACHABLE();
5974     }
5975   }
5976 }
5977
5978
5979 // Check that the store buffer contains all intergenerational pointers by
5980 // scanning a page and ensuring that all pointers to young space are in the
5981 // store buffer.
5982 void Heap::OldPointerSpaceCheckStoreBuffer() {
5983   OldSpace* space = old_pointer_space();
5984   PageIterator pages(space);
5985
5986   store_buffer()->SortUniq();
5987
5988   while (pages.has_next()) {
5989     Page* page = pages.next();
5990     Object** current = reinterpret_cast<Object**>(page->area_start());
5991
5992     Address end = page->area_end();
5993
5994     Object*** store_buffer_position = store_buffer()->Start();
5995     Object*** store_buffer_top = store_buffer()->Top();
5996
5997     Object** limit = reinterpret_cast<Object**>(end);
5998     CheckStoreBuffer(this,
5999                      current,
6000                      limit,
6001                      &store_buffer_position,
6002                      store_buffer_top,
6003                      &EverythingsAPointer,
6004                      space->top(),
6005                      space->limit());
6006   }
6007 }
6008
6009
6010 void Heap::MapSpaceCheckStoreBuffer() {
6011   MapSpace* space = map_space();
6012   PageIterator pages(space);
6013
6014   store_buffer()->SortUniq();
6015
6016   while (pages.has_next()) {
6017     Page* page = pages.next();
6018     Object** current = reinterpret_cast<Object**>(page->area_start());
6019
6020     Address end = page->area_end();
6021
6022     Object*** store_buffer_position = store_buffer()->Start();
6023     Object*** store_buffer_top = store_buffer()->Top();
6024
6025     Object** limit = reinterpret_cast<Object**>(end);
6026     CheckStoreBuffer(this,
6027                      current,
6028                      limit,
6029                      &store_buffer_position,
6030                      store_buffer_top,
6031                      &IsAMapPointerAddress,
6032                      space->top(),
6033                      space->limit());
6034   }
6035 }
6036
6037
6038 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6039   LargeObjectIterator it(lo_space());
6040   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6041     // We only have code, sequential strings, or fixed arrays in large
6042     // object space, and only fixed arrays can possibly contain pointers to
6043     // the young generation.
6044     if (object->IsFixedArray()) {
6045       Object*** store_buffer_position = store_buffer()->Start();
6046       Object*** store_buffer_top = store_buffer()->Top();
6047       Object** current = reinterpret_cast<Object**>(object->address());
6048       Object** limit =
6049           reinterpret_cast<Object**>(object->address() + object->Size());
6050       CheckStoreBuffer(this,
6051                        current,
6052                        limit,
6053                        &store_buffer_position,
6054                        store_buffer_top,
6055                        &EverythingsAPointer,
6056                        NULL,
6057                        NULL);
6058     }
6059   }
6060 }
6061 #endif
6062
6063
6064 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6065   IterateStrongRoots(v, mode);
6066   IterateWeakRoots(v, mode);
6067 }
6068
6069
6070 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6071   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6072   v->Synchronize(VisitorSynchronization::kStringTable);
6073   if (mode != VISIT_ALL_IN_SCAVENGE &&
6074       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6075     // Scavenge collections have special processing for this.
6076     external_string_table_.Iterate(v);
6077   }
6078   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6079 }
6080
6081
6082 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6083   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6084   v->Synchronize(VisitorSynchronization::kStrongRootList);
6085
6086   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6087   v->Synchronize(VisitorSynchronization::kInternalizedString);
6088
6089   isolate_->bootstrapper()->Iterate(v);
6090   v->Synchronize(VisitorSynchronization::kBootstrapper);
6091   isolate_->Iterate(v);
6092   v->Synchronize(VisitorSynchronization::kTop);
6093   Relocatable::Iterate(isolate_, v);
6094   v->Synchronize(VisitorSynchronization::kRelocatable);
6095
6096 #ifdef ENABLE_DEBUGGER_SUPPORT
6097   isolate_->debug()->Iterate(v);
6098   if (isolate_->deoptimizer_data() != NULL) {
6099     isolate_->deoptimizer_data()->Iterate(v);
6100   }
6101 #endif
6102   v->Synchronize(VisitorSynchronization::kDebug);
6103   isolate_->compilation_cache()->Iterate(v);
6104   v->Synchronize(VisitorSynchronization::kCompilationCache);
6105
6106   // Iterate over local handles in handle scopes.
6107   isolate_->handle_scope_implementer()->Iterate(v);
6108   isolate_->IterateDeferredHandles(v);
6109   v->Synchronize(VisitorSynchronization::kHandleScope);
6110
6111   // Iterate over the builtin code objects and code stubs in the
6112   // heap. Note that it is not necessary to iterate over code objects
6113   // on scavenge collections.
6114   if (mode != VISIT_ALL_IN_SCAVENGE) {
6115     isolate_->builtins()->IterateBuiltins(v);
6116   }
6117   v->Synchronize(VisitorSynchronization::kBuiltins);
6118
6119   // Iterate over global handles.
6120   switch (mode) {
6121     case VISIT_ONLY_STRONG:
6122       isolate_->global_handles()->IterateStrongRoots(v);
6123       break;
6124     case VISIT_ALL_IN_SCAVENGE:
6125       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6126       break;
6127     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6128     case VISIT_ALL:
6129       isolate_->global_handles()->IterateAllRoots(v);
6130       break;
6131   }
6132   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6133
6134   // Iterate over eternal handles.
6135   if (mode == VISIT_ALL_IN_SCAVENGE) {
6136     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6137   } else {
6138     isolate_->eternal_handles()->IterateAllRoots(v);
6139   }
6140   v->Synchronize(VisitorSynchronization::kEternalHandles);
6141
6142   // Iterate over pointers being held by inactive threads.
6143   isolate_->thread_manager()->Iterate(v);
6144   v->Synchronize(VisitorSynchronization::kThreadManager);
6145
6146   // Iterate over the pointers the Serialization/Deserialization code is
6147   // holding.
6148   // During garbage collection this keeps the partial snapshot cache alive.
6149   // During deserialization of the startup snapshot this creates the partial
6150   // snapshot cache and deserializes the objects it refers to.  During
6151   // serialization this does nothing, since the partial snapshot cache is
6152   // empty.  However the next thing we do is create the partial snapshot,
6153   // filling up the partial snapshot cache with objects it needs as we go.
6154   SerializerDeserializer::Iterate(isolate_, v);
6155   // We don't do a v->Synchronize call here, because in debug mode that will
6156   // output a flag to the snapshot.  However at this point the serializer and
6157   // deserializer are deliberately a little unsynchronized (see above) so the
6158   // checking of the sync flag in the snapshot would fail.
6159 }
6160
6161
6162 // TODO(1236194): Since the heap size is configurable on the command line
6163 // and through the API, we should gracefully handle the case that the heap
6164 // size is not big enough to fit all the initial objects.
6165 bool Heap::ConfigureHeap(int max_semispace_size,
6166                          intptr_t max_old_gen_size,
6167                          intptr_t max_executable_size) {
6168   if (HasBeenSetUp()) return false;
6169
6170   if (FLAG_stress_compaction) {
6171     // This will cause more frequent GCs when stressing.
6172     max_semispace_size_ = Page::kPageSize;
6173   }
6174
6175   if (max_semispace_size > 0) {
6176     if (max_semispace_size < Page::kPageSize) {
6177       max_semispace_size = Page::kPageSize;
6178       if (FLAG_trace_gc) {
6179         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6180                  Page::kPageSize >> 10);
6181       }
6182     }
6183     max_semispace_size_ = max_semispace_size;
6184   }
6185
6186   if (Snapshot::IsEnabled()) {
6187     // If we are using a snapshot we always reserve the default amount
6188     // of memory for each semispace because the snapshot contains
6189     // write-barrier code that relies on the size and alignment of new
6190     // space.  We therefore cannot use a larger max semispace size
6191     // than the default reserved semispace size.
6192     if (max_semispace_size_ > reserved_semispace_size_) {
6193       max_semispace_size_ = reserved_semispace_size_;
6194       if (FLAG_trace_gc) {
6195         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6196                  reserved_semispace_size_ >> 10);
6197       }
6198     }
6199   } else {
6200     // If we are not using snapshots we reserve space for the actual
6201     // max semispace size.
6202     reserved_semispace_size_ = max_semispace_size_;
6203   }
6204
6205   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6206   if (max_executable_size > 0) {
6207     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6208   }
6209
6210   // The max executable size must be less than or equal to the max old
6211   // generation size.
6212   if (max_executable_size_ > max_old_generation_size_) {
6213     max_executable_size_ = max_old_generation_size_;
6214   }
6215
6216   // The new space size must be a power of two to support single-bit testing
6217   // for containment.
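  // (A power-of-two size lets containment be tested with a single address
  // mask instead of two range comparisons.)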
6218   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6219   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6220   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6221
6222   // The external allocation limit should be below 256 MB on all architectures
6223   // to avoid unnecessary low memory notifications, as that is the threshold
6224   // for some embedders.
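  // For example, with a 16 MB max semispace this works out to
  // 12 * 16 MB = 192 MB, comfortably below that threshold.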
6225   external_allocation_limit_ = 12 * max_semispace_size_;
6226   ASSERT(external_allocation_limit_ <= 256 * MB);
6227
6228   // The old generation is paged and needs at least one page for each space.
6229   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6230   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6231                                                        Page::kPageSize),
6232                                  RoundUp(max_old_generation_size_,
6233                                          Page::kPageSize));
6234
6235   // We rely on being able to allocate new arrays in paged spaces.
6236   ASSERT(Page::kMaxRegularHeapObjectSize >=
6237          (JSArray::kSize +
6238           FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6239           AllocationMemento::kSize));
6240
6241   configured_ = true;
6242   return true;
6243 }
6244
6245
6246 bool Heap::ConfigureHeapDefault() {
6247   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6248                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6249                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6250 }
6251
6252
6253 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6254   *stats->start_marker = HeapStats::kStartMarker;
6255   *stats->end_marker = HeapStats::kEndMarker;
6256   *stats->new_space_size = new_space_.SizeAsInt();
6257   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6258   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6259   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6260   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6261   *stats->old_data_space_capacity = old_data_space_->Capacity();
6262   *stats->code_space_size = code_space_->SizeOfObjects();
6263   *stats->code_space_capacity = code_space_->Capacity();
6264   *stats->map_space_size = map_space_->SizeOfObjects();
6265   *stats->map_space_capacity = map_space_->Capacity();
6266   *stats->cell_space_size = cell_space_->SizeOfObjects();
6267   *stats->cell_space_capacity = cell_space_->Capacity();
6268   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6269   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6270   *stats->lo_space_size = lo_space_->Size();
6271   isolate_->global_handles()->RecordStats(stats);
6272   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6273   *stats->memory_allocator_capacity =
6274       isolate()->memory_allocator()->Size() +
6275       isolate()->memory_allocator()->Available();
6276   *stats->os_error = OS::GetLastError();
6278   if (take_snapshot) {
6279     HeapIterator iterator(this);
6280     for (HeapObject* obj = iterator.next();
6281          obj != NULL;
6282          obj = iterator.next()) {
6283       InstanceType type = obj->map()->instance_type();
6284       ASSERT(0 <= type && type <= LAST_TYPE);
6285       stats->objects_per_type[type]++;
6286       stats->size_per_type[type] += obj->Size();
6287     }
6288   }
6289 }
6290
6291
6292 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6293   return old_pointer_space_->SizeOfObjects()
6294       + old_data_space_->SizeOfObjects()
6295       + code_space_->SizeOfObjects()
6296       + map_space_->SizeOfObjects()
6297       + cell_space_->SizeOfObjects()
6298       + property_cell_space_->SizeOfObjects()
6299       + lo_space_->SizeOfObjects();
6300 }
6301
6302
6303 bool Heap::AdvanceSweepers(int step_size) {
6304   ASSERT(isolate()->num_sweeper_threads() == 0);
6305   bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
6306   sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
6307   return sweeping_complete;
6308 }
6309
6310
6311 int64_t Heap::PromotedExternalMemorySize() {
6312   if (amount_of_external_allocated_memory_
6313       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6314   return amount_of_external_allocated_memory_
6315       - amount_of_external_allocated_memory_at_last_global_gc_;
6316 }
6317
6318
6319 void Heap::EnableInlineAllocation() {
6320   if (!inline_allocation_disabled_) return;
6321   inline_allocation_disabled_ = false;
6322
6323   // Update inline allocation limit for new space.
6324   new_space()->UpdateInlineAllocationLimit(0);
6325 }
6326
6327
6328 void Heap::DisableInlineAllocation() {
6329   if (inline_allocation_disabled_) return;
6330   inline_allocation_disabled_ = true;
6331
6332   // Update inline allocation limit for new space.
6333   new_space()->UpdateInlineAllocationLimit(0);
6334
6335   // Update inline allocation limit for old spaces.
6336   PagedSpaces spaces(this);
6337   for (PagedSpace* space = spaces.next();
6338        space != NULL;
6339        space = spaces.next()) {
6340     space->EmptyAllocationInfo();
6341   }
6342 }
6343
6344
6345 V8_DECLARE_ONCE(initialize_gc_once);
6346
6347 static void InitializeGCOnce() {
6348   InitializeScavengingVisitorsTables();
6349   NewSpaceScavenger::Initialize();
6350   MarkCompactCollector::Initialize();
6351 }
6352
6353
6354 bool Heap::SetUp() {
6355 #ifdef DEBUG
6356   allocation_timeout_ = FLAG_gc_interval;
6357 #endif
6358
6359   // Initialize heap spaces and initial maps and objects. Whenever something
6360   // goes wrong, just return false. The caller should check the results and
6361   // call Heap::TearDown() to release allocated memory.
6362   //
6363   // If the heap is not yet configured (e.g. through the API), configure it.
6364   // Configuration is based on the flags new-space-size (really the semispace
6365   // size) and old-space-size if set or the initial values of semispace_size_
6366   // and old_generation_size_ otherwise.
6367   if (!configured_) {
6368     if (!ConfigureHeapDefault()) return false;
6369   }
6370
6371   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6372
6373   MarkMapPointersAsEncoded(false);
6374
6375   // Set up memory allocator.
6376   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6377       return false;
6378
6379   // Set up new space.
6380   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6381     return false;
6382   }
6383
6384   // Initialize old pointer space.
6385   old_pointer_space_ =
6386       new OldSpace(this,
6387                    max_old_generation_size_,
6388                    OLD_POINTER_SPACE,
6389                    NOT_EXECUTABLE);
6390   if (old_pointer_space_ == NULL) return false;
6391   if (!old_pointer_space_->SetUp()) return false;
6392
6393   // Initialize old data space.
6394   old_data_space_ =
6395       new OldSpace(this,
6396                    max_old_generation_size_,
6397                    OLD_DATA_SPACE,
6398                    NOT_EXECUTABLE);
6399   if (old_data_space_ == NULL) return false;
6400   if (!old_data_space_->SetUp()) return false;
6401
6402   // Initialize the code space, set its maximum capacity to the old
6403   // generation size. It needs executable memory.
6404   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6405   // virtual address space, so that they can call each other with near calls.
6406   if (code_range_size_ > 0) {
6407     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6408       return false;
6409     }
6410   }
6411
6412   code_space_ =
6413       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6414   if (code_space_ == NULL) return false;
6415   if (!code_space_->SetUp()) return false;
6416
6417   // Initialize map space.
6418   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6419   if (map_space_ == NULL) return false;
6420   if (!map_space_->SetUp()) return false;
6421
6422   // Initialize simple cell space.
6423   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6424   if (cell_space_ == NULL) return false;
6425   if (!cell_space_->SetUp()) return false;
6426
6427   // Initialize global property cell space.
6428   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6429                                                PROPERTY_CELL_SPACE);
6430   if (property_cell_space_ == NULL) return false;
6431   if (!property_cell_space_->SetUp()) return false;
6432
6433   // The large object space may contain code or data.  We set the memory
6434   // to be non-executable here for safety, but this means we need to enable it
6435   // explicitly when allocating large code objects.
6436   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6437   if (lo_space_ == NULL) return false;
6438   if (!lo_space_->SetUp()) return false;
6439
6440   // Set up the seed that is used to randomize the string hash function.
6441   ASSERT(hash_seed() == 0);
6442   if (FLAG_randomize_hashes) {
6443     if (FLAG_hash_seed == 0) {
6444       int rnd = isolate()->random_number_generator()->NextInt();
6445       set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6446     } else {
6447       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6448     }
6449   }
6450
6451   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6452   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6453
6454   store_buffer()->SetUp();
6455
6456   mark_compact_collector()->SetUp();
6457
6458   if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
6459
6460   return true;
6461 }
6462
6463
6464 bool Heap::CreateHeapObjects() {
6465   // Create initial maps.
6466   if (!CreateInitialMaps()) return false;
6467   if (!CreateApiObjects()) return false;
6468
6469   // Create initial objects
6470   if (!CreateInitialObjects()) return false;
6471
6472   native_contexts_list_ = undefined_value();
6473   array_buffers_list_ = undefined_value();
6474   allocation_sites_list_ = undefined_value();
6475   weak_object_to_code_table_ = undefined_value();
6476   return true;
6477 }
6478
6479
6480 void Heap::SetStackLimits() {
6481   ASSERT(isolate_ != NULL);
6482   ASSERT(isolate_ == isolate());
6483   // On 64-bit machines, pointers are generally out of the range of Smis.  We
6484   // write something that, to the GC, looks like an out-of-range Smi.
6485
6486   // Set up the special root array entries containing the stack limits.
6487   // These are actually addresses, but the tag makes the GC ignore it.
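  // For example, with kSmiTag == 0 and kSmiTagMask == 1, a limit address of
  // 0x12345679 is stored as 0x12345678; the clear tag bit makes the GC treat
  // it as an (out-of-range) Smi rather than as a heap pointer.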
6488   roots_[kStackLimitRootIndex] =
6489       reinterpret_cast<Object*>(
6490           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6491   roots_[kRealStackLimitRootIndex] =
6492       reinterpret_cast<Object*>(
6493           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6494 }
6495
6496
6497 void Heap::TearDown() {
6498 #ifdef VERIFY_HEAP
6499   if (FLAG_verify_heap) {
6500     Verify();
6501   }
6502 #endif
6503
6504   UpdateMaximumCommitted();
6505
6506   if (FLAG_print_cumulative_gc_stat) {
6507     PrintF("\n");
6508     PrintF("gc_count=%d ", gc_count_);
6509     PrintF("mark_sweep_count=%d ", ms_count_);
6510     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6511     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6512     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6513     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6514            get_max_alive_after_gc());
6515     PrintF("total_marking_time=%.1f ", marking_time());
6516     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6517     PrintF("\n\n");
6518   }
6519
6520   if (FLAG_print_max_heap_committed) {
6521     PrintF("\n");
6522     PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
6523       MaximumCommittedMemory());
6524     PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
6525       new_space_.MaximumCommittedMemory());
6526     PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
6527       old_data_space_->MaximumCommittedMemory());
6528     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6529       old_pointer_space_->MaximumCommittedMemory());
6530     PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
6531       old_pointer_space_->MaximumCommittedMemory());
6532     PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
6533       code_space_->MaximumCommittedMemory());
6534     PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
6535       map_space_->MaximumCommittedMemory());
6536     PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
6537       cell_space_->MaximumCommittedMemory());
6538     PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
6539       property_cell_space_->MaximumCommittedMemory());
6540     PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
6541       lo_space_->MaximumCommittedMemory());
6542     PrintF("\n\n");
6543   }
6544
6545   TearDownArrayBuffers();
6546
6547   isolate_->global_handles()->TearDown();
6548
6549   external_string_table_.TearDown();
6550
6551   mark_compact_collector()->TearDown();
6552
6553   new_space_.TearDown();
6554
6555   if (old_pointer_space_ != NULL) {
6556     old_pointer_space_->TearDown();
6557     delete old_pointer_space_;
6558     old_pointer_space_ = NULL;
6559   }
6560
6561   if (old_data_space_ != NULL) {
6562     old_data_space_->TearDown();
6563     delete old_data_space_;
6564     old_data_space_ = NULL;
6565   }
6566
6567   if (code_space_ != NULL) {
6568     code_space_->TearDown();
6569     delete code_space_;
6570     code_space_ = NULL;
6571   }
6572
6573   if (map_space_ != NULL) {
6574     map_space_->TearDown();
6575     delete map_space_;
6576     map_space_ = NULL;
6577   }
6578
6579   if (cell_space_ != NULL) {
6580     cell_space_->TearDown();
6581     delete cell_space_;
6582     cell_space_ = NULL;
6583   }
6584
6585   if (property_cell_space_ != NULL) {
6586     property_cell_space_->TearDown();
6587     delete property_cell_space_;
6588     property_cell_space_ = NULL;
6589   }
6590
6591   if (lo_space_ != NULL) {
6592     lo_space_->TearDown();
6593     delete lo_space_;
6594     lo_space_ = NULL;
6595   }
6596
6597   store_buffer()->TearDown();
6598   incremental_marking()->TearDown();
6599
6600   isolate_->memory_allocator()->TearDown();
6601
6602   delete relocation_mutex_;
6603   relocation_mutex_ = NULL;
6604 }
6605
6606
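// Embedders normally reach this through the public API rather than calling
// the Heap method directly.  A minimal usage sketch, assuming the
// v8::Isolate::AddGCPrologueCallback entry point of this API version
// (OnGCPrologue is a hypothetical callback name):
//
//   void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
//                     v8::GCCallbackFlags flags) {
//     // Runs just before each matching collection (pass_isolate == true).
//   }
//   ...
//   isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);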
6607 void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6608                                  GCType gc_type,
6609                                  bool pass_isolate) {
6610   ASSERT(callback != NULL);
6611   GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6612   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6613   return gc_prologue_callbacks_.Add(pair);
6614 }
6615
6616
6617 void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6618   ASSERT(callback != NULL);
6619   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6620     if (gc_prologue_callbacks_[i].callback == callback) {
6621       gc_prologue_callbacks_.Remove(i);
6622       return;
6623     }
6624   }
6625   UNREACHABLE();
6626 }
6627
6628
6629 void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6630                                  GCType gc_type,
6631                                  bool pass_isolate) {
6632   ASSERT(callback != NULL);
6633   GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6634   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6635   return gc_epilogue_callbacks_.Add(pair);
6636 }
6637
6638
6639 void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6640   ASSERT(callback != NULL);
6641   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6642     if (gc_epilogue_callbacks_[i].callback == callback) {
6643       gc_epilogue_callbacks_.Remove(i);
6644       return;
6645     }
6646   }
6647   UNREACHABLE();
6648 }
6649
6650
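// Records the dependent code |dep| for |obj| in the weak object-to-code
// table.  The Put() may reallocate the table; the root is updated (and, when
// zapping is enabled, the old copy is zapped) so a stale table is not kept
// alive.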
6651 MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
6652                                                  DependentCode* dep) {
6653   ASSERT(!InNewSpace(obj));
6654   ASSERT(!InNewSpace(dep));
6655   MaybeObject* maybe_obj =
6656       WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
6657   WeakHashTable* table;
6658   if (!maybe_obj->To(&table)) return maybe_obj;
6659   if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
6660     WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
6661   }
6662   set_weak_object_to_code_table(table);
6663   ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
6664   return weak_object_to_code_table_;
6665 }
6666
6667
6668 DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
6669   Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
6670   if (dep->IsDependentCode()) return DependentCode::cast(dep);
6671   return DependentCode::cast(empty_fixed_array());
6672 }
6673
6674
6675 void Heap::EnsureWeakObjectToCodeTable() {
6676   if (!weak_object_to_code_table()->IsHashTable()) {
6677     set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
6678   }
6679 }
6680
6681
6682 void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
6683   v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
6684 }
6685
6686 #ifdef DEBUG
6687
6688 class PrintHandleVisitor: public ObjectVisitor {
6689  public:
6690   void VisitPointers(Object** start, Object** end) {
6691     for (Object** p = start; p < end; p++)
6692       PrintF("  handle %p to %p\n",
6693              reinterpret_cast<void*>(p),
6694              reinterpret_cast<void*>(*p));
6695   }
6696 };
6697
6698
6699 void Heap::PrintHandles() {
6700   PrintF("Handles:\n");
6701   PrintHandleVisitor v;
6702   isolate_->handle_scope_implementer()->Iterate(&v);
6703 }
6704
6705 #endif
6706
6707
6708 Space* AllSpaces::next() {
6709   switch (counter_++) {
6710     case NEW_SPACE:
6711       return heap_->new_space();
6712     case OLD_POINTER_SPACE:
6713       return heap_->old_pointer_space();
6714     case OLD_DATA_SPACE:
6715       return heap_->old_data_space();
6716     case CODE_SPACE:
6717       return heap_->code_space();
6718     case MAP_SPACE:
6719       return heap_->map_space();
6720     case CELL_SPACE:
6721       return heap_->cell_space();
6722     case PROPERTY_CELL_SPACE:
6723       return heap_->property_cell_space();
6724     case LO_SPACE:
6725       return heap_->lo_space();
6726     default:
6727       return NULL;
6728   }
6729 }
6730
6731
6732 PagedSpace* PagedSpaces::next() {
6733   switch (counter_++) {
6734     case OLD_POINTER_SPACE:
6735       return heap_->old_pointer_space();
6736     case OLD_DATA_SPACE:
6737       return heap_->old_data_space();
6738     case CODE_SPACE:
6739       return heap_->code_space();
6740     case MAP_SPACE:
6741       return heap_->map_space();
6742     case CELL_SPACE:
6743       return heap_->cell_space();
6744     case PROPERTY_CELL_SPACE:
6745       return heap_->property_cell_space();
6746     default:
6747       return NULL;
6748   }
6749 }
6750
6751
6752
6753 OldSpace* OldSpaces::next() {
6754   switch (counter_++) {
6755     case OLD_POINTER_SPACE:
6756       return heap_->old_pointer_space();
6757     case OLD_DATA_SPACE:
6758       return heap_->old_data_space();
6759     case CODE_SPACE:
6760       return heap_->code_space();
6761     default:
6762       return NULL;
6763   }
6764 }
6765
6766
6767 SpaceIterator::SpaceIterator(Heap* heap)
6768     : heap_(heap),
6769       current_space_(FIRST_SPACE),
6770       iterator_(NULL),
6771       size_func_(NULL) {
6772 }
6773
6774
6775 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
6776     : heap_(heap),
6777       current_space_(FIRST_SPACE),
6778       iterator_(NULL),
6779       size_func_(size_func) {
6780 }
6781
6782
6783 SpaceIterator::~SpaceIterator() {
6784   // Delete active iterator if any.
6785   delete iterator_;
6786 }
6787
6788
6789 bool SpaceIterator::has_next() {
6790   // Iterate until no more spaces.
6791   return current_space_ != LAST_SPACE;
6792 }
6793
6794
6795 ObjectIterator* SpaceIterator::next() {
6796   if (iterator_ != NULL) {
6797     delete iterator_;
6798     iterator_ = NULL;
6799     // Move to the next space
6800     current_space_++;
6801     if (current_space_ > LAST_SPACE) {
6802       return NULL;
6803     }
6804   }
6805
6806   // Return iterator for the new current space.
6807   return CreateIterator();
6808 }
6809
6810
6811 // Create an iterator for the space to iterate.
6812 ObjectIterator* SpaceIterator::CreateIterator() {
6813   ASSERT(iterator_ == NULL);
6814
6815   switch (current_space_) {
6816     case NEW_SPACE:
6817       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
6818       break;
6819     case OLD_POINTER_SPACE:
6820       iterator_ =
6821           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
6822       break;
6823     case OLD_DATA_SPACE:
6824       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
6825       break;
6826     case CODE_SPACE:
6827       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
6828       break;
6829     case MAP_SPACE:
6830       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
6831       break;
6832     case CELL_SPACE:
6833       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
6834       break;
6835     case PROPERTY_CELL_SPACE:
6836       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
6837                                          size_func_);
6838       break;
6839     case LO_SPACE:
6840       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
6841       break;
6842   }
6843
6844   // Return the newly allocated iterator.
6845   ASSERT(iterator_ != NULL);
6846   return iterator_;
6847 }
6848
6849
6850 class HeapObjectsFilter {
6851  public:
6852   virtual ~HeapObjectsFilter() {}
6853   virtual bool SkipObject(HeapObject* object) = 0;
6854 };
6855
6856
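// Filters out objects that are unreachable from the roots: the constructor
// marks everything reachable (VISIT_ALL) via a simple transitive closure,
// SkipObject() then rejects anything left unmarked, and the destructor clears
// the mark bits again so the heap is left unchanged.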
6857 class UnreachableObjectsFilter : public HeapObjectsFilter {
6858  public:
6859   explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
6860     MarkReachableObjects();
6861   }
6862
6863   ~UnreachableObjectsFilter() {
6864     heap_->mark_compact_collector()->ClearMarkbits();
6865   }
6866
6867   bool SkipObject(HeapObject* object) {
6868     MarkBit mark_bit = Marking::MarkBitFrom(object);
6869     return !mark_bit.Get();
6870   }
6871
6872  private:
6873   class MarkingVisitor : public ObjectVisitor {
6874    public:
6875     MarkingVisitor() : marking_stack_(10) {}
6876
6877     void VisitPointers(Object** start, Object** end) {
6878       for (Object** p = start; p < end; p++) {
6879         if (!(*p)->IsHeapObject()) continue;
6880         HeapObject* obj = HeapObject::cast(*p);
6881         MarkBit mark_bit = Marking::MarkBitFrom(obj);
6882         if (!mark_bit.Get()) {
6883           mark_bit.Set();
6884           marking_stack_.Add(obj);
6885         }
6886       }
6887     }
6888
6889     void TransitiveClosure() {
6890       while (!marking_stack_.is_empty()) {
6891         HeapObject* obj = marking_stack_.RemoveLast();
6892         obj->Iterate(this);
6893       }
6894     }
6895
6896    private:
6897     List<HeapObject*> marking_stack_;
6898   };
6899
6900   void MarkReachableObjects() {
6901     MarkingVisitor visitor;
6902     heap_->IterateRoots(&visitor, VISIT_ALL);
6903     visitor.TransitiveClosure();
6904   }
6905
6906   Heap* heap_;
6907   DisallowHeapAllocation no_allocation_;
6908 };
6909
6910
6911 HeapIterator::HeapIterator(Heap* heap)
6912     : heap_(heap),
6913       filtering_(HeapIterator::kNoFiltering),
6914       filter_(NULL) {
6915   Init();
6916 }
6917
6918
6919 HeapIterator::HeapIterator(Heap* heap,
6920                            HeapIterator::HeapObjectsFiltering filtering)
6921     : heap_(heap),
6922       filtering_(filtering),
6923       filter_(NULL) {
6924   Init();
6925 }
6926
6927
6928 HeapIterator::~HeapIterator() {
6929   Shutdown();
6930 }
6931
6932
6933 void HeapIterator::Init() {
6934   // Start the iteration.
6935   space_iterator_ = new SpaceIterator(heap_);
6936   switch (filtering_) {
6937     case kFilterUnreachable:
6938       filter_ = new UnreachableObjectsFilter(heap_);
6939       break;
6940     default:
6941       break;
6942   }
6943   object_iterator_ = space_iterator_->next();
6944 }
6945
6946
6947 void HeapIterator::Shutdown() {
6948 #ifdef DEBUG
6949   // Assert that in filtering mode we have iterated through all
6950   // objects. Otherwise, the heap will be left in an inconsistent state.
6951   if (filtering_ != kNoFiltering) {
6952     ASSERT(object_iterator_ == NULL);
6953   }
6954 #endif
6955   // Make sure the last iterator is deallocated.
6956   delete space_iterator_;
6957   space_iterator_ = NULL;
6958   object_iterator_ = NULL;
6959   delete filter_;
6960   filter_ = NULL;
6961 }
6962
6963
6964 HeapObject* HeapIterator::next() {
6965   if (filter_ == NULL) return NextObject();
6966
6967   HeapObject* obj = NextObject();
6968   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
6969   return obj;
6970 }
6971
6972
6973 HeapObject* HeapIterator::NextObject() {
6974   // No iterator means we are done.
6975   if (object_iterator_ == NULL) return NULL;
6976
6977   if (HeapObject* obj = object_iterator_->next_object()) {
6978     // If the current iterator has more objects we are fine.
6979     return obj;
6980   } else {
6981     // Go through the spaces looking for one that has objects.
6982     while (space_iterator_->has_next()) {
6983       object_iterator_ = space_iterator_->next();
6984       if (HeapObject* obj = object_iterator_->next_object()) {
6985         return obj;
6986       }
6987     }
6988   }
6989   // Done with the last space.
6990   object_iterator_ = NULL;
6991   return NULL;
6992 }
6993
6994
6995 void HeapIterator::reset() {
6996   // Restart the iterator.
6997   Shutdown();
6998   Init();
6999 }
7000
7001
7002 #ifdef DEBUG
7003
7004 Object* const PathTracer::kAnyGlobalObject = NULL;
7005
7006 class PathTracer::MarkVisitor: public ObjectVisitor {
7007  public:
7008   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7009   void VisitPointers(Object** start, Object** end) {
7010     // Scan all HeapObject pointers in [start, end)
7011     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7012       if ((*p)->IsHeapObject())
7013         tracer_->MarkRecursively(p, this);
7014     }
7015   }
7016
7017  private:
7018   PathTracer* tracer_;
7019 };
7020
7021
7022 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7023  public:
7024   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7025   void VisitPointers(Object** start, Object** end) {
7026     // Scan all HeapObject pointers in [start, end)
7027     for (Object** p = start; p < end; p++) {
7028       if ((*p)->IsHeapObject())
7029         tracer_->UnmarkRecursively(p, this);
7030     }
7031   }
7032
7033  private:
7034   PathTracer* tracer_;
7035 };
7036
7037
7038 void PathTracer::VisitPointers(Object** start, Object** end) {
7039   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7040   // Visit all HeapObject pointers in [start, end)
7041   for (Object** p = start; !done && (p < end); p++) {
7042     if ((*p)->IsHeapObject()) {
7043       TracePathFrom(p);
7044       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7045     }
7046   }
7047 }
7048
7049
7050 void PathTracer::Reset() {
7051   found_target_ = false;
7052   object_stack_.Clear();
7053 }
7054
7055
7056 void PathTracer::TracePathFrom(Object** root) {
7057   ASSERT((search_target_ == kAnyGlobalObject) ||
7058          search_target_->IsHeapObject());
7059   found_target_in_trace_ = false;
7060   Reset();
7061
7062   MarkVisitor mark_visitor(this);
7063   MarkRecursively(root, &mark_visitor);
7064
7065   UnmarkVisitor unmark_visitor(this);
7066   UnmarkRecursively(root, &unmark_visitor);
7067
7068   ProcessResults();
7069 }
7070
7071
7072 static bool SafeIsNativeContext(HeapObject* obj) {
7073   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7074 }
7075
7076
7077 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7078   if (!(*p)->IsHeapObject()) return;
7079
7080   HeapObject* obj = HeapObject::cast(*p);
7081
7082   Object* map = obj->map();
7083
7084   if (!map->IsHeapObject()) return;  // visited before
7085
7086   if (found_target_in_trace_) return;  // stop if target found
7087   object_stack_.Add(obj);
7088   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7089       (obj == search_target_)) {
7090     found_target_in_trace_ = true;
7091     found_target_ = true;
7092     return;
7093   }
7094
7095   bool is_native_context = SafeIsNativeContext(obj);
7096
7097   // not visited yet
7098   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7099
7100   Address map_addr = map_p->address();
7101
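  // Mark the object as visited by tagging its map word; UnmarkRecursively
  // reverses this by subtracting kMarkTag again.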
7102   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7103
7104   // Scan the object body.
7105   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7106     // This is specialized to scan Context's properly.
7107     Object** start = reinterpret_cast<Object**>(obj->address() +
7108                                                 Context::kHeaderSize);
7109     Object** end = reinterpret_cast<Object**>(obj->address() +
7110         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7111     mark_visitor->VisitPointers(start, end);
7112   } else {
7113     obj->IterateBody(map_p->instance_type(),
7114                      obj->SizeFromMap(map_p),
7115                      mark_visitor);
7116   }
7117
7118   // Scan the map after the body because the body is a lot more interesting
7119   // when doing leak detection.
7120   MarkRecursively(&map, mark_visitor);
7121
7122   if (!found_target_in_trace_)  // don't pop if found the target
7123     object_stack_.RemoveLast();
7124 }
7125
7126
7127 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7128   if (!(*p)->IsHeapObject()) return;
7129
7130   HeapObject* obj = HeapObject::cast(*p);
7131
7132   Object* map = obj->map();
7133
7134   if (map->IsHeapObject()) return;  // unmarked already
7135
7136   Address map_addr = reinterpret_cast<Address>(map);
7137
7138   map_addr -= kMarkTag;
7139
7140   ASSERT_TAG_ALIGNED(map_addr);
7141
7142   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7143
7144   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7145
7146   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7147
7148   obj->IterateBody(Map::cast(map_p)->instance_type(),
7149                    obj->SizeFromMap(Map::cast(map_p)),
7150                    unmark_visitor);
7151 }
7152
7153
7154 void PathTracer::ProcessResults() {
7155   if (found_target_) {
7156     PrintF("=====================================\n");
7157     PrintF("====        Path to object       ====\n");
7158     PrintF("=====================================\n\n");
7159
7160     ASSERT(!object_stack_.is_empty());
7161     for (int i = 0; i < object_stack_.length(); i++) {
7162       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7163       Object* obj = object_stack_[i];
7164       obj->Print();
7165     }
7166     PrintF("=====================================\n");
7167   }
7168 }
7169
7170
7171 // Triggers a depth-first traversal of reachable objects from one
7172 // given root object and finds a path to a specific heap object and
7173 // prints it.
7174 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7175   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7176   tracer.VisitPointer(&root);
7177 }
7178
7179
7180 // Triggers a depth-first traversal of reachable objects from roots
7181 // and finds a path to a specific heap object and prints it.
7182 void Heap::TracePathToObject(Object* target) {
7183   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7184   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7185 }
7186
7187
7188 // Triggers a depth-first traversal of reachable objects from roots
7189 // and finds a path to any global object and prints it. Useful for
7190 // determining the source for leaks of global objects.
7191 void Heap::TracePathToGlobal() {
7192   PathTracer tracer(PathTracer::kAnyGlobalObject,
7193                     PathTracer::FIND_ALL,
7194                     VISIT_ALL);
7195   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7196 }
7197 #endif
7198
7199
7200 static intptr_t CountTotalHolesSize(Heap* heap) {
7201   intptr_t holes_size = 0;
7202   OldSpaces spaces(heap);
7203   for (OldSpace* space = spaces.next();
7204        space != NULL;
7205        space = spaces.next()) {
7206     holes_size += space->Waste() + space->Available();
7207   }
7208   return holes_size;
7209 }
7210
7211
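// A GCTracer brackets a single collection: the constructor samples object
// size, committed memory and incremental-marking progress before the GC, and
// the destructor updates the cumulative counters and prints either the
// one-line trace or the name=value ("nvp") form, depending on the flags.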
7212 GCTracer::GCTracer(Heap* heap,
7213                    const char* gc_reason,
7214                    const char* collector_reason)
7215     : start_time_(0.0),
7216       start_object_size_(0),
7217       start_memory_size_(0),
7218       gc_count_(0),
7219       full_gc_count_(0),
7220       allocated_since_last_gc_(0),
7221       spent_in_mutator_(0),
7222       promoted_objects_size_(0),
7223       nodes_died_in_new_space_(0),
7224       nodes_copied_in_new_space_(0),
7225       nodes_promoted_(0),
7226       heap_(heap),
7227       gc_reason_(gc_reason),
7228       collector_reason_(collector_reason) {
7229   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7230   start_time_ = OS::TimeCurrentMillis();
7231   start_object_size_ = heap_->SizeOfObjects();
7232   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7233
7234   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7235     scopes_[i] = 0;
7236   }
7237
7238   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7239
7240   allocated_since_last_gc_ =
7241       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7242
7243   if (heap_->last_gc_end_timestamp_ > 0) {
7244     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7245   }
7246
7247   steps_count_ = heap_->incremental_marking()->steps_count();
7248   steps_took_ = heap_->incremental_marking()->steps_took();
7249   longest_step_ = heap_->incremental_marking()->longest_step();
7250   steps_count_since_last_gc_ =
7251       heap_->incremental_marking()->steps_count_since_last_gc();
7252   steps_took_since_last_gc_ =
7253       heap_->incremental_marking()->steps_took_since_last_gc();
7254 }
7255
7256
GCTracer::~GCTracer() {
  // Print ONE line iff a GC tracing flag is set.
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;

  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);

  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();

  double time = heap_->last_gc_end_timestamp_ - start_time_;

  // Update cumulative GC statistics if required.
  if (FLAG_print_cumulative_gc_stat) {
    heap_->total_gc_time_ms_ += time;
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
                                     heap_->alive_after_last_gc_);
    if (!first_gc) {
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
                                   spent_in_mutator_);
    }
  } else if (FLAG_trace_gc_verbose) {
    heap_->total_gc_time_ms_ += time;
  }

  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;

  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);

  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());

  if (!FLAG_trace_gc_nvp) {
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);

    double end_memory_size_mb =
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;

    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
           CollectorString(),
           static_cast<double>(start_object_size_) / MB,
           static_cast<double>(start_memory_size_) / MB,
           SizeOfHeapObjects(),
           end_memory_size_mb);

    if (external_time > 0) PrintF("%d / ", external_time);
    PrintF("%.1f ms", time);
    if (steps_count_ > 0) {
      if (collector_ == SCAVENGER) {
        PrintF(" (+ %.1f ms in %d steps since last GC)",
               steps_took_since_last_gc_,
               steps_count_since_last_gc_);
      } else {
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
                   "biggest step %.1f ms)",
               steps_took_,
               steps_count_,
               longest_step_);
      }
    }

    if (gc_reason_ != NULL) {
      PrintF(" [%s]", gc_reason_);
    }

    if (collector_reason_ != NULL) {
      PrintF(" [%s]", collector_reason_);
    }

    PrintF(".\n");
  } else {
    PrintF("pause=%.1f ", time);
    PrintF("mutator=%.1f ", spent_in_mutator_);
    PrintF("gc=");
    switch (collector_) {
      case SCAVENGER:
        PrintF("s");
        break;
      case MARK_COMPACTOR:
        PrintF("ms");
        break;
      default:
        UNREACHABLE();
    }
    PrintF(" ");

    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
    PrintF("compaction_ptrs=%.1f ",
        scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
    PrintF("intracompaction_ptrs=%.1f ",
        scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
    PrintF("weakcollection_process=%.1f ",
        scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
    PrintF("weakcollection_clear=%.1f ",
        scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);

    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
           in_free_list_or_wasted_before_gc_);
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));

    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
    PrintF("nodes_promoted=%d ", nodes_promoted_);

    if (collector_ == SCAVENGER) {
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
    } else {
      PrintF("stepscount=%d ", steps_count_);
      PrintF("stepstook=%.1f ", steps_took_);
      PrintF("longeststep=%.1f ", longest_step_);
    }

    PrintF("\n");
  }

  heap_->PrintShortHeapStatistics();
}


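// Short human-readable name of the collector, used in the non-nvp trace line
// printed above.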
const char* GCTracer::CollectorString() {
  switch (collector_) {
    case SCAVENGER:
      return "Scavenge";
    case MARK_COMPACTOR:
      return "Mark-sweep";
  }
  return "Unknown GC";
}


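// Maps a (map, name) pair to a cache index: the map pointer is shifted right
// by kMapHashShift to drop its low bits, XOR'ed with the name's hash, and
// masked down to the cache capacity.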
int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


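// Probes all kEntriesPerBucket entries of the bucket selected by
// Hash(map, name); returns the cached field offset on a hit, kNotFound
// otherwise.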
int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


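// Caches a (map, name) -> field_offset mapping. Non-unique names are first
// replaced by their internalized version (the update is dropped if none
// exists). A free slot in the bucket is reused if one is available;
// otherwise the bucket entries are shifted down by one and the new entry is
// written at position zero.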
void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


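// Compacts both string lists in place: entries that have been cleared to the
// hole value are dropped, and strings that are no longer in new space are
// moved from the new-space list to the old-space list.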
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(new_space_strings_[i]->IsExternalString());
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(old_space_strings_[i]->IsExternalString());
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
  }
  new_space_strings_.Free();
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
  }
  old_space_strings_.Free();
}


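// Links a chunk into the singly-linked list of chunks whose memory will be
// released by the next call to FreeQueuedChunks().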
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


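// Releases all queued chunks. Every queued chunk is first flagged
// ABOUT_TO_BE_FREED; large-object chunks additionally get fake page-sized
// MemoryChunk headers written into their body (see the comment below) so
// that store buffer filtering can still resolve addresses inside them. The
// store buffer is then compacted and filtered before the memory is freed.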
void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion, it will fail to find the chunk because
      // it tries to search the list of pages owned by the large object
      // space, and the queued chunks have already been detached from that
      // list. To work around this we split the large chunk into normal,
      // kPageSize-aligned pieces and initialize the size, owner and flags
      // field of every piece. If FromAnyPointerAddress encounters a slot
      // that belongs to one of these smaller pieces it will treat it as a
      // slot on a normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


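// Records the address of an unmapped page in a small ring buffer so that it
// can be recognized in a crash dump later; the page-offset bits of the
// pointer are tagged with a marker that distinguishes compacted ("cleared")
// pages from other unmapped ("I died") pages.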
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


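// Publishes the per-type object counts and sizes gathered since the previous
// checkpoint: each counter is incremented by the current value and
// decremented by the value recorded last time, i.e. adjusted by the delta.
// The current values then become the new "last time" snapshot and the
// working counters are cleared.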
void Heap::CheckpointObjectStats() {
  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
  counters->count_of_##name()->Increment(                                      \
      static_cast<int>(object_counts_[name]));                                 \
  counters->count_of_##name()->Decrement(                                      \
      static_cast<int>(object_counts_last_time_[name]));                       \
  counters->size_of_##name()->Increment(                                       \
      static_cast<int>(object_sizes_[name]));                                  \
  counters->size_of_##name()->Decrement(                                       \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
  index =                                                                     \
      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(                            \
      static_cast<int>(object_counts_[index]));                               \
  counters->count_of_CODE_AGE_##name()->Decrement(                            \
      static_cast<int>(object_counts_last_time_[index]));                     \
  counters->size_of_CODE_AGE_##name()->Increment(                             \
      static_cast<int>(object_sizes_[index]));                                \
  counters->size_of_CODE_AGE_##name()->Decrement(                             \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}

} }  // namespace v8::internal