1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
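// LUMP_OF_MEMORY below is the unit used for the default generation limits:
// 2 MB on x64 and 1 MB on the other targets.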
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // Will be 4 * reserved_semispace_size_ to ensure that the young
94 // generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       property_cell_space_(NULL),
110       lo_space_(NULL),
111       gc_state_(NOT_IN_GC),
112       gc_post_processing_depth_(0),
113       ms_count_(0),
114       gc_count_(0),
115       remembered_unmapped_pages_index_(0),
116       unflattened_strings_length_(0),
117 #ifdef DEBUG
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       gc_count_at_last_idle_gc_(0),
157       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158       gcs_since_last_deopt_(0),
159 #ifdef VERIFY_HEAP
160       no_weak_embedded_maps_verification_scope_depth_(0),
161 #endif
162       promotion_queue_(this),
163       configured_(false),
164       chunks_queued_for_free_(NULL),
165       relocation_mutex_(NULL) {
166   // Allow build-time customization of the max semispace size. Building
167   // V8 with snapshots and a non-default max semispace size is much
168   // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171 #endif
172
173   intptr_t max_virtual = OS::MaxVirtualMemory();
174
175   if (max_virtual > 0) {
176     if (code_range_size_ > 0) {
177       // Reserve no more than 1/8 of the memory for the code range.
178       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179     }
180   }
181
182   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183   native_contexts_list_ = NULL;
184   array_buffers_list_ = Smi::FromInt(0);
185   allocation_sites_list_ = Smi::FromInt(0);
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list in
189   // the minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity() +
205       property_cell_space_->Capacity();
206 }
207
208
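// Note that, unlike Capacity(), committed memory also includes the large
// object space.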
209 intptr_t Heap::CommittedMemory() {
210   if (!HasBeenSetUp()) return 0;
211
212   return new_space_.CommittedMemory() +
213       old_pointer_space_->CommittedMemory() +
214       old_data_space_->CommittedMemory() +
215       code_space_->CommittedMemory() +
216       map_space_->CommittedMemory() +
217       cell_space_->CommittedMemory() +
218       property_cell_space_->CommittedMemory() +
219       lo_space_->Size();
220 }
221
222
223 size_t Heap::CommittedPhysicalMemory() {
224   if (!HasBeenSetUp()) return 0;
225
226   return new_space_.CommittedPhysicalMemory() +
227       old_pointer_space_->CommittedPhysicalMemory() +
228       old_data_space_->CommittedPhysicalMemory() +
229       code_space_->CommittedPhysicalMemory() +
230       map_space_->CommittedPhysicalMemory() +
231       cell_space_->CommittedPhysicalMemory() +
232       property_cell_space_->CommittedPhysicalMemory() +
233       lo_space_->CommittedPhysicalMemory();
234 }
235
236
237 intptr_t Heap::CommittedMemoryExecutable() {
238   if (!HasBeenSetUp()) return 0;
239
240   return isolate()->memory_allocator()->SizeExecutable();
241 }
242
243
244 intptr_t Heap::Available() {
245   if (!HasBeenSetUp()) return 0;
246
247   return new_space_.Available() +
248       old_pointer_space_->Available() +
249       old_data_space_->Available() +
250       code_space_->Available() +
251       map_space_->Available() +
252       cell_space_->Available() +
253       property_cell_space_->Available();
254 }
255
256
257 bool Heap::HasBeenSetUp() {
258   return old_pointer_space_ != NULL &&
259          old_data_space_ != NULL &&
260          code_space_ != NULL &&
261          map_space_ != NULL &&
262          cell_space_ != NULL &&
263          property_cell_space_ != NULL &&
264          lo_space_ != NULL;
265 }
266
267
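// Returns the size of an old-space object even while a collection is in
// progress: objects whose map word is currently used for intrusive marking
// report their size through IntrusiveMarking, all others through their map.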
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269   if (IntrusiveMarking::IsMarked(object)) {
270     return IntrusiveMarking::SizeOfMarkedObject(object);
271   }
272   return object->SizeFromMap(object->map());
273 }
274
275
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277                                               const char** reason) {
278   // Is global GC requested?
279   if (space != NEW_SPACE) {
280     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281     *reason = "GC in old space requested";
282     return MARK_COMPACTOR;
283   }
284
285   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286     *reason = "GC in old space forced by flags";
287     return MARK_COMPACTOR;
288   }
289
290   // Is enough data promoted to justify a global GC?
291   if (OldGenerationAllocationLimitReached()) {
292     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293     *reason = "promotion limit reached";
294     return MARK_COMPACTOR;
295   }
296
297   // Have allocation in OLD and LO failed?
298   if (old_gen_exhausted_) {
299     isolate_->counters()->
300         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301     *reason = "old generations exhausted";
302     return MARK_COMPACTOR;
303   }
304
305   // Is there enough space left in OLD to guarantee that a scavenge can
306   // succeed?
307   //
308   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309   // for object promotion. It counts only the bytes that the memory
310   // allocator has not yet allocated from the OS and assigned to any space,
311   // and does not count available bytes already in the old space or code
312   // space.  Undercounting is safe---we may get an unrequested full GC when
313   // a scavenge would have succeeded.
314   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315     isolate_->counters()->
316         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317     *reason = "scavenge might not succeed";
318     return MARK_COMPACTOR;
319   }
320
321   // Default
322   *reason = NULL;
323   return SCAVENGER;
324 }
325
326
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330   // Heap::ReportHeapStatistics will also log NewSpace statistics when
331   // --log-gc is set.  The following logic is used to avoid
332   // double logging.
333 #ifdef DEBUG
334   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335   if (FLAG_heap_stats) {
336     ReportHeapStatistics("Before GC");
337   } else if (FLAG_log_gc) {
338     new_space_.ReportStatistics();
339   }
340   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
341 #else
342   if (FLAG_log_gc) {
343     new_space_.CollectStatistics();
344     new_space_.ReportStatistics();
345     new_space_.ClearHistograms();
346   }
347 #endif  // DEBUG
348 }
349
350
351 void Heap::PrintShortHeapStatistics() {
352   if (!FLAG_trace_gc_verbose) return;
353   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
354                ", available: %6" V8_PTR_PREFIX "d KB\n",
355            isolate_->memory_allocator()->Size() / KB,
356            isolate_->memory_allocator()->Available() / KB);
357   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB"
359                ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            new_space_.Size() / KB,
361            new_space_.Available() / KB,
362            new_space_.CommittedMemory() / KB);
363   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
364                ", available: %6" V8_PTR_PREFIX "d KB"
365                ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            old_pointer_space_->SizeOfObjects() / KB,
367            old_pointer_space_->Available() / KB,
368            old_pointer_space_->CommittedMemory() / KB);
369   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
370                ", available: %6" V8_PTR_PREFIX "d KB"
371                ", committed: %6" V8_PTR_PREFIX "d KB\n",
372            old_data_space_->SizeOfObjects() / KB,
373            old_data_space_->Available() / KB,
374            old_data_space_->CommittedMemory() / KB);
375   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
376                ", available: %6" V8_PTR_PREFIX "d KB"
377                ", committed: %6" V8_PTR_PREFIX "d KB\n",
378            code_space_->SizeOfObjects() / KB,
379            code_space_->Available() / KB,
380            code_space_->CommittedMemory() / KB);
381   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
382                ", available: %6" V8_PTR_PREFIX "d KB"
383                ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            map_space_->SizeOfObjects() / KB,
385            map_space_->Available() / KB,
386            map_space_->CommittedMemory() / KB);
387   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
388                ", available: %6" V8_PTR_PREFIX "d KB"
389                ", committed: %6" V8_PTR_PREFIX "d KB\n",
390            cell_space_->SizeOfObjects() / KB,
391            cell_space_->Available() / KB,
392            cell_space_->CommittedMemory() / KB);
393   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394                ", available: %6" V8_PTR_PREFIX "d KB"
395                ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            property_cell_space_->SizeOfObjects() / KB,
397            property_cell_space_->Available() / KB,
398            property_cell_space_->CommittedMemory() / KB);
399   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400                ", available: %6" V8_PTR_PREFIX "d KB"
401                ", committed: %6" V8_PTR_PREFIX "d KB\n",
402            lo_space_->SizeOfObjects() / KB,
403            lo_space_->Available() / KB,
404            lo_space_->CommittedMemory() / KB);
405   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
406                ", available: %6" V8_PTR_PREFIX "d KB"
407                ", committed: %6" V8_PTR_PREFIX "d KB\n",
408            this->SizeOfObjects() / KB,
409            this->Available() / KB,
410            this->CommittedMemory() / KB);
411   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412            amount_of_external_allocated_memory_ / KB);
413   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
414 }
415
416
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420   // As with the pre-GC report, we use some complicated logic to ensure that
421   // NewSpace statistics are logged exactly once when --log-gc is turned on.
422 #if defined(DEBUG)
423   if (FLAG_heap_stats) {
424     new_space_.CollectStatistics();
425     ReportHeapStatistics("After GC");
426   } else if (FLAG_log_gc) {
427     new_space_.ReportStatistics();
428   }
429 #else
430   if (FLAG_log_gc) new_space_.ReportStatistics();
431 #endif  // DEBUG
432 }
433
434
435 void Heap::GarbageCollectionPrologue() {
436   {  AllowHeapAllocation for_the_first_part_of_prologue;
437     isolate_->transcendental_cache()->Clear();
438     ClearJSFunctionResultCaches();
439     gc_count_++;
440     unflattened_strings_length_ = 0;
441
442     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443       mark_compact_collector()->EnableCodeFlushing(true);
444     }
445
446 #ifdef VERIFY_HEAP
447     if (FLAG_verify_heap) {
448       Verify();
449     }
450 #endif
451   }
452
453 #ifdef DEBUG
454   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455
456   if (FLAG_gc_verbose) Print();
457
458   ReportStatisticsBeforeGC();
459 #endif  // DEBUG
460
461   store_buffer()->GCPrologue();
462 }
463
464
465 intptr_t Heap::SizeOfObjects() {
466   intptr_t total = 0;
467   AllSpaces spaces(this);
468   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469     total += space->SizeOfObjects();
470   }
471   return total;
472 }
473
474
475 void Heap::RepairFreeListsAfterBoot() {
476   PagedSpaces spaces(this);
477   for (PagedSpace* space = spaces.next();
478        space != NULL;
479        space = spaces.next()) {
480     space->RepairFreeListsAfterBoot();
481   }
482 }
483
484
485 void Heap::GarbageCollectionEpilogue() {
486   store_buffer()->GCEpilogue();
487
488   // In release mode, we only zap the from space under heap verification.
489   if (Heap::ShouldZapGarbage()) {
490     ZapFromSpace();
491   }
492
493 #ifdef VERIFY_HEAP
494   if (FLAG_verify_heap) {
495     Verify();
496   }
497 #endif
498
499   AllowHeapAllocation for_the_rest_of_the_epilogue;
500
501 #ifdef DEBUG
502   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503   if (FLAG_print_handles) PrintHandles();
504   if (FLAG_gc_verbose) Print();
505   if (FLAG_code_stats) ReportCodeStatistics("After GC");
506 #endif
507   if (FLAG_deopt_every_n_garbage_collections > 0) {
508     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509       Deoptimizer::DeoptimizeAll(isolate());
510       gcs_since_last_deopt_ = 0;
511     }
512   }
513
514   isolate_->counters()->alive_after_last_gc()->Set(
515       static_cast<int>(SizeOfObjects()));
516
517   isolate_->counters()->string_table_capacity()->Set(
518       string_table()->Capacity());
519   isolate_->counters()->number_of_symbols()->Set(
520       string_table()->NumberOfElements());
521
522   if (CommittedMemory() > 0) {
523     isolate_->counters()->external_fragmentation_total()->AddSample(
524         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
525
526     isolate_->counters()->heap_fraction_map_space()->AddSample(
527         static_cast<int>(
528             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529     isolate_->counters()->heap_fraction_cell_space()->AddSample(
530         static_cast<int>(
531             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532     isolate_->counters()->heap_fraction_property_cell_space()->
533         AddSample(static_cast<int>(
534             (property_cell_space()->CommittedMemory() * 100.0) /
535             CommittedMemory()));
536
537     isolate_->counters()->heap_sample_total_committed()->AddSample(
538         static_cast<int>(CommittedMemory() / KB));
539     isolate_->counters()->heap_sample_total_used()->AddSample(
540         static_cast<int>(SizeOfObjects() / KB));
541     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542         static_cast<int>(map_space()->CommittedMemory() / KB));
543     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544         static_cast<int>(cell_space()->CommittedMemory() / KB));
545     isolate_->counters()->
546         heap_sample_property_cell_space_committed()->
547             AddSample(static_cast<int>(
548                 property_cell_space()->CommittedMemory() / KB));
549   }
550
551 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
552   isolate_->counters()->space##_bytes_available()->Set(                        \
553       static_cast<int>(space()->Available()));                                 \
554   isolate_->counters()->space##_bytes_committed()->Set(                        \
555       static_cast<int>(space()->CommittedMemory()));                           \
556   isolate_->counters()->space##_bytes_used()->Set(                             \
557       static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
559   if (space()->CommittedMemory() > 0) {                                        \
560     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
561         static_cast<int>(100 -                                                 \
562             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563   }
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
565   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
566   UPDATE_FRAGMENTATION_FOR_SPACE(space)
567
568   UPDATE_COUNTERS_FOR_SPACE(new_space)
569   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
579
580 #if defined(DEBUG)
581   ReportStatisticsAfterGC();
582 #endif  // DEBUG
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584   isolate_->debug()->AfterGarbageCollection();
585 #endif  // ENABLE_DEBUGGER_SUPPORT
586 }
587
588
589 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590   // Since we are ignoring the return value, the exact choice of space does
591   // not matter, so long as we do not specify NEW_SPACE, which would not
592   // cause a full GC.
593   mark_compact_collector_.SetFlags(flags);
594   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595   mark_compact_collector_.SetFlags(kNoGCFlags);
596 }
597
598
599 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600   // Since we are ignoring the return value, the exact choice of space does
601   // not matter, so long as we do not specify NEW_SPACE, which would not
602   // cause a full GC.
603   // A major GC would invoke weak handle callbacks on weakly reachable
604   // handles, but won't collect weakly reachable objects until the next
605   // major GC.  Therefore, if we collect aggressively and a weak handle
606   // callback has been invoked, we rerun the major GC to release objects
607   // which become garbage.
608   // Note: as weak callbacks can execute arbitrary code, we cannot
609   // hope that eventually there will be no weak callback invocations.
610   // Therefore we stop recollecting after several attempts.
611   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612                                      kReduceMemoryFootprintMask);
613   isolate_->compilation_cache()->Clear();
614   const int kMaxNumberOfAttempts = 7;
615   const int kMinNumberOfAttempts = 2;
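  // Keep collecting while the previous GC reports that the next one is likely
  // to free more memory, but always run at least kMinNumberOfAttempts and at
  // most kMaxNumberOfAttempts collections.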
616   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618         attempt + 1 >= kMinNumberOfAttempts) {
619       break;
620     }
621   }
622   mark_compact_collector()->SetFlags(kNoGCFlags);
623   new_space_.Shrink();
624   UncommitFromSpace();
625   incremental_marking()->UncommitMarkingDeque();
626 }
627
628
629 bool Heap::CollectGarbage(AllocationSpace space,
630                           GarbageCollector collector,
631                           const char* gc_reason,
632                           const char* collector_reason) {
633   // The VM is in the GC state until exiting this function.
634   VMState<GC> state(isolate_);
635
636 #ifdef DEBUG
637   // Reset the allocation timeout to the GC interval, but make sure to
638   // allow at least a few allocations after a collection. The reason
639   // for this is that we have a lot of allocation sequences and we
640   // assume that a garbage collection will allow the subsequent
641   // allocation attempts to go through.
642   allocation_timeout_ = Max(6, FLAG_gc_interval);
643 #endif
644
645   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646     if (FLAG_trace_incremental_marking) {
647       PrintF("[IncrementalMarking] Scavenge during marking.\n");
648     }
649   }
650
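  // If a full GC was requested while incremental marking is in progress and
  // the marker is not ready to finish, give it one more large step; if
  // marking is still incomplete, fall back to a scavenge and let marking
  // finish in a later cycle.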
651   if (collector == MARK_COMPACTOR &&
652       !mark_compact_collector()->abort_incremental_marking() &&
653       !incremental_marking()->IsStopped() &&
654       !incremental_marking()->should_hurry() &&
655       FLAG_incremental_marking_steps) {
656     // Make progress in incremental marking.
657     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660     if (!incremental_marking()->IsComplete()) {
661       if (FLAG_trace_incremental_marking) {
662         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
663       }
664       collector = SCAVENGER;
665       collector_reason = "incremental marking delaying mark-sweep";
666     }
667   }
668
669   bool next_gc_likely_to_collect_more = false;
670
671   { GCTracer tracer(this, gc_reason, collector_reason);
672     ASSERT(AllowHeapAllocation::IsAllowed());
673     DisallowHeapAllocation no_allocation_during_gc;
674     GarbageCollectionPrologue();
675     // The GC count was incremented in the prologue.  Tell the tracer about
676     // it.
677     tracer.set_gc_count(gc_count_);
678
679     // Tell the tracer which collector we've selected.
680     tracer.set_collector(collector);
681
682     {
683       HistogramTimerScope histogram_timer_scope(
684           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685                                    : isolate_->counters()->gc_compactor());
686       next_gc_likely_to_collect_more =
687           PerformGarbageCollection(collector, &tracer);
688     }
689
690     GarbageCollectionEpilogue();
691   }
692
693   // Start incremental marking for the next cycle. The heap snapshot
694   // generator needs incremental marking to stay off after it aborted.
695   if (!mark_compact_collector()->abort_incremental_marking() &&
696       incremental_marking()->IsStopped() &&
697       incremental_marking()->WorthActivating() &&
698       NextGCIsLikelyToBeFull()) {
699     incremental_marking()->Start();
700   }
701
702   return next_gc_likely_to_collect_more;
703 }
704
705
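// Performs a young-generation collection; while incremental marking is
// running, a full mark-compact collection is performed instead.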
706 void Heap::PerformScavenge() {
707   GCTracer tracer(this, NULL, NULL);
708   if (incremental_marking()->IsStopped()) {
709     PerformGarbageCollection(SCAVENGER, &tracer);
710   } else {
711     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
712   }
713 }
714
715
716 void Heap::MoveElements(FixedArray* array,
717                         int dst_index,
718                         int src_index,
719                         int len) {
720   if (len == 0) return;
721
722   ASSERT(array->map() != HEAP->fixed_cow_array_map());
723   Object** dst_objects = array->data_start() + dst_index;
724   OS::MemMove(dst_objects,
725               array->data_start() + src_index,
726               len * kPointerSize);
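  // If the array lives in old space, the moved slots may now hold new-space
  // pointers that the store buffer does not know about, so record a write
  // for each such slot.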
727   if (!InNewSpace(array)) {
728     for (int i = 0; i < len; i++) {
729       // TODO(hpayer): check store buffer for entries
730       if (InNewSpace(dst_objects[i])) {
731         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
732       }
733     }
734   }
735   incremental_marking()->RecordWrites(array);
736 }
737
738
739 #ifdef VERIFY_HEAP
740 // Helper class for verifying the string table.
741 class StringTableVerifier : public ObjectVisitor {
742  public:
743   void VisitPointers(Object** start, Object** end) {
744     // Visit all HeapObject pointers in [start, end).
745     for (Object** p = start; p < end; p++) {
746       if ((*p)->IsHeapObject()) {
747         // Check that the string is actually internalized.
748         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
749               (*p)->IsInternalizedString());
750       }
751     }
752   }
753 };
754
755
756 static void VerifyStringTable() {
757   StringTableVerifier verifier;
758   HEAP->string_table()->IterateElements(&verifier);
759 }
760 #endif  // VERIFY_HEAP
761
762
763 static bool AbortIncrementalMarkingAndCollectGarbage(
764     Heap* heap,
765     AllocationSpace space,
766     const char* gc_reason = NULL) {
767   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
768   bool result = heap->CollectGarbage(space, gc_reason);
769   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
770   return result;
771 }
772
773
774 void Heap::ReserveSpace(
775     int *sizes,
776     Address *locations_out) {
777   bool gc_performed = true;
778   int counter = 0;
779   static const int kThreshold = 20;
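  // Try to carve out the requested sizes in each space, collecting garbage
  // whenever an allocation fails; give up after kThreshold rounds that still
  // required a GC.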
780   while (gc_performed && counter++ < kThreshold) {
781     gc_performed = false;
782     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
783     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
784       if (sizes[space] != 0) {
785         MaybeObject* allocation;
786         if (space == NEW_SPACE) {
787           allocation = new_space()->AllocateRaw(sizes[space]);
788         } else {
789           allocation = paged_space(space)->AllocateRaw(sizes[space]);
790         }
791         FreeListNode* node;
792         if (!allocation->To<FreeListNode>(&node)) {
793           if (space == NEW_SPACE) {
794             Heap::CollectGarbage(NEW_SPACE,
795                                  "failed to reserve space in the new space");
796           } else {
797             AbortIncrementalMarkingAndCollectGarbage(
798                 this,
799                 static_cast<AllocationSpace>(space),
800                 "failed to reserve space in paged space");
801           }
802           gc_performed = true;
803           break;
804         } else {
805           // Mark with a free list node, in case we have a GC before
806           // deserializing.
807           node->set_size(this, sizes[space]);
808           locations_out[space] = node->address();
809         }
810       }
811     }
812   }
813
814   if (gc_performed) {
815     // Failed to reserve the space after several attempts.
816     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
817   }
818 }
819
820
821 void Heap::EnsureFromSpaceIsCommitted() {
822   if (new_space_.CommitFromSpaceIfNeeded()) return;
823
824   // Committing memory to from space failed.
825   // Memory is exhausted and we will die.
826   V8::FatalProcessOutOfMemory("Committing semi space failed.");
827 }
828
829
830 void Heap::ClearJSFunctionResultCaches() {
831   if (isolate_->bootstrapper()->IsActive()) return;
832
833   Object* context = native_contexts_list_;
834   while (!context->IsUndefined()) {
835     // Get the caches for this context. GC can happen when the context
836     // is not fully initialized, so the caches can be undefined.
837     Object* caches_or_undefined =
838         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
839     if (!caches_or_undefined->IsUndefined()) {
840       FixedArray* caches = FixedArray::cast(caches_or_undefined);
841       // Clear the caches:
842       int length = caches->length();
843       for (int i = 0; i < length; i++) {
844         JSFunctionResultCache::cast(caches->get(i))->Clear();
845       }
846     }
847     // Get the next context:
848     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
849   }
850 }
851
852
853 void Heap::ClearNormalizedMapCaches() {
854   if (isolate_->bootstrapper()->IsActive() &&
855       !incremental_marking()->IsMarking()) {
856     return;
857   }
858
859   Object* context = native_contexts_list_;
860   while (!context->IsUndefined()) {
861     // GC can happen when the context is not fully initialized,
862     // so the cache can be undefined.
863     Object* cache =
864         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
865     if (!cache->IsUndefined()) {
866       NormalizedMapCache::cast(cache)->Clear();
867     }
868     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
869   }
870 }
871
872
873 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
874   double survival_rate =
875       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
876       start_new_space_size;
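  // The survival rate is the percentage of the new space contents, measured
  // at the start of this collection, that survived it.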
877
878   if (survival_rate > kYoungSurvivalRateHighThreshold) {
879     high_survival_rate_period_length_++;
880   } else {
881     high_survival_rate_period_length_ = 0;
882   }
883
884   if (survival_rate < kYoungSurvivalRateLowThreshold) {
885     low_survival_rate_period_length_++;
886   } else {
887     low_survival_rate_period_length_ = 0;
888   }
889
890   double survival_rate_diff = survival_rate_ - survival_rate;
891
892   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
893     set_survival_rate_trend(DECREASING);
894   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
895     set_survival_rate_trend(INCREASING);
896   } else {
897     set_survival_rate_trend(STABLE);
898   }
899
900   survival_rate_ = survival_rate;
901 }
902
903 bool Heap::PerformGarbageCollection(GarbageCollector collector,
904                                     GCTracer* tracer) {
905   bool next_gc_likely_to_collect_more = false;
906
907   if (collector != SCAVENGER) {
908     PROFILE(isolate_, CodeMovingGCEvent());
909   }
910
911 #ifdef VERIFY_HEAP
912   if (FLAG_verify_heap) {
913     VerifyStringTable();
914   }
915 #endif
916
917   GCType gc_type =
918       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
919
920   {
921     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
922     VMState<EXTERNAL> state(isolate_);
923     HandleScope handle_scope(isolate_);
924     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
925   }
926
927   EnsureFromSpaceIsCommitted();
928
929   int start_new_space_size = Heap::new_space()->SizeAsInt();
930
931   if (IsHighSurvivalRate()) {
932     // We speed up the incremental marker if it is running so that it
933     // does not fall behind the rate of promotion, which would cause a
934     // constantly growing old space.
935     incremental_marking()->NotifyOfHighPromotionRate();
936   }
937
938   if (collector == MARK_COMPACTOR) {
939     // Perform mark-sweep with optional compaction.
940     MarkCompact(tracer);
941     sweep_generation_++;
942
943     UpdateSurvivalRateTrend(start_new_space_size);
944
945     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
946
947     old_generation_allocation_limit_ =
948         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
949
950     old_gen_exhausted_ = false;
951   } else {
952     tracer_ = tracer;
953     Scavenge();
954     tracer_ = NULL;
955
956     UpdateSurvivalRateTrend(start_new_space_size);
957   }
958
959   if (!new_space_high_promotion_mode_active_ &&
960       new_space_.Capacity() == new_space_.MaximumCapacity() &&
961       IsStableOrIncreasingSurvivalTrend() &&
962       IsHighSurvivalRate()) {
963     // Stable high survival rates even though the young generation is at
964     // maximum capacity indicate that most objects will be promoted.
965     // To decrease scavenger pauses and final mark-sweep pauses, we
966     // have to limit the maximal capacity of the young generation.
967     SetNewSpaceHighPromotionModeActive(true);
968     if (FLAG_trace_gc) {
969       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
970                new_space_.InitialCapacity() / MB);
971     }
972     // Support for global pre-tenuring uses the high promotion mode as a
973     // heuristic indicator of whether to pretenure or not, so we trigger
974     // deoptimization here to take advantage of pre-tenuring as soon as
975     // possible.
976     if (FLAG_pretenuring) {
977       isolate_->stack_guard()->FullDeopt();
978     }
979   } else if (new_space_high_promotion_mode_active_ &&
980       IsStableOrDecreasingSurvivalTrend() &&
981       IsLowSurvivalRate()) {
982     // Decreasing low survival rates might indicate that the above high
983     // promotion mode is over and we should allow the young generation
984     // to grow again.
985     SetNewSpaceHighPromotionModeActive(false);
986     if (FLAG_trace_gc) {
987       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
988                new_space_.MaximumCapacity() / MB);
989     }
990     // Trigger deoptimization here to turn off pre-tenuring as soon as
991     // possible.
992     if (FLAG_pretenuring) {
993       isolate_->stack_guard()->FullDeopt();
994     }
995   }
996
997   if (new_space_high_promotion_mode_active_ &&
998       new_space_.Capacity() > new_space_.InitialCapacity()) {
999     new_space_.Shrink();
1000   }
1001
1002   isolate_->counters()->objs_since_last_young()->Set(0);
1003
1004   // Callbacks that fire after this point might trigger nested GCs and
1005   // restart incremental marking, so the assertion can't be moved down.
1006   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1007
1008   gc_post_processing_depth_++;
1009   { AllowHeapAllocation allow_allocation;
1010     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1011     next_gc_likely_to_collect_more =
1012         isolate_->global_handles()->PostGarbageCollectionProcessing(
1013             collector, tracer);
1014   }
1015   gc_post_processing_depth_--;
1016
1017   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1018
1019   // Update relocatables.
1020   Relocatable::PostGarbageCollectionProcessing();
1021
1022   if (collector == MARK_COMPACTOR) {
1023     // Register the amount of external allocated memory.
1024     amount_of_external_allocated_memory_at_last_global_gc_ =
1025         amount_of_external_allocated_memory_;
1026   }
1027
1028   {
1029     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1030     VMState<EXTERNAL> state(isolate_);
1031     HandleScope handle_scope(isolate_);
1032     CallGCEpilogueCallbacks(gc_type);
1033   }
1034
1035 #ifdef VERIFY_HEAP
1036   if (FLAG_verify_heap) {
1037     VerifyStringTable();
1038   }
1039 #endif
1040
1041   return next_gc_likely_to_collect_more;
1042 }
1043
1044
1045 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1046   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1047     global_gc_prologue_callback_();
1048   }
1049   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1050     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1051       gc_prologue_callbacks_[i].callback(gc_type, flags);
1052     }
1053   }
1054 }
1055
1056
1057 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1058   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1059     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1060       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1061     }
1062   }
1063   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1064     global_gc_epilogue_callback_();
1065   }
1066 }
1067
1068
1069 void Heap::MarkCompact(GCTracer* tracer) {
1070   gc_state_ = MARK_COMPACT;
1071   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1072
1073   mark_compact_collector_.Prepare(tracer);
1074
1075   ms_count_++;
1076   tracer->set_full_gc_count(ms_count_);
1077
1078   MarkCompactPrologue();
1079
1080   mark_compact_collector_.CollectGarbage();
1081
1082   LOG(isolate_, ResourceEvent("markcompact", "end"));
1083
1084   gc_state_ = NOT_IN_GC;
1085
1086   isolate_->counters()->objs_since_last_full()->Set(0);
1087
1088   contexts_disposed_ = 0;
1089
1090   flush_monomorphic_ics_ = false;
1091 }
1092
1093
1094 void Heap::MarkCompactPrologue() {
1095   // At any old GC clear the keyed lookup cache to enable collection of unused
1096   // maps.
1097   isolate_->keyed_lookup_cache()->Clear();
1098   isolate_->context_slot_cache()->Clear();
1099   isolate_->descriptor_lookup_cache()->Clear();
1100   RegExpResultsCache::Clear(string_split_cache());
1101   RegExpResultsCache::Clear(regexp_multiple_cache());
1102
1103   isolate_->compilation_cache()->MarkCompactPrologue();
1104
1105   CompletelyClearInstanceofCache();
1106
1107   FlushNumberStringCache();
1108   if (FLAG_cleanup_code_caches_at_gc) {
1109     polymorphic_code_cache()->set_cache(undefined_value());
1110   }
1111
1112   ClearNormalizedMapCaches();
1113 }
1114
1115
1116 // Helper class for copying HeapObjects
1117 class ScavengeVisitor: public ObjectVisitor {
1118  public:
1119   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1120
1121   void VisitPointer(Object** p) { ScavengePointer(p); }
1122
1123   void VisitPointers(Object** start, Object** end) {
1124     // Copy all HeapObject pointers in [start, end)
1125     for (Object** p = start; p < end; p++) ScavengePointer(p);
1126   }
1127
1128  private:
1129   void ScavengePointer(Object** p) {
1130     Object* object = *p;
1131     if (!heap_->InNewSpace(object)) return;
1132     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1133                          reinterpret_cast<HeapObject*>(object));
1134   }
1135
1136   Heap* heap_;
1137 };
1138
1139
1140 #ifdef VERIFY_HEAP
1141 // Visitor class to verify pointers in code or data space do not point into
1142 // new space.
1143 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1144  public:
1145   void VisitPointers(Object** start, Object**end) {
1146     for (Object** current = start; current < end; current++) {
1147       if ((*current)->IsHeapObject()) {
1148         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1149       }
1150     }
1151   }
1152 };
1153
1154
1155 static void VerifyNonPointerSpacePointers() {
1156   // Verify that there are no pointers to new space in spaces where we
1157   // do not expect them.
1158   VerifyNonPointerSpacePointersVisitor v;
1159   HeapObjectIterator code_it(HEAP->code_space());
1160   for (HeapObject* object = code_it.Next();
1161        object != NULL; object = code_it.Next())
1162     object->Iterate(&v);
1163
1164   // The old data space is normally swept conservatively, so the iterator
1165   // does not work on it; in that case we skip this check.
1166   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1167     HeapObjectIterator data_it(HEAP->old_data_space());
1168     for (HeapObject* object = data_it.Next();
1169          object != NULL; object = data_it.Next())
1170       object->Iterate(&v);
1171   }
1172 }
1173 #endif  // VERIFY_HEAP
1174
1175
1176 void Heap::CheckNewSpaceExpansionCriteria() {
1177   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1178       survived_since_last_expansion_ > new_space_.Capacity() &&
1179       !new_space_high_promotion_mode_active_) {
1180     // Grow the size of new space if there is room to grow, enough data
1181     // has survived scavenge since the last expansion and we are not in
1182     // high promotion mode.
1183     new_space_.Grow();
1184     survived_since_last_expansion_ = 0;
1185   }
1186 }
1187
1188
1189 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1190   return heap->InNewSpace(*p) &&
1191       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1192 }
1193
1194
1195 void Heap::ScavengeStoreBufferCallback(
1196     Heap* heap,
1197     MemoryChunk* page,
1198     StoreBufferEvent event) {
1199   heap->store_buffer_rebuilder_.Callback(page, event);
1200 }
1201
1202
1203 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1204   if (event == kStoreBufferStartScanningPagesEvent) {
1205     start_of_current_page_ = NULL;
1206     current_page_ = NULL;
1207   } else if (event == kStoreBufferScanningPageEvent) {
1208     if (current_page_ != NULL) {
1209       // If this page already overflowed the store buffer during this iteration.
1210       if (current_page_->scan_on_scavenge()) {
1211         // Then we should wipe out the entries that have been added for it.
1212         store_buffer_->SetTop(start_of_current_page_);
1213       } else if (store_buffer_->Top() - start_of_current_page_ >=
1214                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1215         // Did we find too many pointers in the previous page?  The heuristic is
1216         // that no page can take more than 1/5 of the remaining slots in the
1217         // buffer.
1218         current_page_->set_scan_on_scavenge(true);
1219         store_buffer_->SetTop(start_of_current_page_);
1220       } else {
1221         // In this case the page we scanned took a reasonable number of slots in
1222         // the store buffer.  It has now been rehabilitated and is no longer
1223         // marked scan_on_scavenge.
1224         ASSERT(!current_page_->scan_on_scavenge());
1225       }
1226     }
1227     start_of_current_page_ = store_buffer_->Top();
1228     current_page_ = page;
1229   } else if (event == kStoreBufferFullEvent) {
1230     // The current page overflowed the store buffer again.  Wipe out its entries
1231     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1232     // several times while scanning.
1233     if (current_page_ == NULL) {
1234       // Store Buffer overflowed while scanning promoted objects.  These are not
1235       // in any particular page, though they are likely to be clustered by the
1236       // allocation routines.
1237       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1238     } else {
1239       // Store Buffer overflowed while scanning a particular old space page for
1240       // pointers to new space.
1241       ASSERT(current_page_ == page);
1242       ASSERT(page != NULL);
1243       current_page_->set_scan_on_scavenge(true);
1244       ASSERT(start_of_current_page_ != store_buffer_->Top());
1245       store_buffer_->SetTop(start_of_current_page_);
1246     }
1247   } else {
1248     UNREACHABLE();
1249   }
1250 }
1251
1252
1253 void PromotionQueue::Initialize() {
1254   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1255   // entries (where each is a pair of intptr_t). This allows us to simplify
1256   // the test for when to switch pages.
1257   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1258          == 0);
1259   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1260   front_ = rear_ =
1261       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1262   emergency_stack_ = NULL;
1263   guard_ = false;
1264 }
1265
1266
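// Copies the promotion queue entries that lie on the page containing the
// queue head onto a heap-allocated emergency stack; subsequent entries are
// pushed onto that stack as well.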
1267 void PromotionQueue::RelocateQueueHead() {
1268   ASSERT(emergency_stack_ == NULL);
1269
1270   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1271   intptr_t* head_start = rear_;
1272   intptr_t* head_end =
1273       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1274
1275   int entries_count =
1276       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1277
1278   emergency_stack_ = new List<Entry>(2 * entries_count);
1279
1280   while (head_start != head_end) {
1281     int size = static_cast<int>(*(head_start++));
1282     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1283     emergency_stack_->Add(Entry(obj, size));
1284   }
1285   rear_ = head_end;
1286 }
1287
1288
1289 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1290  public:
1291   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1292
1293   virtual Object* RetainAs(Object* object) {
1294     if (!heap_->InFromSpace(object)) {
1295       return object;
1296     }
1297
1298     MapWord map_word = HeapObject::cast(object)->map_word();
1299     if (map_word.IsForwardingAddress()) {
1300       return map_word.ToForwardingAddress();
1301     }
1302     return NULL;
1303   }
1304
1305  private:
1306   Heap* heap_;
1307 };
1308
1309
1310 void Heap::Scavenge() {
1311   RelocationLock relocation_lock(this);
1312
1313 #ifdef VERIFY_HEAP
1314   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1315 #endif
1316
1317   gc_state_ = SCAVENGE;
1318
1319   // Implements Cheney's copying algorithm
1320   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1321
1322   // Clear descriptor cache.
1323   isolate_->descriptor_lookup_cache()->Clear();
1324
1325   // Used for updating survived_since_last_expansion_ at function end.
1326   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1327
1328   CheckNewSpaceExpansionCriteria();
1329
1330   SelectScavengingVisitorsTable();
1331
1332   incremental_marking()->PrepareForScavenge();
1333
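  // Make sure the sweeper has made enough progress in the old spaces to
  // absorb the objects that may be promoted out of new space.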
1334   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1335   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1336
1337   // Flip the semispaces.  After flipping, to space is empty, from space has
1338   // live objects.
1339   new_space_.Flip();
1340   new_space_.ResetAllocationInfo();
1341
1342   // We need to sweep newly copied objects which can be either in the
1343   // to space or promoted to the old generation.  For to-space
1344   // objects, we treat the bottom of the to space as a queue.  Newly
1345   // copied and unswept objects lie between a 'front' mark and the
1346   // allocation pointer.
1347   //
1348   // Promoted objects can go into various old-generation spaces, and
1349   // can be allocated internally in the spaces (from the free list).
1350   // We treat the top of the to space as a queue of addresses of
1351   // promoted objects.  The addresses of newly promoted and unswept
1352   // objects lie between a 'front' mark and a 'rear' mark that is
1353   // updated as a side effect of promoting an object.
1354   //
1355   // There is guaranteed to be enough room at the top of the to space
1356   // for the addresses of promoted objects: every object promoted
1357   // frees up its size in bytes from the top of the new space, and
1358   // objects are at least one pointer in size.
1359   Address new_space_front = new_space_.ToSpaceStart();
1360   promotion_queue_.Initialize();
1361
1362 #ifdef DEBUG
1363   store_buffer()->Clean();
1364 #endif
1365
1366   ScavengeVisitor scavenge_visitor(this);
1367   // Copy roots.
1368   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1369
1370   // Copy objects reachable from the old generation.
1371   {
1372     StoreBufferRebuildScope scope(this,
1373                                   store_buffer(),
1374                                   &ScavengeStoreBufferCallback);
1375     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1376   }
1377
1378   // Copy objects reachable from simple cells by scavenging cell values
1379   // directly.
1380   HeapObjectIterator cell_iterator(cell_space_);
1381   for (HeapObject* heap_object = cell_iterator.Next();
1382        heap_object != NULL;
1383        heap_object = cell_iterator.Next()) {
1384     if (heap_object->IsCell()) {
1385       Cell* cell = Cell::cast(heap_object);
1386       Address value_address = cell->ValueAddress();
1387       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1388     }
1389   }
1390
1391   // Copy objects reachable from global property cells by scavenging global
1392   // property cell values directly.
1393   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1394   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1395        heap_object != NULL;
1396        heap_object = js_global_property_cell_iterator.Next()) {
1397     if (heap_object->IsPropertyCell()) {
1398       PropertyCell* cell = PropertyCell::cast(heap_object);
1399       Address value_address = cell->ValueAddress();
1400       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1401       Address type_address = cell->TypeAddress();
1402       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1403     }
1404   }
1405
1406   // Copy objects reachable from the code flushing candidates list.
1407   MarkCompactCollector* collector = mark_compact_collector();
1408   if (collector->is_code_flushing_enabled()) {
1409     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1410   }
1411
1412   // Scavenge object reachable from the native contexts list directly.
1413   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1414
1415   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1416
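  // Process object groups, rescavenging as long as group processing keeps
  // discovering additional reachable new-space objects.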
1417   while (isolate()->global_handles()->IterateObjectGroups(
1418       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1419     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1420   }
1421   isolate()->global_handles()->RemoveObjectGroups();
1422   isolate()->global_handles()->RemoveImplicitRefGroups();
1423
1424   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1425       &IsUnscavengedHeapObject);
1426   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1427       &scavenge_visitor);
1428   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1429
1430   UpdateNewSpaceReferencesInExternalStringTable(
1431       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1432
1433   promotion_queue_.Destroy();
1434
1435   if (!FLAG_watch_ic_patching) {
1436     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1437   }
1438   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1439
1440   ScavengeWeakObjectRetainer weak_object_retainer(this);
1441   ProcessWeakReferences(&weak_object_retainer);
1442
1443   ASSERT(new_space_front == new_space_.top());
1444
1445   // Set age mark.
1446   new_space_.set_age_mark(new_space_.top());
1447
1448   new_space_.LowerInlineAllocationLimit(
1449       new_space_.inline_allocation_limit_step());
1450
1451   // Update how much has survived scavenge.
1452   IncrementYoungSurvivorsCounter(static_cast<int>(
1453       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1454
1455   LOG(isolate_, ResourceEvent("scavenge", "end"));
1456
1457   gc_state_ = NOT_IN_GC;
1458
1459   scavenges_since_last_idle_round_++;
1460 }
1461
1462
1463 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1464                                                                 Object** p) {
1465   MapWord first_word = HeapObject::cast(*p)->map_word();
1466
1467   if (!first_word.IsForwardingAddress()) {
1468     // Unreachable external string can be finalized.
1469     heap->FinalizeExternalString(String::cast(*p));
1470     return NULL;
1471   }
1472
1473   // String is still reachable.
1474   return String::cast(first_word.ToForwardingAddress());
1475 }
1476
1477
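// After a scavenge the new-space external string table still refers to
// from-space objects. Rewrite each entry through updater_func: dead strings
// are dropped (the updater finalizes them), promoted strings move to the
// old-space list, and the surviving new-space entries are compacted in place.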
1478 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1479     ExternalStringTableUpdaterCallback updater_func) {
1480 #ifdef VERIFY_HEAP
1481   if (FLAG_verify_heap) {
1482     external_string_table_.Verify();
1483   }
1484 #endif
1485
1486   if (external_string_table_.new_space_strings_.is_empty()) return;
1487
1488   Object** start = &external_string_table_.new_space_strings_[0];
1489   Object** end = start + external_string_table_.new_space_strings_.length();
1490   Object** last = start;
1491
1492   for (Object** p = start; p < end; ++p) {
1493     ASSERT(InFromSpace(*p));
1494     String* target = updater_func(this, p);
1495
1496     if (target == NULL) continue;
1497
1498     ASSERT(target->IsExternalString());
1499
1500     if (InNewSpace(target)) {
1501       // String is still in new space.  Update the table entry.
1502       *last = target;
1503       ++last;
1504     } else {
1505       // String got promoted.  Move it to the old string list.
1506       external_string_table_.AddOldString(target);
1507     }
1508   }
1509
1510   ASSERT(last <= end);
1511   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1512 }
1513
1514
1515 void Heap::UpdateReferencesInExternalStringTable(
1516     ExternalStringTableUpdaterCallback updater_func) {
1517
1518   // Update old space string references.
1519   if (external_string_table_.old_space_strings_.length() > 0) {
1520     Object** start = &external_string_table_.old_space_strings_[0];
1521     Object** end = start + external_string_table_.old_space_strings_.length();
1522     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1523   }
1524
1525   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1526 }
1527
1528
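// WeakListVisitor<T> is a traits class describing how to walk a weak list of
// T: WeakNext()/SetWeakNext() read and write the weak link, WeakNextOffset()
// gives the link's field offset (used for slot recording while compacting),
// and VisitLiveObject()/VisitPhantomObject() are hooks called for retained
// and dropped elements. VisitWeakList() below uses these traits to unlink
// elements the retainer does not keep and to relink the survivors.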
1529 template <class T>
1530 struct WeakListVisitor;
1531
1532
1533 template <class T>
1534 static Object* VisitWeakList(Heap* heap,
1535                              Object* list,
1536                              WeakObjectRetainer* retainer,
1537                              bool record_slots) {
1538   Object* undefined = heap->undefined_value();
1539   Object* head = undefined;
1540   T* tail = NULL;
1541   MarkCompactCollector* collector = heap->mark_compact_collector();
1542   while (list != undefined) {
1543     // Check whether to keep the candidate in the list.
1544     T* candidate = reinterpret_cast<T*>(list);
1545     Object* retained = retainer->RetainAs(list);
1546     if (retained != NULL) {
1547       if (head == undefined) {
1548         // First element in the list.
1549         head = retained;
1550       } else {
1551         // Subsequent elements in the list.
1552         ASSERT(tail != NULL);
1553         WeakListVisitor<T>::SetWeakNext(tail, retained);
1554         if (record_slots) {
1555           Object** next_slot =
1556             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1557           collector->RecordSlot(next_slot, next_slot, retained);
1558         }
1559       }
1560       // Retained object is new tail.
1561       ASSERT(!retained->IsUndefined());
1562       candidate = reinterpret_cast<T*>(retained);
1563       tail = candidate;
1564
1566       // tail is a live object; visit it.
1567       WeakListVisitor<T>::VisitLiveObject(
1568           heap, tail, retainer, record_slots);
1569     } else {
1570       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1571     }
1572
1573     // Move to next element in the list.
1574     list = WeakListVisitor<T>::WeakNext(candidate);
1575   }
1576
1577   // Terminate the list if there are one or more elements.
1578   if (tail != NULL) {
1579     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1580   }
1581   return head;
1582 }
1583
1584
1585 template<>
1586 struct WeakListVisitor<JSFunction> {
1587   static void SetWeakNext(JSFunction* function, Object* next) {
1588     function->set_next_function_link(next);
1589   }
1590
1591   static Object* WeakNext(JSFunction* function) {
1592     return function->next_function_link();
1593   }
1594
1595   static int WeakNextOffset() {
1596     return JSFunction::kNextFunctionLinkOffset;
1597   }
1598
1599   static void VisitLiveObject(Heap*, JSFunction*,
1600                               WeakObjectRetainer*, bool) {
1601   }
1602
1603   static void VisitPhantomObject(Heap*, JSFunction*) {
1604   }
1605 };
1606
1607
1608 template<>
1609 struct WeakListVisitor<Context> {
1610   static void SetWeakNext(Context* context, Object* next) {
1611     context->set(Context::NEXT_CONTEXT_LINK,
1612                  next,
1613                  UPDATE_WRITE_BARRIER);
1614   }
1615
1616   static Object* WeakNext(Context* context) {
1617     return context->get(Context::NEXT_CONTEXT_LINK);
1618   }
1619
1620   static void VisitLiveObject(Heap* heap,
1621                               Context* context,
1622                               WeakObjectRetainer* retainer,
1623                               bool record_slots) {
1624     // Process the weak list of optimized functions for the context.
1625     Object* function_list_head =
1626         VisitWeakList<JSFunction>(
1627             heap,
1628             context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1629             retainer,
1630             record_slots);
1631     context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1632                  function_list_head,
1633                  UPDATE_WRITE_BARRIER);
1634     if (record_slots) {
1635       Object** optimized_functions =
1636           HeapObject::RawField(
1637               context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1638       heap->mark_compact_collector()->RecordSlot(
1639           optimized_functions, optimized_functions, function_list_head);
1640     }
1641   }
1642
1643   static void VisitPhantomObject(Heap*, Context*) {
1644   }
1645
1646   static int WeakNextOffset() {
1647     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1648   }
1649 };
1650
1651
1652 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1653   // We don't record weak slots during marking or scavenges.
1654   // Instead we do it once when we complete a mark-compact cycle.
1655   // Note that the write barrier has no effect if we are already in the middle
1656   // of a compacting mark-sweep cycle, so we have to record slots manually.
1657   bool record_slots =
1658       gc_state() == MARK_COMPACT &&
1659       mark_compact_collector()->is_compacting();
1660   ProcessArrayBuffers(retainer, record_slots);
1661   ProcessNativeContexts(retainer, record_slots);
1662   ProcessAllocationSites(retainer, record_slots);
1663 }
1664
1665 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1666                                  bool record_slots) {
1667   Object* head =
1668       VisitWeakList<Context>(
1669           this, native_contexts_list(), retainer, record_slots);
1670   // Update the head of the list of contexts.
1671   native_contexts_list_ = head;
1672 }
1673
1674
1675 template<>
1676 struct WeakListVisitor<JSArrayBufferView> {
1677   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1678     obj->set_weak_next(next);
1679   }
1680
1681   static Object* WeakNext(JSArrayBufferView* obj) {
1682     return obj->weak_next();
1683   }
1684
1685   static void VisitLiveObject(Heap*,
1686                               JSArrayBufferView* obj,
1687                               WeakObjectRetainer* retainer,
1688                               bool record_slots) {}
1689
1690   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1691
1692   static int WeakNextOffset() {
1693     return JSArrayBufferView::kWeakNextOffset;
1694   }
1695 };
1696
1697
1698 template<>
1699 struct WeakListVisitor<JSArrayBuffer> {
1700   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1701     obj->set_weak_next(next);
1702   }
1703
1704   static Object* WeakNext(JSArrayBuffer* obj) {
1705     return obj->weak_next();
1706   }
1707
1708   static void VisitLiveObject(Heap* heap,
1709                               JSArrayBuffer* array_buffer,
1710                               WeakObjectRetainer* retainer,
1711                               bool record_slots) {
1712     Object* typed_array_obj =
1713         VisitWeakList<JSArrayBufferView>(
1714             heap,
1715             array_buffer->weak_first_view(),
1716             retainer, record_slots);
1717     array_buffer->set_weak_first_view(typed_array_obj);
1718     if (typed_array_obj != heap->undefined_value() && record_slots) {
1719       Object** slot = HeapObject::RawField(
1720           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1721       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1722     }
1723   }
1724
1725   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1726     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1727   }
1728
1729   static int WeakNextOffset() {
1730     return JSArrayBuffer::kWeakNextOffset;
1731   }
1732 };
1733
1734
1735 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1736                                bool record_slots) {
1737   Object* array_buffer_obj =
1738       VisitWeakList<JSArrayBuffer>(this,
1739                                    array_buffers_list(),
1740                                    retainer, record_slots);
1741   set_array_buffers_list(array_buffer_obj);
1742 }
1743
1744
1745 void Heap::TearDownArrayBuffers() {
1746   Object* undefined = undefined_value();
1747   for (Object* o = array_buffers_list(); o != undefined;) {
1748     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1749     Runtime::FreeArrayBuffer(isolate(), buffer);
1750     o = buffer->weak_next();
1751   }
1752   array_buffers_list_ = undefined;
1753 }
1754
1755
1756 template<>
1757 struct WeakListVisitor<AllocationSite> {
1758   static void SetWeakNext(AllocationSite* obj, Object* next) {
1759     obj->set_weak_next(next);
1760   }
1761
1762   static Object* WeakNext(AllocationSite* obj) {
1763     return obj->weak_next();
1764   }
1765
1766   static void VisitLiveObject(Heap* heap,
1767                               AllocationSite* site,
1768                               WeakObjectRetainer* retainer,
1769                               bool record_slots) {}
1770
1771   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1772
1773   static int WeakNextOffset() {
1774     return AllocationSite::kWeakNextOffset;
1775   }
1776 };
1777
1778
1779 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1780                                   bool record_slots) {
1781   Object* allocation_site_obj =
1782       VisitWeakList<AllocationSite>(this,
1783                                     allocation_sites_list(),
1784                                     retainer, record_slots);
1785   set_allocation_sites_list(allocation_site_obj);
1786 }
1787
1788
1789 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1790   DisallowHeapAllocation no_allocation;
1791
1792   // Both the external string table and the string table may contain
1793   // external strings, but neither lists them exhaustively, nor is the
1794   // intersection set empty.  Therefore we iterate over the external string
1795   // table first, ignoring internalized strings, and then over the
1796   // internalized string table.
1797
1798   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1799    public:
1800     explicit ExternalStringTableVisitorAdapter(
1801         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1802     virtual void VisitPointers(Object** start, Object** end) {
1803       for (Object** p = start; p < end; p++) {
1804         // Visit non-internalized external strings,
1805         // since internalized strings are listed in the string table.
1806         if (!(*p)->IsInternalizedString()) {
1807           ASSERT((*p)->IsExternalString());
1808           visitor_->VisitExternalString(Utils::ToLocal(
1809               Handle<String>(String::cast(*p))));
1810         }
1811       }
1812     }
1813    private:
1814     v8::ExternalResourceVisitor* visitor_;
1815   } external_string_table_visitor(visitor);
1816
1817   external_string_table_.Iterate(&external_string_table_visitor);
1818
1819   class StringTableVisitorAdapter : public ObjectVisitor {
1820    public:
1821     explicit StringTableVisitorAdapter(
1822         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1823     virtual void VisitPointers(Object** start, Object** end) {
1824       for (Object** p = start; p < end; p++) {
1825         if ((*p)->IsExternalString()) {
1826           ASSERT((*p)->IsInternalizedString());
1827           visitor_->VisitExternalString(Utils::ToLocal(
1828               Handle<String>(String::cast(*p))));
1829         }
1830       }
1831     }
1832    private:
1833     v8::ExternalResourceVisitor* visitor_;
1834   } string_table_visitor(visitor);
1835
1836   string_table()->IterateElements(&string_table_visitor);
1837 }
1838
1839
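// Visitor used by DoScavenge to process pointers inside objects already
// copied to new space: pointers into new space are scavenged, everything
// else is left untouched.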
1840 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1841  public:
1842   static inline void VisitPointer(Heap* heap, Object** p) {
1843     Object* object = *p;
1844     if (!heap->InNewSpace(object)) return;
1845     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1846                          reinterpret_cast<HeapObject*>(object));
1847   }
1848 };
1849
1850
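// Cheney-style scan of the objects copied into new space: new_space_front is
// the scan pointer and new_space_.top() is the allocation pointer. Visiting
// an object may copy further objects (advancing top), so the outer loop runs
// until the two pointers meet. The inner block also drains the promotion
// queue by re-scanning promoted objects for pointers into the from-semispace.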
1851 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1852                          Address new_space_front) {
1853   do {
1854     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1855     // The addresses new_space_front and new_space_.top() define a
1856     // queue of unprocessed copied objects.  Process them until the
1857     // queue is empty.
1858     while (new_space_front != new_space_.top()) {
1859       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1860         HeapObject* object = HeapObject::FromAddress(new_space_front);
1861         new_space_front +=
1862           NewSpaceScavenger::IterateBody(object->map(), object);
1863       } else {
1864         new_space_front =
1865             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1866       }
1867     }
1868
1869     // Promote and process all the to-be-promoted objects.
1870     {
1871       StoreBufferRebuildScope scope(this,
1872                                     store_buffer(),
1873                                     &ScavengeStoreBufferCallback);
1874       while (!promotion_queue()->is_empty()) {
1875         HeapObject* target;
1876         int size;
1877         promotion_queue()->remove(&target, &size);
1878
1879         // A promoted object might already have been partially visited
1880         // during old-space pointer iteration. Thus we search specifically
1881         // for pointers to the from-semispace instead of looking for
1882         // pointers to new space.
1883         ASSERT(!target->IsMap());
1884         IterateAndMarkPointersToFromSpace(target->address(),
1885                                           target->address() + size,
1886                                           &ScavengeObject);
1887       }
1888     }
1889
1890     // Take another spin if there are now unswept objects in new space
1891     // (there are currently no more unswept promoted objects).
1892   } while (new_space_front != new_space_.top());
1893
1894   return new_space_front;
1895 }
1896
1897
1898 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1899
1900
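// EnsureDoubleAligned relies on the caller having allocated one extra
// pointer-sized word (see EvacuateObject, which adds kPointerSize when the
// requested alignment is kDoubleAlignment). If the object start is not
// double-aligned, a one-word filler is placed at the start and the object is
// shifted up by one word; otherwise the spare word at the end becomes the
// filler.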
1901 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1902                                               HeapObject* object,
1903                                               int size));
1904
1905 static HeapObject* EnsureDoubleAligned(Heap* heap,
1906                                        HeapObject* object,
1907                                        int size) {
1908   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1909     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1910     return HeapObject::FromAddress(object->address() + kPointerSize);
1911   } else {
1912     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1913                                kPointerSize);
1914     return object;
1915   }
1916 }
1917
1918
1919 enum LoggingAndProfiling {
1920   LOGGING_AND_PROFILING_ENABLED,
1921   LOGGING_AND_PROFILING_DISABLED
1922 };
1923
1924
1925 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1926
1927
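// ScavengingVisitor supplies the per-object copy routines used during a
// scavenge. Four specializations are instantiated (see
// InitializeScavengingVisitorsTables), and SelectScavengingVisitorsTable
// picks one depending on whether incremental marking is active
// (TRANSFER_MARKS vs. IGNORE_MARKS) and whether logging or profiling is
// enabled.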
1928 template<MarksHandling marks_handling,
1929          LoggingAndProfiling logging_and_profiling_mode>
1930 class ScavengingVisitor : public StaticVisitorBase {
1931  public:
1932   static void Initialize() {
1933     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1934     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1935     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1936     table_.Register(kVisitByteArray, &EvacuateByteArray);
1937     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1938     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1939
1940     table_.Register(kVisitNativeContext,
1941                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1942                         template VisitSpecialized<Context::kSize>);
1943
1944     table_.Register(kVisitConsString,
1945                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1946                         template VisitSpecialized<ConsString::kSize>);
1947
1948     table_.Register(kVisitSlicedString,
1949                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950                         template VisitSpecialized<SlicedString::kSize>);
1951
1952     table_.Register(kVisitSymbol,
1953                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954                         template VisitSpecialized<Symbol::kSize>);
1955
1956     table_.Register(kVisitSharedFunctionInfo,
1957                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1959
1960     table_.Register(kVisitJSWeakMap,
1961                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1962                     Visit);
1963
1964     table_.Register(kVisitJSWeakSet,
1965                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1966                     Visit);
1967
1968     table_.Register(kVisitJSArrayBuffer,
1969                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1970                     Visit);
1971
1972     table_.Register(kVisitJSTypedArray,
1973                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1974                     Visit);
1975
1976     table_.Register(kVisitJSDataView,
1977                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1978                     Visit);
1979
1980     table_.Register(kVisitJSRegExp,
1981                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1982                     Visit);
1983
1984     if (marks_handling == IGNORE_MARKS) {
1985       table_.Register(kVisitJSFunction,
1986                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1987                           template VisitSpecialized<JSFunction::kSize>);
1988     } else {
1989       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1990     }
1991
1992     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1993                                    kVisitDataObject,
1994                                    kVisitDataObjectGeneric>();
1995
1996     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1997                                    kVisitJSObject,
1998                                    kVisitJSObjectGeneric>();
1999
2000     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2001                                    kVisitStruct,
2002                                    kVisitStructGeneric>();
2003   }
2004
2005   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2006     return &table_;
2007   }
2008
2009  private:
2010   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2011
2012   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2013     bool should_record = false;
2014 #ifdef DEBUG
2015     should_record = FLAG_heap_stats;
2016 #endif
2017     should_record = should_record || FLAG_log_gc;
2018     if (should_record) {
2019       if (heap->new_space()->Contains(obj)) {
2020         heap->new_space()->RecordAllocation(obj);
2021       } else {
2022         heap->new_space()->RecordPromotion(obj);
2023       }
2024     }
2025   }
2026
2027   // Helper function used by the evacuation routines to copy a source object
2028   // to an allocated target object and update the forwarding pointer in the
2029   // source object.
2030   INLINE(static void MigrateObject(Heap* heap,
2031                                    HeapObject* source,
2032                                    HeapObject* target,
2033                                    int size)) {
2034     // Copy the content of source to target.
2035     heap->CopyBlock(target->address(), source->address(), size);
2036
2037     // Set the forwarding address.
2038     source->set_map_word(MapWord::FromForwardingAddress(target));
2039
2040     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2041       // Update NewSpace stats if necessary.
2042       RecordCopiedObject(heap, target);
2043       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2044       Isolate* isolate = heap->isolate();
2045       if (isolate->logger()->is_logging_code_events() ||
2046           isolate->cpu_profiler()->is_profiling()) {
2047         if (target->IsSharedFunctionInfo()) {
2048           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2049               source->address(), target->address()));
2050         }
2051       }
2052     }
2053
2054     if (marks_handling == TRANSFER_MARKS) {
2055       if (Marking::TransferColor(source, target)) {
2056         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2057       }
2058     }
2059   }
2060
2061
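  // EvacuateObject first tries to promote the object into old data or old
  // pointer space when ShouldBePromoted() approves; promoted pointer objects
  // are pushed onto the promotion queue so their from-space pointers can be
  // rescanned later. If promotion is not attempted or the old-space
  // allocation fails, the object is copied into to-space, where allocation
  // is expected to succeed (ToObjectUnchecked).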
2062   template<ObjectContents object_contents, int alignment>
2063   static inline void EvacuateObject(Map* map,
2064                                     HeapObject** slot,
2065                                     HeapObject* object,
2066                                     int object_size) {
2067     SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2068     SLOW_ASSERT(object->Size() == object_size);
2069
2070     int allocation_size = object_size;
2071     if (alignment != kObjectAlignment) {
2072       ASSERT(alignment == kDoubleAlignment);
2073       allocation_size += kPointerSize;
2074     }
2075
2076     Heap* heap = map->GetHeap();
2077     if (heap->ShouldBePromoted(object->address(), object_size)) {
2078       MaybeObject* maybe_result;
2079
2080       if (object_contents == DATA_OBJECT) {
2081         maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2082       } else {
2083         maybe_result =
2084             heap->old_pointer_space()->AllocateRaw(allocation_size);
2085       }
2086
2087       Object* result = NULL;  // Initialization to please compiler.
2088       if (maybe_result->ToObject(&result)) {
2089         HeapObject* target = HeapObject::cast(result);
2090
2091         if (alignment != kObjectAlignment) {
2092           target = EnsureDoubleAligned(heap, target, allocation_size);
2093         }
2094
2095         // Order is important: slot might be inside of the target if target
2096         // was allocated over a dead object and slot comes from the store
2097         // buffer.
2098         *slot = target;
2099         MigrateObject(heap, object, target, object_size);
2100
2101         if (object_contents == POINTER_OBJECT) {
2102           if (map->instance_type() == JS_FUNCTION_TYPE) {
2103             heap->promotion_queue()->insert(
2104                 target, JSFunction::kNonWeakFieldsEndOffset);
2105           } else {
2106             heap->promotion_queue()->insert(target, object_size);
2107           }
2108         }
2109
2110         heap->tracer()->increment_promoted_objects_size(object_size);
2111         return;
2112       }
2113     }
2114     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2115     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2116     Object* result = allocation->ToObjectUnchecked();
2117     HeapObject* target = HeapObject::cast(result);
2118
2119     if (alignment != kObjectAlignment) {
2120       target = EnsureDoubleAligned(heap, target, allocation_size);
2121     }
2122
2123     // Order is important: slot might be inside of the target if target
2124     // was allocated over a dead object and slot comes from the store
2125     // buffer.
2126     *slot = target;
2127     MigrateObject(heap, object, target, object_size);
2128     return;
2129   }
2130
2131
2132   static inline void EvacuateJSFunction(Map* map,
2133                                         HeapObject** slot,
2134                                         HeapObject* object) {
2135     ObjectEvacuationStrategy<POINTER_OBJECT>::
2136         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2137
2138     HeapObject* target = *slot;
2139     MarkBit mark_bit = Marking::MarkBitFrom(target);
2140     if (Marking::IsBlack(mark_bit)) {
2141       // This object is black and it might not be rescanned by the marker.
2142       // We should explicitly record the code entry slot for compaction because
2143       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2144       // miss it as it is not HeapObject-tagged.
2145       Address code_entry_slot =
2146           target->address() + JSFunction::kCodeEntryOffset;
2147       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2148       map->GetHeap()->mark_compact_collector()->
2149           RecordCodeEntrySlot(code_entry_slot, code);
2150     }
2151   }
2152
2153
2154   static inline void EvacuateFixedArray(Map* map,
2155                                         HeapObject** slot,
2156                                         HeapObject* object) {
2157     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2158     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2159         map, slot, object, object_size);
2160   }
2161
2162
2163   static inline void EvacuateFixedDoubleArray(Map* map,
2164                                               HeapObject** slot,
2165                                               HeapObject* object) {
2166     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2167     int object_size = FixedDoubleArray::SizeFor(length);
2168     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2169         map, slot, object, object_size);
2170   }
2171
2172
2173   static inline void EvacuateByteArray(Map* map,
2174                                        HeapObject** slot,
2175                                        HeapObject* object) {
2176     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2177     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2178         map, slot, object, object_size);
2179   }
2180
2181
2182   static inline void EvacuateSeqOneByteString(Map* map,
2183                                             HeapObject** slot,
2184                                             HeapObject* object) {
2185     int object_size = SeqOneByteString::cast(object)->
2186         SeqOneByteStringSize(map->instance_type());
2187     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2188         map, slot, object, object_size);
2189   }
2190
2191
2192   static inline void EvacuateSeqTwoByteString(Map* map,
2193                                               HeapObject** slot,
2194                                               HeapObject* object) {
2195     int object_size = SeqTwoByteString::cast(object)->
2196         SeqTwoByteStringSize(map->instance_type());
2197     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2198         map, slot, object, object_size);
2199   }
2200
2201
2202   static inline bool IsShortcutCandidate(int type) {
2203     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2204   }
2205
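  // A cons string whose second part is the empty string is short-circuited
  // to its first part instead of being copied. This is only done when marks
  // are ignored; see SelectScavengingVisitorsTable, which also disables
  // short-circuiting while compacting.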
2206   static inline void EvacuateShortcutCandidate(Map* map,
2207                                                HeapObject** slot,
2208                                                HeapObject* object) {
2209     ASSERT(IsShortcutCandidate(map->instance_type()));
2210
2211     Heap* heap = map->GetHeap();
2212
2213     if (marks_handling == IGNORE_MARKS &&
2214         ConsString::cast(object)->unchecked_second() ==
2215         heap->empty_string()) {
2216       HeapObject* first =
2217           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2218
2219       *slot = first;
2220
2221       if (!heap->InNewSpace(first)) {
2222         object->set_map_word(MapWord::FromForwardingAddress(first));
2223         return;
2224       }
2225
2226       MapWord first_word = first->map_word();
2227       if (first_word.IsForwardingAddress()) {
2228         HeapObject* target = first_word.ToForwardingAddress();
2229
2230         *slot = target;
2231         object->set_map_word(MapWord::FromForwardingAddress(target));
2232         return;
2233       }
2234
2235       heap->DoScavengeObject(first->map(), slot, first);
2236       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2237       return;
2238     }
2239
2240     int object_size = ConsString::kSize;
2241     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2242         map, slot, object, object_size);
2243   }
2244
2245   template<ObjectContents object_contents>
2246   class ObjectEvacuationStrategy {
2247    public:
2248     template<int object_size>
2249     static inline void VisitSpecialized(Map* map,
2250                                         HeapObject** slot,
2251                                         HeapObject* object) {
2252       EvacuateObject<object_contents, kObjectAlignment>(
2253           map, slot, object, object_size);
2254     }
2255
2256     static inline void Visit(Map* map,
2257                              HeapObject** slot,
2258                              HeapObject* object) {
2259       int object_size = map->instance_size();
2260       EvacuateObject<object_contents, kObjectAlignment>(
2261           map, slot, object, object_size);
2262     }
2263   };
2264
2265   static VisitorDispatchTable<ScavengingCallback> table_;
2266 };
2267
2268
2269 template<MarksHandling marks_handling,
2270          LoggingAndProfiling logging_and_profiling_mode>
2271 VisitorDispatchTable<ScavengingCallback>
2272     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2273
2274
2275 static void InitializeScavengingVisitorsTables() {
2276   ScavengingVisitor<TRANSFER_MARKS,
2277                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2278   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2279   ScavengingVisitor<TRANSFER_MARKS,
2280                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2281   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2282 }
2283
2284
2285 void Heap::SelectScavengingVisitorsTable() {
2286   bool logging_and_profiling =
2287       isolate()->logger()->is_logging() ||
2288       isolate()->cpu_profiler()->is_profiling() ||
2289       (isolate()->heap_profiler() != NULL &&
2290        isolate()->heap_profiler()->is_profiling());
2291
2292   if (!incremental_marking()->IsMarking()) {
2293     if (!logging_and_profiling) {
2294       scavenging_visitors_table_.CopyFrom(
2295           ScavengingVisitor<IGNORE_MARKS,
2296                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2297     } else {
2298       scavenging_visitors_table_.CopyFrom(
2299           ScavengingVisitor<IGNORE_MARKS,
2300                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2301     }
2302   } else {
2303     if (!logging_and_profiling) {
2304       scavenging_visitors_table_.CopyFrom(
2305           ScavengingVisitor<TRANSFER_MARKS,
2306                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2307     } else {
2308       scavenging_visitors_table_.CopyFrom(
2309           ScavengingVisitor<TRANSFER_MARKS,
2310                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2311     }
2312
2313     if (incremental_marking()->IsCompacting()) {
2314       // When compacting, forbid short-circuiting of cons strings.
2315       // Scavenging code relies on the fact that a new space object
2316       // can't be evacuated into an evacuation candidate, but
2317       // short-circuiting violates this assumption.
2318       scavenging_visitors_table_.Register(
2319           StaticVisitorBase::kVisitShortcutCandidate,
2320           scavenging_visitors_table_.GetVisitorById(
2321               StaticVisitorBase::kVisitConsString));
2322     }
2323   }
2324 }
2325
2326
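// Slow path of ScavengeObject: the object has no forwarding address yet, so
// dispatch through DoScavengeObject to the copy routine selected for the
// object's map in the current scavenging visitors table.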
2327 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2328   SLOW_ASSERT(HEAP->InFromSpace(object));
2329   MapWord first_word = object->map_word();
2330   SLOW_ASSERT(!first_word.IsForwardingAddress());
2331   Map* map = first_word.ToMap();
2332   map->GetHeap()->DoScavengeObject(map, p, object);
2333 }
2334
2335
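// AllocatePartialMap creates a map before the roots it would normally point
// at (meta map, empty fixed array, empty descriptor array) exist; the
// missing fields are patched up later in CreateInitialMaps. AllocateMap
// below is the fully initialized variant used once those roots are in place.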
2336 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2337                                       int instance_size) {
2338   Object* result;
2339   MaybeObject* maybe_result = AllocateRawMap();
2340   if (!maybe_result->ToObject(&result)) return maybe_result;
2341
2342   // Map::cast cannot be used due to uninitialized map field.
2343   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2344   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2345   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2346   reinterpret_cast<Map*>(result)->set_visitor_id(
2347         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2348   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2349   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2350   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2351   reinterpret_cast<Map*>(result)->set_bit_field(0);
2352   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2353   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2354                    Map::OwnsDescriptors::encode(true);
2355   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2356   return result;
2357 }
2358
2359
2360 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2361                                int instance_size,
2362                                ElementsKind elements_kind) {
2363   Object* result;
2364   MaybeObject* maybe_result = AllocateRawMap();
2365   if (!maybe_result->To(&result)) return maybe_result;
2366
2367   Map* map = reinterpret_cast<Map*>(result);
2368   map->set_map_no_write_barrier(meta_map());
2369   map->set_instance_type(instance_type);
2370   map->set_visitor_id(
2371       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2372   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2373   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2374   map->set_instance_size(instance_size);
2375   map->set_inobject_properties(0);
2376   map->set_pre_allocated_property_fields(0);
2377   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2378   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2379                           SKIP_WRITE_BARRIER);
2380   map->init_back_pointer(undefined_value());
2381   map->set_unused_property_fields(0);
2382   map->set_instance_descriptors(empty_descriptor_array());
2383   map->set_bit_field(0);
2384   map->set_bit_field2(1 << Map::kIsExtensible);
2385   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2386                    Map::OwnsDescriptors::encode(true);
2387   map->set_bit_field3(bit_field3);
2388   map->set_elements_kind(elements_kind);
2389
2390   return map;
2391 }
2392
2393
2394 MaybeObject* Heap::AllocateCodeCache() {
2395   CodeCache* code_cache;
2396   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2397     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2398   }
2399   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2400   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2401   return code_cache;
2402 }
2403
2404
2405 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2406   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2407 }
2408
2409
2410 MaybeObject* Heap::AllocateAccessorPair() {
2411   AccessorPair* accessors;
2412   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2413     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2414   }
2415   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2416   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2417   return accessors;
2418 }
2419
2420
2421 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2422   TypeFeedbackInfo* info;
2423   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2424     if (!maybe_info->To(&info)) return maybe_info;
2425   }
2426   info->initialize_storage();
2427   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2428                                 SKIP_WRITE_BARRIER);
2429   return info;
2430 }
2431
2432
2433 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2434   AliasedArgumentsEntry* entry;
2435   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2436     if (!maybe_entry->To(&entry)) return maybe_entry;
2437   }
2438   entry->set_aliased_context_slot(aliased_context_slot);
2439   return entry;
2440 }
2441
2442
2443 const Heap::StringTypeTable Heap::string_type_table[] = {
2444 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2445   {type, size, k##camel_name##MapRootIndex},
2446   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2447 #undef STRING_TYPE_ELEMENT
2448 };
2449
2450
2451 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2452 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2453   {contents, k##name##RootIndex},
2454   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2455 #undef CONSTANT_STRING_ELEMENT
2456 };
2457
2458
2459 const Heap::StructTable Heap::struct_table[] = {
2460 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2461   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2462   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2463 #undef STRUCT_TABLE_ELEMENT
2464 };
2465
2466
2467 bool Heap::CreateInitialMaps() {
2468   Object* obj;
2469   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2470     if (!maybe_obj->ToObject(&obj)) return false;
2471   }
2472   // Map::cast cannot be used due to uninitialized map field.
2473   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2474   set_meta_map(new_meta_map);
2475   new_meta_map->set_map(new_meta_map);
2476
2477   { MaybeObject* maybe_obj =
2478         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2479     if (!maybe_obj->ToObject(&obj)) return false;
2480   }
2481   set_fixed_array_map(Map::cast(obj));
2482
2483   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2484     if (!maybe_obj->ToObject(&obj)) return false;
2485   }
2486   set_oddball_map(Map::cast(obj));
2487
2488   // Allocate the empty array.
2489   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2490     if (!maybe_obj->ToObject(&obj)) return false;
2491   }
2492   set_empty_fixed_array(FixedArray::cast(obj));
2493
2494   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2495     if (!maybe_obj->ToObject(&obj)) return false;
2496   }
2497   set_null_value(Oddball::cast(obj));
2498   Oddball::cast(obj)->set_kind(Oddball::kNull);
2499
2500   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2501     if (!maybe_obj->ToObject(&obj)) return false;
2502   }
2503   set_undefined_value(Oddball::cast(obj));
2504   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2505   ASSERT(!InNewSpace(undefined_value()));
2506
2507   // Allocate the empty descriptor array.
2508   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2509     if (!maybe_obj->ToObject(&obj)) return false;
2510   }
2511   set_empty_descriptor_array(DescriptorArray::cast(obj));
2512
2513   // Fix the instance_descriptors for the existing maps.
2514   meta_map()->set_code_cache(empty_fixed_array());
2515   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2516   meta_map()->init_back_pointer(undefined_value());
2517   meta_map()->set_instance_descriptors(empty_descriptor_array());
2518
2519   fixed_array_map()->set_code_cache(empty_fixed_array());
2520   fixed_array_map()->set_dependent_code(
2521       DependentCode::cast(empty_fixed_array()));
2522   fixed_array_map()->init_back_pointer(undefined_value());
2523   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2524
2525   oddball_map()->set_code_cache(empty_fixed_array());
2526   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2527   oddball_map()->init_back_pointer(undefined_value());
2528   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2529
2530   // Fix prototype object for existing maps.
2531   meta_map()->set_prototype(null_value());
2532   meta_map()->set_constructor(null_value());
2533
2534   fixed_array_map()->set_prototype(null_value());
2535   fixed_array_map()->set_constructor(null_value());
2536
2537   oddball_map()->set_prototype(null_value());
2538   oddball_map()->set_constructor(null_value());
2539
2540   { MaybeObject* maybe_obj =
2541         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2542     if (!maybe_obj->ToObject(&obj)) return false;
2543   }
2544   set_fixed_cow_array_map(Map::cast(obj));
2545   ASSERT(fixed_array_map() != fixed_cow_array_map());
2546
2547   { MaybeObject* maybe_obj =
2548         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2549     if (!maybe_obj->ToObject(&obj)) return false;
2550   }
2551   set_scope_info_map(Map::cast(obj));
2552
2553   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2554     if (!maybe_obj->ToObject(&obj)) return false;
2555   }
2556   set_heap_number_map(Map::cast(obj));
2557
2558   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2559     if (!maybe_obj->ToObject(&obj)) return false;
2560   }
2561   set_symbol_map(Map::cast(obj));
2562
2563   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2564     if (!maybe_obj->ToObject(&obj)) return false;
2565   }
2566   set_foreign_map(Map::cast(obj));
2567
2568   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2569     const StringTypeTable& entry = string_type_table[i];
2570     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2571       if (!maybe_obj->ToObject(&obj)) return false;
2572     }
2573     roots_[entry.index] = Map::cast(obj);
2574   }
2575
2576   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2577     if (!maybe_obj->ToObject(&obj)) return false;
2578   }
2579   set_undetectable_string_map(Map::cast(obj));
2580   Map::cast(obj)->set_is_undetectable();
2581
2582   { MaybeObject* maybe_obj =
2583         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2584     if (!maybe_obj->ToObject(&obj)) return false;
2585   }
2586   set_undetectable_ascii_string_map(Map::cast(obj));
2587   Map::cast(obj)->set_is_undetectable();
2588
2589   { MaybeObject* maybe_obj =
2590         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2591     if (!maybe_obj->ToObject(&obj)) return false;
2592   }
2593   set_fixed_double_array_map(Map::cast(obj));
2594
2595   { MaybeObject* maybe_obj =
2596         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2597     if (!maybe_obj->ToObject(&obj)) return false;
2598   }
2599   set_byte_array_map(Map::cast(obj));
2600
2601   { MaybeObject* maybe_obj =
2602         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2603     if (!maybe_obj->ToObject(&obj)) return false;
2604   }
2605   set_free_space_map(Map::cast(obj));
2606
2607   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2608     if (!maybe_obj->ToObject(&obj)) return false;
2609   }
2610   set_empty_byte_array(ByteArray::cast(obj));
2611
2612   { MaybeObject* maybe_obj =
2613         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2614     if (!maybe_obj->ToObject(&obj)) return false;
2615   }
2616   set_external_pixel_array_map(Map::cast(obj));
2617
2618   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2619                                          ExternalArray::kAlignedSize);
2620     if (!maybe_obj->ToObject(&obj)) return false;
2621   }
2622   set_external_byte_array_map(Map::cast(obj));
2623
2624   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2625                                          ExternalArray::kAlignedSize);
2626     if (!maybe_obj->ToObject(&obj)) return false;
2627   }
2628   set_external_unsigned_byte_array_map(Map::cast(obj));
2629
2630   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2631                                          ExternalArray::kAlignedSize);
2632     if (!maybe_obj->ToObject(&obj)) return false;
2633   }
2634   set_external_short_array_map(Map::cast(obj));
2635
2636   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2637                                          ExternalArray::kAlignedSize);
2638     if (!maybe_obj->ToObject(&obj)) return false;
2639   }
2640   set_external_unsigned_short_array_map(Map::cast(obj));
2641
2642   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2643                                          ExternalArray::kAlignedSize);
2644     if (!maybe_obj->ToObject(&obj)) return false;
2645   }
2646   set_external_int_array_map(Map::cast(obj));
2647
2648   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2649                                          ExternalArray::kAlignedSize);
2650     if (!maybe_obj->ToObject(&obj)) return false;
2651   }
2652   set_external_unsigned_int_array_map(Map::cast(obj));
2653
2654   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2655                                          ExternalArray::kAlignedSize);
2656     if (!maybe_obj->ToObject(&obj)) return false;
2657   }
2658   set_external_float_array_map(Map::cast(obj));
2659
2660   { MaybeObject* maybe_obj =
2661         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2662     if (!maybe_obj->ToObject(&obj)) return false;
2663   }
2664   set_non_strict_arguments_elements_map(Map::cast(obj));
2665
2666   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2667                                          ExternalArray::kAlignedSize);
2668     if (!maybe_obj->ToObject(&obj)) return false;
2669   }
2670   set_external_double_array_map(Map::cast(obj));
2671
2672   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2673     if (!maybe_obj->ToObject(&obj)) return false;
2674   }
2675   set_empty_external_byte_array(ExternalArray::cast(obj));
2676
2677   { MaybeObject* maybe_obj =
2678         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2679     if (!maybe_obj->ToObject(&obj)) return false;
2680   }
2681   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2682
2683   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2684     if (!maybe_obj->ToObject(&obj)) return false;
2685   }
2686   set_empty_external_short_array(ExternalArray::cast(obj));
2687
2688   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2689       kExternalUnsignedShortArray);
2690     if (!maybe_obj->ToObject(&obj)) return false;
2691   }
2692   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2693
2694   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2695     if (!maybe_obj->ToObject(&obj)) return false;
2696   }
2697   set_empty_external_int_array(ExternalArray::cast(obj));
2698
2699   { MaybeObject* maybe_obj =
2700         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2701     if (!maybe_obj->ToObject(&obj)) return false;
2702   }
2703   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2704
2705   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2706     if (!maybe_obj->ToObject(&obj)) return false;
2707   }
2708   set_empty_external_float_array(ExternalArray::cast(obj));
2709
2710   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2711     if (!maybe_obj->ToObject(&obj)) return false;
2712   }
2713   set_empty_external_double_array(ExternalArray::cast(obj));
2714
2715   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2716     if (!maybe_obj->ToObject(&obj)) return false;
2717   }
2718   set_empty_external_pixel_array(ExternalArray::cast(obj));
2719
2720   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2721     if (!maybe_obj->ToObject(&obj)) return false;
2722   }
2723   set_code_map(Map::cast(obj));
2724
2725   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2726     if (!maybe_obj->ToObject(&obj)) return false;
2727   }
2728   set_cell_map(Map::cast(obj));
2729
2730   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2731                                          PropertyCell::kSize);
2732     if (!maybe_obj->ToObject(&obj)) return false;
2733   }
2734   set_global_property_cell_map(Map::cast(obj));
2735
2736   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2737     if (!maybe_obj->ToObject(&obj)) return false;
2738   }
2739   set_one_pointer_filler_map(Map::cast(obj));
2740
2741   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2742     if (!maybe_obj->ToObject(&obj)) return false;
2743   }
2744   set_two_pointer_filler_map(Map::cast(obj));
2745
2746   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2747     const StructTable& entry = struct_table[i];
2748     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2749       if (!maybe_obj->ToObject(&obj)) return false;
2750     }
2751     roots_[entry.index] = Map::cast(obj);
2752   }
2753
2754   { MaybeObject* maybe_obj =
2755         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2756     if (!maybe_obj->ToObject(&obj)) return false;
2757   }
2758   set_hash_table_map(Map::cast(obj));
2759
2760   { MaybeObject* maybe_obj =
2761         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2762     if (!maybe_obj->ToObject(&obj)) return false;
2763   }
2764   set_function_context_map(Map::cast(obj));
2765
2766   { MaybeObject* maybe_obj =
2767         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2768     if (!maybe_obj->ToObject(&obj)) return false;
2769   }
2770   set_catch_context_map(Map::cast(obj));
2771
2772   { MaybeObject* maybe_obj =
2773         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2774     if (!maybe_obj->ToObject(&obj)) return false;
2775   }
2776   set_with_context_map(Map::cast(obj));
2777
2778   { MaybeObject* maybe_obj =
2779         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2780     if (!maybe_obj->ToObject(&obj)) return false;
2781   }
2782   set_block_context_map(Map::cast(obj));
2783
2784   { MaybeObject* maybe_obj =
2785         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2786     if (!maybe_obj->ToObject(&obj)) return false;
2787   }
2788   set_module_context_map(Map::cast(obj));
2789
2790   { MaybeObject* maybe_obj =
2791         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2792     if (!maybe_obj->ToObject(&obj)) return false;
2793   }
2794   set_global_context_map(Map::cast(obj));
2795
2796   { MaybeObject* maybe_obj =
2797         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2798     if (!maybe_obj->ToObject(&obj)) return false;
2799   }
2800   Map* native_context_map = Map::cast(obj);
2801   native_context_map->set_dictionary_map(true);
2802   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2803   set_native_context_map(native_context_map);
2804
2805   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2806                                          SharedFunctionInfo::kAlignedSize);
2807     if (!maybe_obj->ToObject(&obj)) return false;
2808   }
2809   set_shared_function_info_map(Map::cast(obj));
2810
2811   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2812                                          JSMessageObject::kSize);
2813     if (!maybe_obj->ToObject(&obj)) return false;
2814   }
2815   set_message_object_map(Map::cast(obj));
2816
2817   Map* external_map;
2818   { MaybeObject* maybe_obj =
2819         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2820     if (!maybe_obj->To(&external_map)) return false;
2821   }
2822   external_map->set_is_extensible(false);
2823   set_external_map(external_map);
2824
2825   ASSERT(!InNewSpace(empty_fixed_array()));
2826   return true;
2827 }
2828
2829
2830 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2831   // Statically ensure that it is safe to allocate heap numbers in paged
2832   // spaces.
2833   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2834   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2835
2836   Object* result;
2837   { MaybeObject* maybe_result =
2838         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2839     if (!maybe_result->ToObject(&result)) return maybe_result;
2840   }
2841
2842   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2843   HeapNumber::cast(result)->set_value(value);
2844   return result;
2845 }
2846
2847
2848 MaybeObject* Heap::AllocateHeapNumber(double value) {
2849   // Use the general version if we're forced to always allocate.
2850   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2851
2852   // This version of AllocateHeapNumber is optimized for
2853   // allocation in new space.
2854   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2855   Object* result;
2856   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2857     if (!maybe_result->ToObject(&result)) return maybe_result;
2858   }
2859   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2860   HeapNumber::cast(result)->set_value(value);
2861   return result;
2862 }
2863
2864
2865 MaybeObject* Heap::AllocateCell(Object* value) {
2866   Object* result;
2867   { MaybeObject* maybe_result = AllocateRawCell();
2868     if (!maybe_result->ToObject(&result)) return maybe_result;
2869   }
2870   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2871   Cell::cast(result)->set_value(value);
2872   return result;
2873 }
2874
2875
2876 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2877   Object* result;
2878   MaybeObject* maybe_result = AllocateRawPropertyCell();
2879   if (!maybe_result->ToObject(&result)) return maybe_result;
2880
2881   HeapObject::cast(result)->set_map_no_write_barrier(
2882       global_property_cell_map());
2883   PropertyCell* cell = PropertyCell::cast(result);
2884   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2885                            SKIP_WRITE_BARRIER);
2886   cell->set_value(value);
2887   cell->set_type(Type::None());
2888   maybe_result = cell->SetValueInferType(value);
2889   if (maybe_result->IsFailure()) return maybe_result;
2890   return result;
2891 }
2892
2893
2894 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2895   Box* result;
2896   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2897   if (!maybe_result->To(&result)) return maybe_result;
2898   result->set_value(value);
2899   return result;
2900 }
2901
2902
2903 MaybeObject* Heap::AllocateAllocationSite() {
2904   Object* result;
2905   MaybeObject* maybe_result = Allocate(allocation_site_map(),
2906                                        OLD_POINTER_SPACE);
2907   if (!maybe_result->ToObject(&result)) return maybe_result;
2908   AllocationSite* site = AllocationSite::cast(result);
2909   site->Initialize();
2910
2911   // Link the new site into the weak list of allocation sites.
2912   site->set_weak_next(allocation_sites_list());
2913   set_allocation_sites_list(site);
2914   return result;
2915 }
2916
2917
2918 MaybeObject* Heap::CreateOddball(const char* to_string,
2919                                  Object* to_number,
2920                                  byte kind) {
2921   Object* result;
2922   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2923     if (!maybe_result->ToObject(&result)) return maybe_result;
2924   }
2925   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2926 }
2927
2928
2929 bool Heap::CreateApiObjects() {
2930   Object* obj;
2931
2932   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2933     if (!maybe_obj->ToObject(&obj)) return false;
2934   }
2935   // Don't use Smi-only elements optimizations for objects with the neander
2936   // map. There are too many cases where element values are set directly, with
2937   // no bottleneck to trap the Smi-only -> fast elements transition, and there
2938   // appears to be no benefit in optimizing this case.
2939   Map* new_neander_map = Map::cast(obj);
2940   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2941   set_neander_map(new_neander_map);
2942
2943   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2944     if (!maybe_obj->ToObject(&obj)) return false;
2945   }
2946   Object* elements;
2947   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2948     if (!maybe_elements->ToObject(&elements)) return false;
2949   }
2950   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2951   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2952   set_message_listeners(JSObject::cast(obj));
2953
2954   return true;
2955 }
2956
2957
2958 void Heap::CreateJSEntryStub() {
2959   JSEntryStub stub;
2960   set_js_entry_code(*stub.GetCode(isolate()));
2961 }
2962
2963
2964 void Heap::CreateJSConstructEntryStub() {
2965   JSConstructEntryStub stub;
2966   set_js_construct_entry_code(*stub.GetCode(isolate()));
2967 }
2968
2969
2970 void Heap::CreateFixedStubs() {
2971   // Here we create roots for fixed stubs. They are needed at GC
2972   // for cooking and uncooking (check out frames.cc).
2973   // This eliminates the need for doing a dictionary lookup in the
2974   // stub cache for these stubs.
2975   HandleScope scope(isolate());
2976   // gcc-4.4 has a problem generating correct code for the following snippet:
2977   // {  JSEntryStub stub;
2978   //    js_entry_code_ = *stub.GetCode();
2979   // }
2980   // {  JSConstructEntryStub stub;
2981   //    js_construct_entry_code_ = *stub.GetCode();
2982   // }
2983   // To work around the problem, use separate functions without inlining.
2984   Heap::CreateJSEntryStub();
2985   Heap::CreateJSConstructEntryStub();
2986
2987   // Create stubs that should be there, so we don't unexpectedly have to
2988   // create them if we need them during the creation of another stub.
2989   // Stub creation mixes raw pointers and handles in an unsafe manner so
2990   // we cannot create stubs while we are creating stubs.
2991   CodeStub::GenerateStubsAheadOfTime(isolate());
2992 }
2993
2994
2995 bool Heap::CreateInitialObjects() {
2996   Object* obj;
2997
2998   // The -0 value must be set before NumberFromDouble works.
2999   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3000     if (!maybe_obj->ToObject(&obj)) return false;
3001   }
3002   set_minus_zero_value(HeapNumber::cast(obj));
3003   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3004
3005   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3006     if (!maybe_obj->ToObject(&obj)) return false;
3007   }
3008   set_nan_value(HeapNumber::cast(obj));
3009
3010   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3011     if (!maybe_obj->ToObject(&obj)) return false;
3012   }
3013   set_infinity_value(HeapNumber::cast(obj));
3014
3015   // The hole has not been created yet, but we want to put something
3016   // predictable in the gaps in the string table, so let's make that Smi zero.
3017   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3018
3019   // Allocate initial string table.
3020   { MaybeObject* maybe_obj =
3021         StringTable::Allocate(this, kInitialStringTableSize);
3022     if (!maybe_obj->ToObject(&obj)) return false;
3023   }
3024   // Don't use set_string_table() due to asserts.
3025   roots_[kStringTableRootIndex] = obj;
3026
3027   // Finish initializing oddballs after creating the string table.
3028   { MaybeObject* maybe_obj =
3029         undefined_value()->Initialize("undefined",
3030                                       nan_value(),
3031                                       Oddball::kUndefined);
3032     if (!maybe_obj->ToObject(&obj)) return false;
3033   }
3034
3035   // Initialize the null_value.
3036   { MaybeObject* maybe_obj =
3037         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3038     if (!maybe_obj->ToObject(&obj)) return false;
3039   }
3040
3041   { MaybeObject* maybe_obj = CreateOddball("true",
3042                                            Smi::FromInt(1),
3043                                            Oddball::kTrue);
3044     if (!maybe_obj->ToObject(&obj)) return false;
3045   }
3046   set_true_value(Oddball::cast(obj));
3047
3048   { MaybeObject* maybe_obj = CreateOddball("false",
3049                                            Smi::FromInt(0),
3050                                            Oddball::kFalse);
3051     if (!maybe_obj->ToObject(&obj)) return false;
3052   }
3053   set_false_value(Oddball::cast(obj));
3054
3055   { MaybeObject* maybe_obj = CreateOddball("hole",
3056                                            Smi::FromInt(-1),
3057                                            Oddball::kTheHole);
3058     if (!maybe_obj->ToObject(&obj)) return false;
3059   }
3060   set_the_hole_value(Oddball::cast(obj));
3061
3062   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3063                                            Smi::FromInt(-1),
3064                                            Oddball::kUninitialized);
3065     if (!maybe_obj->ToObject(&obj)) return false;
3066   }
3067   set_uninitialized_value(Oddball::cast(obj));
3068
3069   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3070                                            Smi::FromInt(-4),
3071                                            Oddball::kArgumentMarker);
3072     if (!maybe_obj->ToObject(&obj)) return false;
3073   }
3074   set_arguments_marker(Oddball::cast(obj));
3075
3076   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3077                                            Smi::FromInt(-2),
3078                                            Oddball::kOther);
3079     if (!maybe_obj->ToObject(&obj)) return false;
3080   }
3081   set_no_interceptor_result_sentinel(obj);
3082
3083   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3084                                            Smi::FromInt(-3),
3085                                            Oddball::kOther);
3086     if (!maybe_obj->ToObject(&obj)) return false;
3087   }
3088   set_termination_exception(obj);
3089
3090   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3091     { MaybeObject* maybe_obj =
3092           InternalizeUtf8String(constant_string_table[i].contents);
3093       if (!maybe_obj->ToObject(&obj)) return false;
3094     }
3095     roots_[constant_string_table[i].index] = String::cast(obj);
3096   }
3097
3098   // Allocate the hidden string which is used to identify the hidden properties
3099   // in JSObjects. The hash code has a special value so that it will not match
3100   // the empty string when searching for the property. It cannot be part of the
3101   // loop above because it needs to be allocated manually with the special
3102   // hash code in place. The hash code for the hidden_string is zero to ensure
3103   // that it will always be at the first entry in property descriptors.
3104   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3105       OneByteVector("", 0), String::kEmptyStringHash);
3106     if (!maybe_obj->ToObject(&obj)) return false;
3107   }
3108   hidden_string_ = String::cast(obj);
3109
3110   // Allocate the code_stubs dictionary. The initial size is set to avoid
3111   // expanding the dictionary during bootstrapping.
3112   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3113     if (!maybe_obj->ToObject(&obj)) return false;
3114   }
3115   set_code_stubs(UnseededNumberDictionary::cast(obj));
3116
3117
3118   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3119   // is set to avoid expanding the dictionary during bootstrapping.
3120   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3121     if (!maybe_obj->ToObject(&obj)) return false;
3122   }
3123   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3124
3125   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3126     if (!maybe_obj->ToObject(&obj)) return false;
3127   }
3128   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3129
3130   set_instanceof_cache_function(Smi::FromInt(0));
3131   set_instanceof_cache_map(Smi::FromInt(0));
3132   set_instanceof_cache_answer(Smi::FromInt(0));
3133
3134   CreateFixedStubs();
3135
3136   // Allocate the dictionary of intrinsic function names.
3137   { MaybeObject* maybe_obj =
3138         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3139     if (!maybe_obj->ToObject(&obj)) return false;
3140   }
3141   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3142                                                                        obj);
3143     if (!maybe_obj->ToObject(&obj)) return false;
3144   }
3145   set_intrinsic_function_names(NameDictionary::cast(obj));
3146
3147   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3148     if (!maybe_obj->ToObject(&obj)) return false;
3149   }
3150   set_number_string_cache(FixedArray::cast(obj));
3151
3152   // Allocate cache for single character one byte strings.
3153   { MaybeObject* maybe_obj =
3154         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3155     if (!maybe_obj->ToObject(&obj)) return false;
3156   }
3157   set_single_character_string_cache(FixedArray::cast(obj));
3158
3159   // Allocate cache for string split.
3160   { MaybeObject* maybe_obj = AllocateFixedArray(
3161       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3162     if (!maybe_obj->ToObject(&obj)) return false;
3163   }
3164   set_string_split_cache(FixedArray::cast(obj));
3165
3166   { MaybeObject* maybe_obj = AllocateFixedArray(
3167       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3168     if (!maybe_obj->ToObject(&obj)) return false;
3169   }
3170   set_regexp_multiple_cache(FixedArray::cast(obj));
3171
3172   // Allocate cache for external strings pointing to native source code.
3173   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3174     if (!maybe_obj->ToObject(&obj)) return false;
3175   }
3176   set_natives_source_cache(FixedArray::cast(obj));
3177
3178   // Allocate object to hold object observation state.
3179   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3180     if (!maybe_obj->ToObject(&obj)) return false;
3181   }
3182   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3183     if (!maybe_obj->ToObject(&obj)) return false;
3184   }
3185   set_observation_state(JSObject::cast(obj));
3186
3187   { MaybeObject* maybe_obj = AllocateSymbol();
3188     if (!maybe_obj->ToObject(&obj)) return false;
3189   }
3190   set_frozen_symbol(Symbol::cast(obj));
3191
3192   { MaybeObject* maybe_obj = AllocateSymbol();
3193     if (!maybe_obj->ToObject(&obj)) return false;
3194   }
3195   set_elements_transition_symbol(Symbol::cast(obj));
3196
3197   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3198     if (!maybe_obj->ToObject(&obj)) return false;
3199   }
3200   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3201   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3202
3203   { MaybeObject* maybe_obj = AllocateSymbol();
3204     if (!maybe_obj->ToObject(&obj)) return false;
3205   }
3206   set_observed_symbol(Symbol::cast(obj));
3207
3208   // Handling of script id generation is in Factory::NewScript.
3209   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3210
3211   // Initialize keyed lookup cache.
3212   isolate_->keyed_lookup_cache()->Clear();
3213
3214   // Initialize context slot cache.
3215   isolate_->context_slot_cache()->Clear();
3216
3217   // Initialize descriptor cache.
3218   isolate_->descriptor_lookup_cache()->Clear();
3219
3220   // Initialize compilation cache.
3221   isolate_->compilation_cache()->Clear();
3222
3223   return true;
3224 }
3225
3226
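// Returns true only for the roots white-listed below; all other roots are
// expected to stay untouched after the heap has been initialized.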
3227 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3228   RootListIndex writable_roots[] = {
3229     kStoreBufferTopRootIndex,
3230     kStackLimitRootIndex,
3231     kNumberStringCacheRootIndex,
3232     kInstanceofCacheFunctionRootIndex,
3233     kInstanceofCacheMapRootIndex,
3234     kInstanceofCacheAnswerRootIndex,
3235     kCodeStubsRootIndex,
3236     kNonMonomorphicCacheRootIndex,
3237     kPolymorphicCodeCacheRootIndex,
3238     kLastScriptIdRootIndex,
3239     kEmptyScriptRootIndex,
3240     kRealStackLimitRootIndex,
3241     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3242     kConstructStubDeoptPCOffsetRootIndex,
3243     kGetterStubDeoptPCOffsetRootIndex,
3244     kSetterStubDeoptPCOffsetRootIndex,
3245     kStringTableRootIndex,
3246   };
3247
3248   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3249     if (root_index == writable_roots[i])
3250       return true;
3251   }
3252   return false;
3253 }
3254
3255
3256 Object* RegExpResultsCache::Lookup(Heap* heap,
3257                                    String* key_string,
3258                                    Object* key_pattern,
3259                                    ResultsCacheType type) {
3260   FixedArray* cache;
3261   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3262   if (type == STRING_SPLIT_SUBSTRINGS) {
3263     ASSERT(key_pattern->IsString());
3264     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3265     cache = heap->string_split_cache();
3266   } else {
3267     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3268     ASSERT(key_pattern->IsFixedArray());
3269     cache = heap->regexp_multiple_cache();
3270   }
3271
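  // Probe the cache like a two-way set-associative cache: the primary entry is
  // picked by the string hash (aligned to the entry size), and on a miss the
  // immediately following entry is checked as well.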
3272   uint32_t hash = key_string->Hash();
3273   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3274       ~(kArrayEntriesPerCacheEntry - 1));
3275   if (cache->get(index + kStringOffset) == key_string &&
3276       cache->get(index + kPatternOffset) == key_pattern) {
3277     return cache->get(index + kArrayOffset);
3278   }
3279   index =
3280       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3281   if (cache->get(index + kStringOffset) == key_string &&
3282       cache->get(index + kPatternOffset) == key_pattern) {
3283     return cache->get(index + kArrayOffset);
3284   }
3285   return Smi::FromInt(0);
3286 }
3287
3288
3289 void RegExpResultsCache::Enter(Heap* heap,
3290                                String* key_string,
3291                                Object* key_pattern,
3292                                FixedArray* value_array,
3293                                ResultsCacheType type) {
3294   FixedArray* cache;
3295   if (!key_string->IsInternalizedString()) return;
3296   if (type == STRING_SPLIT_SUBSTRINGS) {
3297     ASSERT(key_pattern->IsString());
3298     if (!key_pattern->IsInternalizedString()) return;
3299     cache = heap->string_split_cache();
3300   } else {
3301     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3302     ASSERT(key_pattern->IsFixedArray());
3303     cache = heap->regexp_multiple_cache();
3304   }
3305
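  // Mirror the probing scheme used in Lookup(): prefer the primary entry, fall
  // back to the secondary one, and when both are occupied clear the secondary
  // entry and overwrite the primary.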
3306   uint32_t hash = key_string->Hash();
3307   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3308       ~(kArrayEntriesPerCacheEntry - 1));
3309   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3310     cache->set(index + kStringOffset, key_string);
3311     cache->set(index + kPatternOffset, key_pattern);
3312     cache->set(index + kArrayOffset, value_array);
3313   } else {
3314     uint32_t index2 =
3315         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3316     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3317       cache->set(index2 + kStringOffset, key_string);
3318       cache->set(index2 + kPatternOffset, key_pattern);
3319       cache->set(index2 + kArrayOffset, value_array);
3320     } else {
3321       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3322       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3323       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3324       cache->set(index + kStringOffset, key_string);
3325       cache->set(index + kPatternOffset, key_pattern);
3326       cache->set(index + kArrayOffset, value_array);
3327     }
3328   }
3329   // If the array is a reasonably short list of substrings, convert it into a
3330   // list of internalized strings.
3331   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3332     for (int i = 0; i < value_array->length(); i++) {
3333       String* str = String::cast(value_array->get(i));
3334       Object* internalized_str;
3335       MaybeObject* maybe_string = heap->InternalizeString(str);
3336       if (maybe_string->ToObject(&internalized_str)) {
3337         value_array->set(i, internalized_str);
3338       }
3339     }
3340   }
3341   // Convert backing store to a copy-on-write array.
3342   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3343 }
3344
3345
3346 void RegExpResultsCache::Clear(FixedArray* cache) {
3347   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3348     cache->set(i, Smi::FromInt(0));
3349   }
3350 }
3351
3352
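// The initial number string cache holds kInitialNumberStringCacheSize
// (number, string) pairs, hence twice that many fixed array elements.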
3353 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3354   MaybeObject* maybe_obj =
3355       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3356   return maybe_obj;
3357 }
3358
3359
3360 int Heap::FullSizeNumberStringCacheLength() {
3361   // Compute the size of the number string cache based on the max newspace size.
3362   // The number string cache has a minimum size based on twice the initial cache
3363   // size to ensure that it is bigger after being made 'full size'.
3364   int number_string_cache_size = max_semispace_size_ / 512;
3365   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3366                                  Min(0x4000, number_string_cache_size));
3367   // There is a string and a number per entry so the length is twice the number
3368   // of entries.
3369   return number_string_cache_size * 2;
3370 }
3371
3372
3373 void Heap::AllocateFullSizeNumberStringCache() {
3374   // The idea is to have a small number string cache in the snapshot to keep
3375   // boot-time memory usage down.  If the number string cache already has to be
3376   // expanded while the snapshot is being created, that plan did not work out.
3377   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3378   MaybeObject* maybe_obj =
3379       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3380   Object* new_cache;
3381   if (maybe_obj->ToObject(&new_cache)) {
3382     // We don't bother to repopulate the cache with entries from the old cache.
3383     // It will be repopulated soon enough with new strings.
3384     set_number_string_cache(FixedArray::cast(new_cache));
3385   }
3386   // If allocation fails then we just return without doing anything.  It is only
3387   // a cache, so best effort is OK here.
3388 }
3389
3390
3391 void Heap::FlushNumberStringCache() {
3392   // Flush the number to string cache.
3393   int len = number_string_cache()->length();
3394   for (int i = 0; i < len; i++) {
3395     number_string_cache()->set_undefined(this, i);
3396   }
3397 }
3398
3399
3400 static inline int double_get_hash(double d) {
3401   DoubleRepresentation rep(d);
3402   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3403 }
3404
3405
3406 static inline int smi_get_hash(Smi* smi) {
3407   return smi->value();
3408 }
3409
3410
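// The number string cache stores (number, string) pairs in consecutive slots:
// the key lives at index 2 * hash and the cached string at 2 * hash + 1, with
// the hash masked to half the cache length.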
3411 Object* Heap::GetNumberStringCache(Object* number) {
3412   int hash;
3413   int mask = (number_string_cache()->length() >> 1) - 1;
3414   if (number->IsSmi()) {
3415     hash = smi_get_hash(Smi::cast(number)) & mask;
3416   } else {
3417     hash = double_get_hash(number->Number()) & mask;
3418   }
3419   Object* key = number_string_cache()->get(hash * 2);
3420   if (key == number) {
3421     return String::cast(number_string_cache()->get(hash * 2 + 1));
3422   } else if (key->IsHeapNumber() &&
3423              number->IsHeapNumber() &&
3424              key->Number() == number->Number()) {
3425     return String::cast(number_string_cache()->get(hash * 2 + 1));
3426   }
3427   return undefined_value();
3428 }
3429
3430
3431 void Heap::SetNumberStringCache(Object* number, String* string) {
3432   int hash;
3433   int mask = (number_string_cache()->length() >> 1) - 1;
3434   if (number->IsSmi()) {
3435     hash = smi_get_hash(Smi::cast(number)) & mask;
3436   } else {
3437     hash = double_get_hash(number->Number()) & mask;
3438   }
3439   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3440       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3441     // The first time we have a hash collision, we move to the full-sized
3442     // number string cache.
3443     AllocateFullSizeNumberStringCache();
3444     return;
3445   }
3446   number_string_cache()->set(hash * 2, number);
3447   number_string_cache()->set(hash * 2 + 1, string);
3448 }
3449
3450
3451 MaybeObject* Heap::NumberToString(Object* number,
3452                                   bool check_number_string_cache,
3453                                   PretenureFlag pretenure) {
3454   isolate_->counters()->number_to_string_runtime()->Increment();
3455   if (check_number_string_cache) {
3456     Object* cached = GetNumberStringCache(number);
3457     if (cached != undefined_value()) {
3458       return cached;
3459     }
3460   }
3461
3462   char arr[100];
3463   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3464   const char* str;
3465   if (number->IsSmi()) {
3466     int num = Smi::cast(number)->value();
3467     str = IntToCString(num, buffer);
3468   } else {
3469     double num = HeapNumber::cast(number)->value();
3470     str = DoubleToCString(num, buffer);
3471   }
3472
3473   Object* js_string;
3474   MaybeObject* maybe_js_string =
3475       AllocateStringFromOneByte(CStrVector(str), pretenure);
3476   if (maybe_js_string->ToObject(&js_string)) {
3477     SetNumberStringCache(number, String::cast(js_string));
3478   }
3479   return maybe_js_string;
3480 }
3481
3482
3483 MaybeObject* Heap::Uint32ToString(uint32_t value,
3484                                   bool check_number_string_cache) {
3485   Object* number;
3486   MaybeObject* maybe = NumberFromUint32(value);
3487   if (!maybe->To<Object>(&number)) return maybe;
3488   return NumberToString(number, check_number_string_cache);
3489 }
3490
3491
3492 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3493   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3494 }
3495
3496
3497 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3498     ExternalArrayType array_type) {
3499   switch (array_type) {
3500     case kExternalByteArray:
3501       return kExternalByteArrayMapRootIndex;
3502     case kExternalUnsignedByteArray:
3503       return kExternalUnsignedByteArrayMapRootIndex;
3504     case kExternalShortArray:
3505       return kExternalShortArrayMapRootIndex;
3506     case kExternalUnsignedShortArray:
3507       return kExternalUnsignedShortArrayMapRootIndex;
3508     case kExternalIntArray:
3509       return kExternalIntArrayMapRootIndex;
3510     case kExternalUnsignedIntArray:
3511       return kExternalUnsignedIntArrayMapRootIndex;
3512     case kExternalFloatArray:
3513       return kExternalFloatArrayMapRootIndex;
3514     case kExternalDoubleArray:
3515       return kExternalDoubleArrayMapRootIndex;
3516     case kExternalPixelArray:
3517       return kExternalPixelArrayMapRootIndex;
3518     default:
3519       UNREACHABLE();
3520       return kUndefinedValueRootIndex;
3521   }
3522 }
3523
3524 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3525     ElementsKind elementsKind) {
3526   switch (elementsKind) {
3527     case EXTERNAL_BYTE_ELEMENTS:
3528       return kEmptyExternalByteArrayRootIndex;
3529     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3530       return kEmptyExternalUnsignedByteArrayRootIndex;
3531     case EXTERNAL_SHORT_ELEMENTS:
3532       return kEmptyExternalShortArrayRootIndex;
3533     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3534       return kEmptyExternalUnsignedShortArrayRootIndex;
3535     case EXTERNAL_INT_ELEMENTS:
3536       return kEmptyExternalIntArrayRootIndex;
3537     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3538       return kEmptyExternalUnsignedIntArrayRootIndex;
3539     case EXTERNAL_FLOAT_ELEMENTS:
3540       return kEmptyExternalFloatArrayRootIndex;
3541     case EXTERNAL_DOUBLE_ELEMENTS:
3542       return kEmptyExternalDoubleArrayRootIndex;
3543     case EXTERNAL_PIXEL_ELEMENTS:
3544       return kEmptyExternalPixelArrayRootIndex;
3545     default:
3546       UNREACHABLE();
3547       return kUndefinedValueRootIndex;
3548   }
3549 }
3550
3551
3552 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3553   return ExternalArray::cast(
3554       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3555 }
3556
3557
3558
3559
3560 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3561   // We need to distinguish the minus zero value and this cannot be
3562   // done after conversion to int. Doing this by comparing bit
3563   // patterns is faster than using fpclassify() et al.
3564   static const DoubleRepresentation minus_zero(-0.0);
3565
3566   DoubleRepresentation rep(value);
3567   if (rep.bits == minus_zero.bits) {
3568     return AllocateHeapNumber(-0.0, pretenure);
3569   }
3570
3571   int int_value = FastD2I(value);
3572   if (value == int_value && Smi::IsValid(int_value)) {
3573     return Smi::FromInt(int_value);
3574   }
3575
3576   // Materialize the value in the heap.
3577   return AllocateHeapNumber(value, pretenure);
3578 }
3579
3580
3581 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3582   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3583   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3584   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3585   Foreign* result;
3586   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3587   if (!maybe_result->To(&result)) return maybe_result;
3588   result->set_foreign_address(address);
3589   return result;
3590 }
3591
3592
3593 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3594   SharedFunctionInfo* share;
3595   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3596   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3597
3598   // Set pointer fields.
3599   share->set_name(name);
3600   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3601   share->set_code(illegal);
3602   share->set_optimized_code_map(Smi::FromInt(0));
3603   share->set_scope_info(ScopeInfo::Empty(isolate_));
3604   Code* construct_stub =
3605       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3606   share->set_construct_stub(construct_stub);
3607   share->set_instance_class_name(Object_string());
3608   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3609   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3610   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3611   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3612   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3613   share->set_ast_node_count(0);
3614   share->set_counters(0);
3615
3616   // Set integer fields (smi or int, depending on the architecture).
3617   share->set_length(0);
3618   share->set_formal_parameter_count(0);
3619   share->set_expected_nof_properties(0);
3620   share->set_num_literals(0);
3621   share->set_start_position_and_type(0);
3622   share->set_end_position(0);
3623   share->set_function_token_position(0);
3624   // All compiler hints default to false or 0.
3625   share->set_compiler_hints(0);
3626   share->set_opt_count(0);
3627
3628   return share;
3629 }
3630
3631
3632 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3633                                            JSArray* arguments,
3634                                            int start_position,
3635                                            int end_position,
3636                                            Object* script,
3637                                            Object* stack_trace,
3638                                            Object* stack_frames) {
3639   Object* result;
3640   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3641     if (!maybe_result->ToObject(&result)) return maybe_result;
3642   }
3643   JSMessageObject* message = JSMessageObject::cast(result);
3644   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3645   message->initialize_elements();
3646   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3647   message->set_type(type);
3648   message->set_arguments(arguments);
3649   message->set_start_position(start_position);
3650   message->set_end_position(end_position);
3651   message->set_script(script);
3652   message->set_stack_trace(stack_trace);
3653   message->set_stack_frames(stack_frames);
3654   return result;
3655 }
3656
3657
3658
3659 // Returns true for a character in a range.  Both limits are inclusive.
3660 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3661   // This makes use of the unsigned wraparound.
3662   return character - from <= to - from;
3663 }
3664
3665
3666 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3667     Heap* heap,
3668     uint16_t c1,
3669     uint16_t c2) {
3670   String* result;
3671   // Numeric strings have a different hash algorithm not known by
3672   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3673   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3674       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3675     return result;
3676   // Now that we know the length is 2, we might as well make use of that fact
3677   // when building the new string.
3678   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3679     // We can do this.
3680     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3681     Object* result;
3682     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3683       if (!maybe_result->ToObject(&result)) return maybe_result;
3684     }
3685     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3686     dest[0] = static_cast<uint8_t>(c1);
3687     dest[1] = static_cast<uint8_t>(c2);
3688     return result;
3689   } else {
3690     Object* result;
3691     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3692       if (!maybe_result->ToObject(&result)) return maybe_result;
3693     }
3694     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3695     dest[0] = c1;
3696     dest[1] = c2;
3697     return result;
3698   }
3699 }
3700
3701
3702 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3703   int first_length = first->length();
3704   if (first_length == 0) {
3705     return second;
3706   }
3707
3708   int second_length = second->length();
3709   if (second_length == 0) {
3710     return first;
3711   }
3712
3713   int length = first_length + second_length;
3714
3715   // Optimization for 2-byte strings often used as keys in a decompression
3716   // dictionary.  Check whether we already have the string in the string
3717   // table to prevent creation of many unnecessary strings.
3718   if (length == 2) {
3719     uint16_t c1 = first->Get(0);
3720     uint16_t c2 = second->Get(0);
3721     return MakeOrFindTwoCharacterString(this, c1, c2);
3722   }
3723
3724   bool first_is_one_byte = first->IsOneByteRepresentation();
3725   bool second_is_one_byte = second->IsOneByteRepresentation();
3726   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3727   // Make sure that an out of memory exception is thrown if the length
3728   // of the new cons string is too large.
3729   if (length > String::kMaxLength || length < 0) {
3730     isolate()->context()->mark_out_of_memory();
3731     return Failure::OutOfMemoryException(0x4);
3732   }
3733
3734   bool is_one_byte_data_in_two_byte_string = false;
3735   if (!is_one_byte) {
3736     // At least one of the strings uses two-byte representation so we
3737     // can't use the fast case code for short ASCII strings below, but
3738     // we can try to save memory if all chars actually fit in ASCII.
3739     is_one_byte_data_in_two_byte_string =
3740         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3741     if (is_one_byte_data_in_two_byte_string) {
3742       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3743     }
3744   }
3745
3746   // If the resulting string is small make a flat string.
3747   if (length < ConsString::kMinLength) {
3748     // Note that neither of the two inputs can be a slice because:
3749     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3750     ASSERT(first->IsFlat());
3751     ASSERT(second->IsFlat());
3752     if (is_one_byte) {
3753       Object* result;
3754       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3755         if (!maybe_result->ToObject(&result)) return maybe_result;
3756       }
3757       // Copy the characters into the new object.
3758       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3759       // Copy first part.
3760       const uint8_t* src;
3761       if (first->IsExternalString()) {
3762         src = ExternalAsciiString::cast(first)->GetChars();
3763       } else {
3764         src = SeqOneByteString::cast(first)->GetChars();
3765       }
3766       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3767       // Copy second part.
3768       if (second->IsExternalString()) {
3769         src = ExternalAsciiString::cast(second)->GetChars();
3770       } else {
3771         src = SeqOneByteString::cast(second)->GetChars();
3772       }
3773       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3774       return result;
3775     } else {
3776       if (is_one_byte_data_in_two_byte_string) {
3777         Object* result;
3778         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3779           if (!maybe_result->ToObject(&result)) return maybe_result;
3780         }
3781         // Copy the characters into the new object.
3782         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3783         String::WriteToFlat(first, dest, 0, first_length);
3784         String::WriteToFlat(second, dest + first_length, 0, second_length);
3785         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3786         return result;
3787       }
3788
3789       Object* result;
3790       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3791         if (!maybe_result->ToObject(&result)) return maybe_result;
3792       }
3793       // Copy the characters into the new object.
3794       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3795       String::WriteToFlat(first, dest, 0, first_length);
3796       String::WriteToFlat(second, dest + first_length, 0, second_length);
3797       return result;
3798     }
3799   }
3800
3801   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3802       cons_ascii_string_map() : cons_string_map();
3803
3804   Object* result;
3805   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3806     if (!maybe_result->ToObject(&result)) return maybe_result;
3807   }
3808
3809   DisallowHeapAllocation no_gc;
3810   ConsString* cons_string = ConsString::cast(result);
3811   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3812   cons_string->set_length(length);
3813   cons_string->set_hash_field(String::kEmptyHashField);
3814   cons_string->set_first(first, mode);
3815   cons_string->set_second(second, mode);
3816   return result;
3817 }
3818
3819
3820 MaybeObject* Heap::AllocateSubString(String* buffer,
3821                                      int start,
3822                                      int end,
3823                                      PretenureFlag pretenure) {
3824   int length = end - start;
3825   if (length <= 0) {
3826     return empty_string();
3827   } else if (length == 1) {
3828     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3829   } else if (length == 2) {
3830     // Optimization for 2-byte strings often used as keys in a decompression
3831     // dictionary.  Check whether we already have the string in the string
3832     // table to prevent creation of many unnecessary strings.
3833     uint16_t c1 = buffer->Get(start);
3834     uint16_t c2 = buffer->Get(start + 1);
3835     return MakeOrFindTwoCharacterString(this, c1, c2);
3836   }
3837
3838   // Make an attempt to flatten the buffer to reduce access time.
3839   buffer = buffer->TryFlattenGetString();
3840
3841   if (!FLAG_string_slices ||
3842       !buffer->IsFlat() ||
3843       length < SlicedString::kMinLength ||
3844       pretenure == TENURED) {
3845     Object* result;
3846     // WriteToFlat takes care of the case when an indirect string has a
3847     // different encoding from its underlying string.  These encodings may
3848     // differ because of externalization.
3849     bool is_one_byte = buffer->IsOneByteRepresentation();
3850     { MaybeObject* maybe_result = is_one_byte
3851                                   ? AllocateRawOneByteString(length, pretenure)
3852                                   : AllocateRawTwoByteString(length, pretenure);
3853       if (!maybe_result->ToObject(&result)) return maybe_result;
3854     }
3855     String* string_result = String::cast(result);
3856     // Copy the characters into the new object.
3857     if (is_one_byte) {
3858       ASSERT(string_result->IsOneByteRepresentation());
3859       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3860       String::WriteToFlat(buffer, dest, start, end);
3861     } else {
3862       ASSERT(string_result->IsTwoByteRepresentation());
3863       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3864       String::WriteToFlat(buffer, dest, start, end);
3865     }
3866     return result;
3867   }
3868
3869   ASSERT(buffer->IsFlat());
3870 #if VERIFY_HEAP
3871   if (FLAG_verify_heap) {
3872     buffer->StringVerify();
3873   }
3874 #endif
3875
3876   Object* result;
3877   // When slicing an indirect string we use its encoding for a newly created
3878   // slice and don't check the encoding of the underlying string.  This is safe
3879   // even if the encodings are different because of externalization.  If an
3880   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3881   // codes of the underlying string must still fit into ASCII (because
3882   // externalization must not change char codes).
3883   { Map* map = buffer->IsOneByteRepresentation()
3884                  ? sliced_ascii_string_map()
3885                  : sliced_string_map();
3886     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3887     if (!maybe_result->ToObject(&result)) return maybe_result;
3888   }
3889
3890   DisallowHeapAllocation no_gc;
3891   SlicedString* sliced_string = SlicedString::cast(result);
3892   sliced_string->set_length(length);
3893   sliced_string->set_hash_field(String::kEmptyHashField);
3894   if (buffer->IsConsString()) {
3895     ConsString* cons = ConsString::cast(buffer);
3896     ASSERT(cons->second()->length() == 0);
3897     sliced_string->set_parent(cons->first());
3898     sliced_string->set_offset(start);
3899   } else if (buffer->IsSlicedString()) {
3900     // Prevent nesting sliced strings.
3901     SlicedString* parent_slice = SlicedString::cast(buffer);
3902     sliced_string->set_parent(parent_slice->parent());
3903     sliced_string->set_offset(start + parent_slice->offset());
3904   } else {
3905     sliced_string->set_parent(buffer);
3906     sliced_string->set_offset(start);
3907   }
3908   ASSERT(sliced_string->parent()->IsSeqString() ||
3909          sliced_string->parent()->IsExternalString());
3910   return result;
3911 }
3912
3913
3914 MaybeObject* Heap::AllocateExternalStringFromAscii(
3915     const ExternalAsciiString::Resource* resource) {
3916   size_t length = resource->length();
3917   if (length > static_cast<size_t>(String::kMaxLength)) {
3918     isolate()->context()->mark_out_of_memory();
3919     return Failure::OutOfMemoryException(0x5);
3920   }
3921
3922   Map* map = external_ascii_string_map();
3923   Object* result;
3924   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3925     if (!maybe_result->ToObject(&result)) return maybe_result;
3926   }
3927
3928   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3929   external_string->set_length(static_cast<int>(length));
3930   external_string->set_hash_field(String::kEmptyHashField);
3931   external_string->set_resource(resource);
3932
3933   return result;
3934 }
3935
3936
3937 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3938     const ExternalTwoByteString::Resource* resource) {
3939   size_t length = resource->length();
3940   if (length > static_cast<size_t>(String::kMaxLength)) {
3941     isolate()->context()->mark_out_of_memory();
3942     return Failure::OutOfMemoryException(0x6);
3943   }
3944
3945   // For small strings we check whether the resource contains only
3946   // one byte characters.  If yes, we use a different string map.
3947   static const size_t kOneByteCheckLengthLimit = 32;
3948   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3949       String::IsOneByte(resource->data(), static_cast<int>(length));
3950   Map* map = is_one_byte ?
3951       external_string_with_one_byte_data_map() : external_string_map();
3952   Object* result;
3953   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3954     if (!maybe_result->ToObject(&result)) return maybe_result;
3955   }
3956
3957   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3958   external_string->set_length(static_cast<int>(length));
3959   external_string->set_hash_field(String::kEmptyHashField);
3960   external_string->set_resource(resource);
3961
3962   return result;
3963 }
3964
3965
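// One-byte character codes are served from (and added to) the single character
// string cache as internalized strings; larger codes always allocate a fresh
// sequential two-byte string.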
3966 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3967   if (code <= String::kMaxOneByteCharCode) {
3968     Object* value = single_character_string_cache()->get(code);
3969     if (value != undefined_value()) return value;
3970
3971     uint8_t buffer[1];
3972     buffer[0] = static_cast<uint8_t>(code);
3973     Object* result;
3974     MaybeObject* maybe_result =
3975         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3976
3977     if (!maybe_result->ToObject(&result)) return maybe_result;
3978     single_character_string_cache()->set(code, result);
3979     return result;
3980   }
3981
3982   Object* result;
3983   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3984     if (!maybe_result->ToObject(&result)) return maybe_result;
3985   }
3986   String* answer = String::cast(result);
3987   answer->Set(0, code);
3988   return answer;
3989 }
3990
3991
3992 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3993   if (length < 0 || length > ByteArray::kMaxLength) {
3994     return Failure::OutOfMemoryException(0x7);
3995   }
3996   if (pretenure == NOT_TENURED) {
3997     return AllocateByteArray(length);
3998   }
3999   int size = ByteArray::SizeFor(length);
4000   Object* result;
4001   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4002                    ? old_data_space_->AllocateRaw(size)
4003                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4004     if (!maybe_result->ToObject(&result)) return maybe_result;
4005   }
4006
4007   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4008       byte_array_map());
4009   reinterpret_cast<ByteArray*>(result)->set_length(length);
4010   return result;
4011 }
4012
4013
4014 MaybeObject* Heap::AllocateByteArray(int length) {
4015   if (length < 0 || length > ByteArray::kMaxLength) {
4016     return Failure::OutOfMemoryException(0x8);
4017   }
4018   int size = ByteArray::SizeFor(length);
4019   AllocationSpace space =
4020       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4021   Object* result;
4022   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4023     if (!maybe_result->ToObject(&result)) return maybe_result;
4024   }
4025
4026   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4027       byte_array_map());
4028   reinterpret_cast<ByteArray*>(result)->set_length(length);
4029   return result;
4030 }
4031
4032
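// Overwrites the range [addr, addr + size) with a filler object so the heap
// stays iterable: one- and two-pointer fillers use dedicated maps, larger
// ranges become FreeSpace objects that record their size.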
4033 void Heap::CreateFillerObjectAt(Address addr, int size) {
4034   if (size == 0) return;
4035   HeapObject* filler = HeapObject::FromAddress(addr);
4036   if (size == kPointerSize) {
4037     filler->set_map_no_write_barrier(one_pointer_filler_map());
4038   } else if (size == 2 * kPointerSize) {
4039     filler->set_map_no_write_barrier(two_pointer_filler_map());
4040   } else {
4041     filler->set_map_no_write_barrier(free_space_map());
4042     FreeSpace::cast(filler)->set_size(size);
4043   }
4044 }
4045
4046
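// Allocates only the ExternalArray header; the elements live in the external
// memory referenced by |external_pointer| and are not managed by the GC.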
4047 MaybeObject* Heap::AllocateExternalArray(int length,
4048                                          ExternalArrayType array_type,
4049                                          void* external_pointer,
4050                                          PretenureFlag pretenure) {
4051   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4052   Object* result;
4053   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4054                                             space,
4055                                             OLD_DATA_SPACE);
4056     if (!maybe_result->ToObject(&result)) return maybe_result;
4057   }
4058
4059   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4060       MapForExternalArrayType(array_type));
4061   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4062   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4063       external_pointer);
4064
4065   return result;
4066 }
4067
4068
4069 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4070                               Code::Flags flags,
4071                               Handle<Object> self_reference,
4072                               bool immovable,
4073                               bool crankshafted) {
4074   // Allocate ByteArray before the Code object, so that we do not risk
4075   // leaving an uninitialized Code object (and breaking the heap).
4076   ByteArray* reloc_info;
4077   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4078   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4079
4080   // Compute size.
4081   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4082   int obj_size = Code::SizeFor(body_size);
4083   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4084   MaybeObject* maybe_result;
4085   // Large code objects and code objects which should stay at a fixed address
4086   // are allocated in large object space.
4087   HeapObject* result;
4088   bool force_lo_space = obj_size > code_space()->AreaSize();
4089   if (force_lo_space) {
4090     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4091   } else {
4092     maybe_result = code_space_->AllocateRaw(obj_size);
4093   }
4094   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4095
4096   if (immovable && !force_lo_space &&
4097       // Objects on the first page of each space are never moved.
4098       !code_space_->FirstPage()->Contains(result->address())) {
4099     // Discard the first code allocation, which was on a page where it could be
4100     // moved.
4101     CreateFillerObjectAt(result->address(), obj_size);
4102     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4103     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4104   }
4105
4106   // Initialize the object.
4107   result->set_map_no_write_barrier(code_map());
4108   Code* code = Code::cast(result);
4109   ASSERT(!isolate_->code_range()->exists() ||
4110       isolate_->code_range()->contains(code->address()));
4111   code->set_instruction_size(desc.instr_size);
4112   code->set_relocation_info(reloc_info);
4113   code->set_flags(flags);
4114   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4115     code->set_check_type(RECEIVER_MAP_CHECK);
4116   }
4117   code->set_is_crankshafted(crankshafted);
4118   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4119   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4120   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4121   code->set_gc_metadata(Smi::FromInt(0));
4122   code->set_ic_age(global_ic_age_);
4123   code->set_prologue_offset(kPrologueOffsetNotSet);
4124   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4125     code->set_marked_for_deoptimization(false);
4126   }
4127   // Allow self references to the created code object by patching the handle to
4128   // point to the newly allocated Code object.
4129   if (!self_reference.is_null()) {
4130     *(self_reference.location()) = code;
4131   }
4132   // Migrate generated code.
4133   // The generated code can contain Object** values (typically from handles)
4134   // that are dereferenced during the copy to point directly to the actual heap
4135   // objects. These pointers can include references to the code object itself,
4136   // through the self_reference parameter.
4137   code->CopyFrom(desc);
4138
4139 #ifdef VERIFY_HEAP
4140   if (FLAG_verify_heap) {
4141     code->Verify();
4142   }
4143 #endif
4144   return code;
4145 }
4146
4147
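// Makes a byte-for-byte copy of |code| in code space (or large object space if
// it is too big for a regular page) and relocates the copy to its new address.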
4148 MaybeObject* Heap::CopyCode(Code* code) {
4149   // Allocate an object the same size as the code object.
4150   int obj_size = code->Size();
4151   MaybeObject* maybe_result;
4152   if (obj_size > code_space()->AreaSize()) {
4153     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4154   } else {
4155     maybe_result = code_space_->AllocateRaw(obj_size);
4156   }
4157
4158   Object* result;
4159   if (!maybe_result->ToObject(&result)) return maybe_result;
4160
4161   // Copy code object.
4162   Address old_addr = code->address();
4163   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4164   CopyBlock(new_addr, old_addr, obj_size);
4165   // Relocate the copy.
4166   Code* new_code = Code::cast(result);
4167   ASSERT(!isolate_->code_range()->exists() ||
4168       isolate_->code_range()->contains(code->address()));
4169   new_code->Relocate(new_addr - old_addr);
4170   return new_code;
4171 }
4172
4173
4174 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4175   // Allocate ByteArray before the Code object, so that we do not risk
4176   // leaving an uninitialized Code object (and breaking the heap).
4177   Object* reloc_info_array;
4178   { MaybeObject* maybe_reloc_info_array =
4179         AllocateByteArray(reloc_info.length(), TENURED);
4180     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4181       return maybe_reloc_info_array;
4182     }
4183   }
4184
4185   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4186
4187   int new_obj_size = Code::SizeFor(new_body_size);
4188
4189   Address old_addr = code->address();
4190
4191   size_t relocation_offset =
4192       static_cast<size_t>(code->instruction_end() - old_addr);
4193
4194   MaybeObject* maybe_result;
4195   if (new_obj_size > code_space()->AreaSize()) {
4196     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4197   } else {
4198     maybe_result = code_space_->AllocateRaw(new_obj_size);
4199   }
4200
4201   Object* result;
4202   if (!maybe_result->ToObject(&result)) return maybe_result;
4203
4204   // Copy code object.
4205   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4206
4207   // Copy header and instructions.
4208   CopyBytes(new_addr, old_addr, relocation_offset);
4209
4210   Code* new_code = Code::cast(result);
4211   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4212
4213   // Copy patched rinfo.
4214   CopyBytes(new_code->relocation_start(),
4215             reloc_info.start(),
4216             static_cast<size_t>(reloc_info.length()));
4217
4218   // Relocate the copy.
4219   ASSERT(!isolate_->code_range()->exists() ||
4220       isolate_->code_range()->contains(code->address()));
4221   new_code->Relocate(new_addr - old_addr);
4222
4223 #ifdef VERIFY_HEAP
4224   if (FLAG_verify_heap) {
4225     code->Verify();
4226   }
4227 #endif
4228   return new_code;
4229 }
4230
4231
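// Like Allocate() below, but reserves extra space directly behind the object
// for an AllocationMemento that points back at |allocation_site|.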
4232 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4233     Handle<AllocationSite> allocation_site) {
4234   ASSERT(gc_state_ == NOT_IN_GC);
4235   ASSERT(map->instance_type() != MAP_TYPE);
4236   // If allocation failures are disallowed, we may allocate in a different
4237   // space when new space is full and the object is not a large object.
4238   AllocationSpace retry_space =
4239       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4240   int size = map->instance_size() + AllocationMemento::kSize;
4241   Object* result;
4242   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4243   if (!maybe_result->ToObject(&result)) return maybe_result;
4244   // No need for write barrier since object is white and map is in old space.
4245   HeapObject::cast(result)->set_map_no_write_barrier(map);
4246   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4247       reinterpret_cast<Address>(result) + map->instance_size());
4248   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4249   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4250   return result;
4251 }
4252
4253
4254 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4255   ASSERT(gc_state_ == NOT_IN_GC);
4256   ASSERT(map->instance_type() != MAP_TYPE);
4257   // If allocation failures are disallowed, we may allocate in a different
4258   // space when new space is full and the object is not a large object.
4259   AllocationSpace retry_space =
4260       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4261   int size = map->instance_size();
4262   Object* result;
4263   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4264   if (!maybe_result->ToObject(&result)) return maybe_result;
4265   // No need for write barrier since object is white and map is in old space.
4266   HeapObject::cast(result)->set_map_no_write_barrier(map);
4267   return result;
4268 }
4269
4270
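// Initializes all fields of a freshly allocated JSFunction; the context and
// the next-function link are initially set to undefined.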
4271 void Heap::InitializeFunction(JSFunction* function,
4272                               SharedFunctionInfo* shared,
4273                               Object* prototype) {
4274   ASSERT(!prototype->IsMap());
4275   function->initialize_properties();
4276   function->initialize_elements();
4277   function->set_shared(shared);
4278   function->set_code(shared->code());
4279   function->set_prototype_or_initial_map(prototype);
4280   function->set_context(undefined_value());
4281   function->set_literals_or_bindings(empty_fixed_array());
4282   function->set_next_function_link(undefined_value());
4283 }
4284
4285
4286 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4287   // Make sure to use globals from the function's context, since the function
4288   // can be from a different context.
4289   Context* native_context = function->context()->native_context();
4290   Map* new_map;
4291   if (function->shared()->is_generator()) {
4292     // Generator prototypes can share maps since they don't have "constructor"
4293     // properties.
4294     new_map = native_context->generator_object_prototype_map();
4295   } else {
4296     // Each function prototype gets a fresh map to avoid unwanted sharing of
4297     // maps between prototypes of different constructors.
4298     JSFunction* object_function = native_context->object_function();
4299     ASSERT(object_function->has_initial_map());
4300     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4301     if (!maybe_map->To(&new_map)) return maybe_map;
4302   }
4303
4304   Object* prototype;
4305   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4306   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4307
4308   if (!function->shared()->is_generator()) {
4309     MaybeObject* maybe_failure =
4310         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4311             constructor_string(), function, DONT_ENUM);
4312     if (maybe_failure->IsFailure()) return maybe_failure;
4313   }
4314
4315   return prototype;
4316 }
4317
4318
4319 MaybeObject* Heap::AllocateFunction(Map* function_map,
4320                                     SharedFunctionInfo* shared,
4321                                     Object* prototype,
4322                                     PretenureFlag pretenure) {
4323   AllocationSpace space =
4324       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4325   Object* result;
4326   { MaybeObject* maybe_result = Allocate(function_map, space);
4327     if (!maybe_result->ToObject(&result)) return maybe_result;
4328   }
4329   InitializeFunction(JSFunction::cast(result), shared, prototype);
4330   return result;
4331 }
4332
4333
4334 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4335   // To get fast allocation and map sharing for arguments objects we
4336   // allocate them based on an arguments boilerplate.
4337
4338   JSObject* boilerplate;
4339   int arguments_object_size;
4340   bool strict_mode_callee = callee->IsJSFunction() &&
4341       !JSFunction::cast(callee)->shared()->is_classic_mode();
4342   if (strict_mode_callee) {
4343     boilerplate =
4344         isolate()->context()->native_context()->
4345             strict_mode_arguments_boilerplate();
4346     arguments_object_size = kArgumentsObjectSizeStrict;
4347   } else {
4348     boilerplate =
4349         isolate()->context()->native_context()->arguments_boilerplate();
4350     arguments_object_size = kArgumentsObjectSize;
4351   }
4352
4353   // This calls Copy directly rather than using Heap::AllocateRaw so we
4354   // duplicate the check here.
4355   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4356
4357   // Check that the size of the boilerplate matches our
4358   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4359   // on the size being a known constant.
4360   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4361
4362   // Do the allocation.
4363   Object* result;
4364   { MaybeObject* maybe_result =
4365         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4366     if (!maybe_result->ToObject(&result)) return maybe_result;
4367   }
4368
4369   // Copy the content. The arguments boilerplate doesn't have any
4370   // fields that point to new space so it's safe to skip the write
4371   // barrier here.
4372   CopyBlock(HeapObject::cast(result)->address(),
4373             boilerplate->address(),
4374             JSObject::kHeaderSize);
4375
4376   // Set the length property.
4377   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4378                                                 Smi::FromInt(length),
4379                                                 SKIP_WRITE_BARRIER);
4380   // Set the callee property for non-strict mode arguments object only.
4381   if (!strict_mode_callee) {
4382     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4383                                                   callee);
4384   }
4385
4386   // Check the state of the object.
4387   ASSERT(JSObject::cast(result)->HasFastProperties());
4388   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4389
4390   return result;
4391 }
4392
4393
4394 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4395   ASSERT(!fun->has_initial_map());
4396
4397   // First create a new map with the size and number of in-object properties
4398   // suggested by the function.
4399   InstanceType instance_type;
4400   int instance_size;
4401   int in_object_properties;
4402   if (fun->shared()->is_generator()) {
4403     instance_type = JS_GENERATOR_OBJECT_TYPE;
4404     instance_size = JSGeneratorObject::kSize;
4405     in_object_properties = 0;
4406   } else {
4407     instance_type = JS_OBJECT_TYPE;
4408     instance_size = fun->shared()->CalculateInstanceSize();
4409     in_object_properties = fun->shared()->CalculateInObjectProperties();
4410   }
4411   Map* map;
4412   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4413   if (!maybe_map->To(&map)) return maybe_map;
4414
4415   // Fetch or allocate prototype.
4416   Object* prototype;
4417   if (fun->has_instance_prototype()) {
4418     prototype = fun->instance_prototype();
4419   } else {
4420     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4421     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4422   }
4423   map->set_inobject_properties(in_object_properties);
4424   map->set_unused_property_fields(in_object_properties);
4425   map->set_prototype(prototype);
4426   ASSERT(map->has_fast_object_elements());
4427
4428   if (!fun->shared()->is_generator()) {
4429     fun->shared()->StartInobjectSlackTracking(map);
4430   }
4431
4432   return map;
4433 }
4434
4435
4436 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4437                                      FixedArray* properties,
4438                                      Map* map) {
4439   obj->set_properties(properties);
4440   obj->initialize_elements();
4441   // TODO(1240798): Initialize the object's body using valid initial values
4442   // according to the object's initial map.  For example, if the map's
4443   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4444   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4445   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4446   // verification code has to cope with (temporarily) invalid objects.  See
4447   // for example, JSArray::JSArrayVerify.
4448   Object* filler;
4449   // We cannot always fill with one_pointer_filler_map because objects
4450   // created from API functions expect their internal fields to be initialized
4451   // with undefined_value.
4452   // Pre-allocated fields need to be initialized with undefined_value as well
4453   // so that object accesses before the constructor completes (e.g. in the
4454   // debugger) will not cause a crash.
4455   if (map->constructor()->IsJSFunction() &&
4456       JSFunction::cast(map->constructor())->shared()->
4457           IsInobjectSlackTrackingInProgress()) {
4458     // We might want to shrink the object later.
4459     ASSERT(obj->GetInternalFieldCount() == 0);
4460     filler = Heap::one_pointer_filler_map();
4461   } else {
4462     filler = Heap::undefined_value();
4463   }
4464   obj->InitializeBody(map, Heap::undefined_value(), filler);
4465 }
4466
4467
4468 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4469   // JSFunctions should be allocated using AllocateFunction to be
4470   // properly initialized.
4471   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4472
4473   // Both types of global objects should be allocated using
4474   // AllocateGlobalObject to be properly initialized.
4475   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4476   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4477
4478   // Allocate the backing storage for the properties.
4479   int prop_size = map->InitialPropertiesLength();
4480   ASSERT(prop_size >= 0);
4481   Object* properties;
4482   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4483     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4484   }
4485
4486   // Allocate the JSObject.
4487   AllocationSpace space =
4488       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4489   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4490   Object* obj;
4491   MaybeObject* maybe_obj = Allocate(map, space);
4492   if (!maybe_obj->To(&obj)) return maybe_obj;
4493
4494   // Initialize the JSObject.
4495   InitializeJSObjectFromMap(JSObject::cast(obj),
4496                             FixedArray::cast(properties),
4497                             map);
4498   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4499          JSObject::cast(obj)->HasExternalArrayElements());
4500   return obj;
4501 }
4502
4503
4504 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4505     Handle<AllocationSite> allocation_site) {
4506   // JSFunctions should be allocated using AllocateFunction to be
4507   // properly initialized.
4508   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4509
4510   // Both types of global objects should be allocated using
4511   // AllocateGlobalObject to be properly initialized.
4512   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4513   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4514
4515   // Allocate the backing storage for the properties.
4516   int prop_size = map->InitialPropertiesLength();
4517   ASSERT(prop_size >= 0);
4518   Object* properties;
4519   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4520     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4521   }
4522
4523   // Allocate the JSObject.
4524   AllocationSpace space = NEW_SPACE;
4525   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4526   Object* obj;
4527   MaybeObject* maybe_obj =
4528       AllocateWithAllocationSite(map, space, allocation_site);
4529   if (!maybe_obj->To(&obj)) return maybe_obj;
4530
4531   // Initialize the JSObject.
4532   InitializeJSObjectFromMap(JSObject::cast(obj),
4533                             FixedArray::cast(properties),
4534                             map);
4535   ASSERT(JSObject::cast(obj)->HasFastElements());
4536   return obj;
4537 }
4538
4539
4540 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4541                                     PretenureFlag pretenure) {
4542   // Allocate the initial map if absent.
4543   if (!constructor->has_initial_map()) {
4544     Object* initial_map;
4545     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4546       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4547     }
4548     constructor->set_initial_map(Map::cast(initial_map));
4549     Map::cast(initial_map)->set_constructor(constructor);
4550   }
4551   // Allocate the object based on the constructor's initial map.
4552   MaybeObject* result = AllocateJSObjectFromMap(
4553       constructor->initial_map(), pretenure);
4554 #ifdef DEBUG
4555   // Make sure result is NOT a global object if valid.
4556   Object* non_failure;
4557   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4558 #endif
4559   return result;
4560 }
4561
4562
4563 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4564     Handle<AllocationSite> allocation_site) {
4565   // Allocate the initial map if absent.
4566   if (!constructor->has_initial_map()) {
4567     Object* initial_map;
4568     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4569       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4570     }
4571     constructor->set_initial_map(Map::cast(initial_map));
4572     Map::cast(initial_map)->set_constructor(constructor);
4573   }
4574   // Allocate the object based on the constructor's initial map, or on the
4575   // payload advice from the allocation site.
4576   Map* initial_map = constructor->initial_map();
4577
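  // The allocation site stores its elements kind advice as a Smi in
  // transition_info().  If that kind differs from the initial map's kind,
  // transition the map first; AllocationSite::GetMode() then decides whether
  // the site still needs to be tracked for the updated kind.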
4578   Smi* smi = Smi::cast(allocation_site->transition_info());
4579   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4580   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4581   if (to_kind != initial_map->elements_kind()) {
4582     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4583     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4584     // Possibly alter the mode, since we found an updated elements kind
4585     // in the allocation site.
4586     mode = AllocationSite::GetMode(to_kind);
4587   }
4588
4589   MaybeObject* result;
4590   if (mode == TRACK_ALLOCATION_SITE) {
4591     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4592         allocation_site);
4593   } else {
4594     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4595   }
4596 #ifdef DEBUG
4597   // Make sure result is NOT a global object if valid.
4598   Object* non_failure;
4599   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4600 #endif
4601   return result;
4602 }
4603
4604
4605 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
4606   ASSERT(function->shared()->is_generator());
4607   Map* map;
4608   if (function->has_initial_map()) {
4609     map = function->initial_map();
4610   } else {
4611     // Allocate the initial map if absent.
4612     MaybeObject* maybe_map = AllocateInitialMap(function);
4613     if (!maybe_map->To(&map)) return maybe_map;
4614     function->set_initial_map(map);
4615     map->set_constructor(function);
4616   }
4617   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4618   return AllocateJSObjectFromMap(map);
4619 }
4620
4621
4622 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4623   // Allocate a fresh map. Modules do not have a prototype.
4624   Map* map;
4625   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4626   if (!maybe_map->To(&map)) return maybe_map;
4627   // Allocate the object based on the map.
4628   JSModule* module;
4629   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4630   if (!maybe_module->To(&module)) return maybe_module;
4631   module->set_context(context);
4632   module->set_scope_info(scope_info);
4633   return module;
4634 }
4635
4636
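// Allocates a JSArray together with its backing store.  With
// DONT_INITIALIZE_ARRAY_ELEMENTS the backing store is left uninitialized and
// the caller is expected to fill it in right away, while
// INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE fills every slot with the hole value.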
4637 MaybeObject* Heap::AllocateJSArrayAndStorage(
4638     ElementsKind elements_kind,
4639     int length,
4640     int capacity,
4641     ArrayStorageAllocationMode mode,
4642     PretenureFlag pretenure) {
4643   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4644   JSArray* array;
4645   if (!maybe_array->To(&array)) return maybe_array;
4646
4647   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4648   // for performance reasons.
4649   ASSERT(capacity >= length);
4650
4651   if (capacity == 0) {
4652     array->set_length(Smi::FromInt(0));
4653     array->set_elements(empty_fixed_array());
4654     return array;
4655   }
4656
4657   FixedArrayBase* elms;
4658   MaybeObject* maybe_elms = NULL;
4659   if (IsFastDoubleElementsKind(elements_kind)) {
4660     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4661       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4662     } else {
4663       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4664       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4665     }
4666   } else {
4667     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4668     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4669       maybe_elms = AllocateUninitializedFixedArray(capacity);
4670     } else {
4671       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4672       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4673     }
4674   }
4675   if (!maybe_elms->To(&elms)) return maybe_elms;
4676
4677   array->set_elements(elms);
4678   array->set_length(Smi::FromInt(length));
4679   return array;
4680 }
4681
4682
4683 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4684     ElementsKind elements_kind,
4685     int length,
4686     int capacity,
4687     Handle<AllocationSite> allocation_site,
4688     ArrayStorageAllocationMode mode) {
4689   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4690       allocation_site);
4691   JSArray* array;
4692   if (!maybe_array->To(&array)) return maybe_array;
4693   return AllocateJSArrayStorage(array, length, capacity, mode);
4694 }
4695
4696
4697 MaybeObject* Heap::AllocateJSArrayStorage(
4698     JSArray* array,
4699     int length,
4700     int capacity,
4701     ArrayStorageAllocationMode mode) {
4702   ASSERT(capacity >= length);
4703
4704   if (capacity == 0) {
4705     array->set_length(Smi::FromInt(0));
4706     array->set_elements(empty_fixed_array());
4707     return array;
4708   }
4709
4710   FixedArrayBase* elms;
4711   MaybeObject* maybe_elms = NULL;
4712   ElementsKind elements_kind = array->GetElementsKind();
4713   if (IsFastDoubleElementsKind(elements_kind)) {
4714     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4715       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4716     } else {
4717       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4718       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4719     }
4720   } else {
4721     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4722     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4723       maybe_elms = AllocateUninitializedFixedArray(capacity);
4724     } else {
4725       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4726       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4727     }
4728   }
4729   if (!maybe_elms->To(&elms)) return maybe_elms;
4730
4731   array->set_elements(elms);
4732   array->set_length(Smi::FromInt(length));
4733   return array;
4734 }
4735
4736
4737 MaybeObject* Heap::AllocateJSArrayWithElements(
4738     FixedArrayBase* elements,
4739     ElementsKind elements_kind,
4740     int length,
4741     PretenureFlag pretenure) {
4742   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4743   JSArray* array;
4744   if (!maybe_array->To(&array)) return maybe_array;
4745
4746   array->set_elements(elements);
4747   array->set_length(Smi::FromInt(length));
4748   array->ValidateElements();
4749   return array;
4750 }
4751
4752
4753 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4754   // Allocate map.
4755   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4756   // maps. Will probably depend on the identity of the handler object, too.
4757   Map* map;
4758   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4759   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4760   map->set_prototype(prototype);
4761
4762   // Allocate the proxy object.
4763   JSProxy* result;
4764   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4765   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4766   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4767   result->set_handler(handler);
4768   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4769   return result;
4770 }
4771
4772
4773 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4774                                            Object* call_trap,
4775                                            Object* construct_trap,
4776                                            Object* prototype) {
4777   // Allocate map.
4778   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4779   // maps. Will probably depend on the identity of the handler object, too.
4780   Map* map;
4781   MaybeObject* maybe_map_obj =
4782       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4783   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4784   map->set_prototype(prototype);
4785
4786   // Allocate the proxy object.
4787   JSFunctionProxy* result;
4788   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4789   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4790   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4791   result->set_handler(handler);
4792   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4793   result->set_call_trap(call_trap);
4794   result->set_construct_trap(construct_trap);
4795   return result;
4796 }
4797
4798
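// Allocates a JS global object or builtins object.  Global objects are
// always created in dictionary (slow) mode: the accessors described by the
// initial map are moved into a NameDictionary, each value wrapped in a
// PropertyCell, and the object is given a fresh dictionary-mode map.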
4799 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4800   ASSERT(constructor->has_initial_map());
4801   Map* map = constructor->initial_map();
4802   ASSERT(map->is_dictionary_map());
4803
4804   // Make sure no field properties are described in the initial map.
4805   // This guarantees that normalizing the properties does not
4806   // require us to change property values to PropertyCells.
4807   ASSERT(map->NextFreePropertyIndex() == 0);
4808
4809   // Make sure we don't have a ton of pre-allocated slots in the
4810   // global objects. They will be unused once we normalize the object.
4811   ASSERT(map->unused_property_fields() == 0);
4812   ASSERT(map->inobject_properties() == 0);
4813
4814   // Initial size of the backing store to avoid resize of the storage during
4815   // bootstrapping. The size differs between the JS global object ad the
4816   // builtins object.
4817   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4818
4819   // Allocate a dictionary object for backing storage.
4820   NameDictionary* dictionary;
4821   MaybeObject* maybe_dictionary =
4822       NameDictionary::Allocate(
4823           this,
4824           map->NumberOfOwnDescriptors() * 2 + initial_size);
4825   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4826
4827   // The global object might be created from an object template with accessors.
4828   // Fill these accessors into the dictionary.
4829   DescriptorArray* descs = map->instance_descriptors();
4830   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4831     PropertyDetails details = descs->GetDetails(i);
4832     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4833     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4834     Object* value = descs->GetCallbacksObject(i);
4835     MaybeObject* maybe_value = AllocatePropertyCell(value);
4836     if (!maybe_value->ToObject(&value)) return maybe_value;
4837
4838     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4839     if (!maybe_added->To(&dictionary)) return maybe_added;
4840   }
4841
4842   // Allocate the global object and initialize it with the backing store.
4843   JSObject* global;
4844   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4845   if (!maybe_global->To(&global)) return maybe_global;
4846
4847   InitializeJSObjectFromMap(global, dictionary, map);
4848
4849   // Create a new map for the global object.
4850   Map* new_map;
4851   MaybeObject* maybe_map = map->CopyDropDescriptors();
4852   if (!maybe_map->To(&new_map)) return maybe_map;
4853   new_map->set_dictionary_map(true);
4854
4855   // Set up the global object as a normalized object.
4856   global->set_map(new_map);
4857   global->set_properties(dictionary);
4858
4859   // Make sure result is a global object with properties in dictionary.
4860   ASSERT(global->IsGlobalObject());
4861   ASSERT(!global->HasFastProperties());
4862   return global;
4863 }
4864
4865
4866 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4867   // Never used to copy functions.  If functions need to be copied we
4868   // have to be careful to clear the literals array.
4869   SLOW_ASSERT(!source->IsJSFunction());
4870
4871   // Make the clone.
4872   Map* map = source->map();
4873   int object_size = map->instance_size();
4874   Object* clone;
4875
4876   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4877
4878   // If we're forced to always allocate, we use the general allocation
4879   // functions which may leave us with an object in old space.
4880   if (always_allocate()) {
4881     { MaybeObject* maybe_clone =
4882           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4883       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4884     }
4885     Address clone_address = HeapObject::cast(clone)->address();
4886     CopyBlock(clone_address,
4887               source->address(),
4888               object_size);
4889     // Update write barrier for all fields that lie beyond the header.
4890     RecordWrites(clone_address,
4891                  JSObject::kHeaderSize,
4892                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4893   } else {
4894     wb_mode = SKIP_WRITE_BARRIER;
4895
4896     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4897       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4898     }
4899     SLOW_ASSERT(InNewSpace(clone));
4900     // Since we know the clone is allocated in new space, we can copy
4901     // the contents without worrying about updating the write barrier.
4902     CopyBlock(HeapObject::cast(clone)->address(),
4903               source->address(),
4904               object_size);
4905   }
4906
4907   SLOW_ASSERT(
4908       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4909   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4910   FixedArray* properties = FixedArray::cast(source->properties());
4911   // Update elements if necessary.
4912   if (elements->length() > 0) {
4913     Object* elem;
4914     { MaybeObject* maybe_elem;
4915       if (elements->map() == fixed_cow_array_map()) {
4916         maybe_elem = FixedArray::cast(elements);
4917       } else if (source->HasFastDoubleElements()) {
4918         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4919       } else {
4920         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4921       }
4922       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4923     }
4924     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4925   }
4926   // Update properties if necessary.
4927   if (properties->length() > 0) {
4928     Object* prop;
4929     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4930       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4931     }
4932     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4933   }
4934   // Return the new clone.
4935   return clone;
4936 }
4937
4938
4939 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4940     JSObject* source,
4941     AllocationSite* site) {
4942   // Never used to copy functions.  If functions need to be copied we
4943   // have to be careful to clear the literals array.
4944   SLOW_ASSERT(!source->IsJSFunction());
4945
4946   // Make the clone.
4947   Map* map = source->map();
4948   int object_size = map->instance_size();
4949   Object* clone;
4950
4951   ASSERT(map->CanTrackAllocationSite());
4952   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4953   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4954
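  // |adjusted_object_size| is |object_size| plus room for an inline
  // AllocationMemento whenever the clone is expected to land in new space.
  // If the memento cannot be reserved inline but the clone ends up in new
  // space anyway, a standalone memento struct is allocated further down.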
4955   // If we're forced to always allocate, we use the general allocation
4956   // functions which may leave us with an object in old space.
4957   int adjusted_object_size = object_size;
4958   if (always_allocate()) {
4959     // We'll only track the origin if we are certain to allocate in new space.
4960     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4961     if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4962       adjusted_object_size += AllocationMemento::kSize;
4963     }
4964
4965     { MaybeObject* maybe_clone =
4966           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4967       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4968     }
4969     Address clone_address = HeapObject::cast(clone)->address();
4970     CopyBlock(clone_address,
4971               source->address(),
4972               object_size);
4973     // Update write barrier for all fields that lie beyond the header.
4974     int write_barrier_offset = adjusted_object_size > object_size
4975         ? JSArray::kSize + AllocationMemento::kSize
4976         : JSObject::kHeaderSize;
4977     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4978       RecordWrites(clone_address,
4979                    write_barrier_offset,
4980                    (object_size - write_barrier_offset) / kPointerSize);
4981     }
4982
4983     // Record the allocation site if the memento was not allocated inline.
4984     if (InNewSpace(clone) &&
4985         adjusted_object_size == object_size) {
4986       MaybeObject* maybe_alloc_memento =
4987           AllocateStruct(ALLOCATION_MEMENTO_TYPE);
4988       AllocationMemento* alloc_memento;
4989       if (maybe_alloc_memento->To(&alloc_memento)) {
4990         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4991         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4992       }
4993     }
4994   } else {
4995     wb_mode = SKIP_WRITE_BARRIER;
4996     adjusted_object_size += AllocationMemento::kSize;
4997
4998     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4999       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5000     }
5001     SLOW_ASSERT(InNewSpace(clone));
5002     // Since we know the clone is allocated in new space, we can copy
5003     // the contents without worrying about updating the write barrier.
5004     CopyBlock(HeapObject::cast(clone)->address(),
5005               source->address(),
5006               object_size);
5007   }
5008
5009   if (adjusted_object_size > object_size) {
5010     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5011         reinterpret_cast<Address>(clone) + object_size);
5012     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5013     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5014   }
5015
5016   SLOW_ASSERT(
5017       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5018   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5019   FixedArray* properties = FixedArray::cast(source->properties());
5020   // Update elements if necessary.
5021   if (elements->length() > 0) {
5022     Object* elem;
5023     { MaybeObject* maybe_elem;
5024       if (elements->map() == fixed_cow_array_map()) {
5025         maybe_elem = FixedArray::cast(elements);
5026       } else if (source->HasFastDoubleElements()) {
5027         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5028       } else {
5029         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5030       }
5031       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5032     }
5033     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5034   }
5035   // Update properties if necessary.
5036   if (properties->length() > 0) {
5037     Object* prop;
5038     { MaybeObject* maybe_prop = CopyFixedArray(properties);
5039       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5040     }
5041     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5042   }
5043   // Return the new clone.
5044   return clone;
5045 }
5046
5047
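// Re-initializes an existing JSReceiver in place as an object of |type|,
// e.g. when a JSProxy is fixed into an ordinary object or function.  The new
// instance size must not exceed the old one; any leftover space is filled
// with a filler object below.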
5048 MaybeObject* Heap::ReinitializeJSReceiver(
5049     JSReceiver* object, InstanceType type, int size) {
5050   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5051
5052   // Allocate fresh map.
5053   // TODO(rossberg): Once we optimize proxies, cache these maps.
5054   Map* map;
5055   MaybeObject* maybe = AllocateMap(type, size);
5056   if (!maybe->To<Map>(&map)) return maybe;
5057
5058   // Check that the receiver has at least the size of the fresh object.
5059   int size_difference = object->map()->instance_size() - map->instance_size();
5060   ASSERT(size_difference >= 0);
5061
5062   map->set_prototype(object->map()->prototype());
5063
5064   // Allocate the backing storage for the properties.
5065   int prop_size = map->unused_property_fields() - map->inobject_properties();
5066   Object* properties;
5067   maybe = AllocateFixedArray(prop_size, TENURED);
5068   if (!maybe->ToObject(&properties)) return maybe;
5069
5070   // Functions require some allocation, which might fail here.
5071   SharedFunctionInfo* shared = NULL;
5072   if (type == JS_FUNCTION_TYPE) {
5073     String* name;
5074     maybe =
5075         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5076     if (!maybe->To<String>(&name)) return maybe;
5077     maybe = AllocateSharedFunctionInfo(name);
5078     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5079   }
5080
5081   // Because of possible retries of this function after failure,
5082   // we must NOT fail after this point, where we have changed the type!
5083
5084   // Reset the map for the object.
5085   object->set_map(map);
5086   JSObject* jsobj = JSObject::cast(object);
5087
5088   // Reinitialize the object from the constructor map.
5089   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5090
5091   // Functions require some minimal initialization.
5092   if (type == JS_FUNCTION_TYPE) {
5093     map->set_function_with_prototype(true);
5094     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5095     JSFunction::cast(object)->set_context(
5096         isolate()->context()->native_context());
5097   }
5098
5099   // Put in filler if the new object is smaller than the old.
5100   if (size_difference > 0) {
5101     CreateFillerObjectAt(
5102         object->address() + map->instance_size(), size_difference);
5103   }
5104
5105   return object;
5106 }
5107
5108
5109 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5110                                              JSGlobalProxy* object) {
5111   ASSERT(constructor->has_initial_map());
5112   Map* map = constructor->initial_map();
5113
5114   // Check that the already allocated object has the same size and type as
5115   // objects allocated using the constructor.
5116   ASSERT(map->instance_size() == object->map()->instance_size());
5117   ASSERT(map->instance_type() == object->map()->instance_type());
5118
5119   // Allocate the backing storage for the properties.
5120   int prop_size = map->unused_property_fields() - map->inobject_properties();
5121   Object* properties;
5122   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5123     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5124   }
5125
5126   // Reset the map for the object.
5127   object->set_map(constructor->initial_map());
5128
5129   // Reinitialize the object from the constructor map.
5130   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5131   return object;
5132 }
5133
5134
5135 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5136                                            PretenureFlag pretenure) {
5137   int length = string.length();
5138   if (length == 1) {
5139     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5140   }
5141   Object* result;
5142   { MaybeObject* maybe_result =
5143         AllocateRawOneByteString(string.length(), pretenure);
5144     if (!maybe_result->ToObject(&result)) return maybe_result;
5145   }
5146
5147   // Copy the characters into the new object.
5148   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5149             string.start(),
5150             length);
5151   return result;
5152 }
5153
5154
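// Illustrative example: for the hypothetical UTF-8 input "abc\xC3\xA9",
// |non_ascii_start| would typically be 3 (callers may also pass a
// word-aligned prefix length), so the ASCII prefix is copied verbatim and the
// remaining bytes are decoded into UTF-16 code units.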
5155 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5156                                               int non_ascii_start,
5157                                               PretenureFlag pretenure) {
5158   // Continue counting the number of characters in the UTF-8 string, starting
5159   // from the first non-ascii character or word.
5160   Access<UnicodeCache::Utf8Decoder>
5161       decoder(isolate_->unicode_cache()->utf8_decoder());
5162   decoder->Reset(string.start() + non_ascii_start,
5163                  string.length() - non_ascii_start);
5164   int utf16_length = decoder->Utf16Length();
5165   ASSERT(utf16_length > 0);
5166   // Allocate string.
5167   Object* result;
5168   {
5169     int chars = non_ascii_start + utf16_length;
5170     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5171     if (!maybe_result->ToObject(&result)) return maybe_result;
5172   }
5173   // Convert and copy the characters into the new object.
5174   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5175   // Copy ascii portion.
5176   uint16_t* data = twobyte->GetChars();
5177   if (non_ascii_start != 0) {
5178     const char* ascii_data = string.start();
5179     for (int i = 0; i < non_ascii_start; i++) {
5180       *data++ = *ascii_data++;
5181     }
5182   }
5183   // Now write the remainder.
5184   decoder->WriteUtf16(data, utf16_length);
5185   return result;
5186 }
5187
5188
5189 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5190                                              PretenureFlag pretenure) {
5191   // Check if the string can be represented as a one-byte string.
5192   Object* result;
5193   int length = string.length();
5194   const uc16* start = string.start();
5195
5196   if (String::IsOneByte(start, length)) {
5197     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5198     if (!maybe_result->ToObject(&result)) return maybe_result;
5199     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5200   } else {  // It's not a one byte string.
5201     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5202     if (!maybe_result->ToObject(&result)) return maybe_result;
5203     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5204   }
5205   return result;
5206 }
5207
5208
5209 Map* Heap::InternalizedStringMapForString(String* string) {
5210   // A string in new space cannot be used as an internalized string.
5211   if (InNewSpace(string)) return NULL;
5212
5213   // Find the corresponding internalized string map for strings.
5214   switch (string->map()->instance_type()) {
5215     case STRING_TYPE: return internalized_string_map();
5216     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5217     case CONS_STRING_TYPE: return cons_internalized_string_map();
5218     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5219     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5220     case EXTERNAL_ASCII_STRING_TYPE:
5221       return external_ascii_internalized_string_map();
5222     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5223       return external_internalized_string_with_one_byte_data_map();
5224     case SHORT_EXTERNAL_STRING_TYPE:
5225       return short_external_internalized_string_map();
5226     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5227       return short_external_ascii_internalized_string_map();
5228     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5229       return short_external_internalized_string_with_one_byte_data_map();
5230     default: return NULL;  // No match found.
5231   }
5232 }
5233
5234
5235 static inline void WriteOneByteData(Vector<const char> vector,
5236                                     uint8_t* chars,
5237                                     int len) {
5238   // Only works for ascii.
5239   ASSERT(vector.length() == len);
5240   OS::MemCopy(chars, vector.start(), len);
5241 }
5242
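// Decodes the UTF-8 bytes in |vector| into UTF-16 code units.  Characters
// above U+FFFF are written as a lead/trail surrogate pair (two code units);
// all others take a single unit.  |len| must match the exact number of
// UTF-16 code units produced.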
5243 static inline void WriteTwoByteData(Vector<const char> vector,
5244                                     uint16_t* chars,
5245                                     int len) {
5246   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5247   unsigned stream_length = vector.length();
5248   while (stream_length != 0) {
5249     unsigned consumed = 0;
5250     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5251     ASSERT(c != unibrow::Utf8::kBadChar);
5252     ASSERT(consumed <= stream_length);
5253     stream_length -= consumed;
5254     stream += consumed;
5255     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5256       len -= 2;
5257       if (len < 0) break;
5258       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5259       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5260     } else {
5261       len -= 1;
5262       if (len < 0) break;
5263       *chars++ = c;
5264     }
5265   }
5266   ASSERT(stream_length == 0);
5267   ASSERT(len == 0);
5268 }
5269
5270
5271 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5272   ASSERT(s->length() == len);
5273   String::WriteToFlat(s, chars, 0, len);
5274 }
5275
5276
5277 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5278   ASSERT(s->length() == len);
5279   String::WriteToFlat(s, chars, 0, len);
5280 }
5281
5282
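// Allocates a sequential internalized string of |chars| characters and fills
// it from |t| (either a String* or a Vector<const char>).  Internalized
// strings are never placed in new space: the body goes to old data space, or
// to large object space when it exceeds the regular page object size limit.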
5283 template<bool is_one_byte, typename T>
5284 MaybeObject* Heap::AllocateInternalizedStringImpl(
5285     T t, int chars, uint32_t hash_field) {
5286   ASSERT(chars >= 0);
5287   // Compute map and object size.
5288   int size;
5289   Map* map;
5290
5291   if (is_one_byte) {
5292     if (chars > SeqOneByteString::kMaxLength) {
5293       return Failure::OutOfMemoryException(0x9);
5294     }
5295     map = ascii_internalized_string_map();
5296     size = SeqOneByteString::SizeFor(chars);
5297   } else {
5298     if (chars > SeqTwoByteString::kMaxLength) {
5299       return Failure::OutOfMemoryException(0xa);
5300     }
5301     map = internalized_string_map();
5302     size = SeqTwoByteString::SizeFor(chars);
5303   }
5304
5305   // Allocate string.
5306   Object* result;
5307   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5308                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5309                    : old_data_space_->AllocateRaw(size);
5310     if (!maybe_result->ToObject(&result)) return maybe_result;
5311   }
5312
5313   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5314   // Set length and hash fields of the allocated string.
5315   String* answer = String::cast(result);
5316   answer->set_length(chars);
5317   answer->set_hash_field(hash_field);
5318
5319   ASSERT_EQ(size, answer->Size());
5320
5321   if (is_one_byte) {
5322     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5323   } else {
5324     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5325   }
5326   return answer;
5327 }
5328
5329
5330 // Need explicit instantiations.
5331 template
5332 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5333 template
5334 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5335     String*, int, uint32_t);
5336 template
5337 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5338     Vector<const char>, int, uint32_t);
5339
5340
5341 MaybeObject* Heap::AllocateRawOneByteString(int length,
5342                                             PretenureFlag pretenure) {
5343   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5344     return Failure::OutOfMemoryException(0xb);
5345   }
5346   int size = SeqOneByteString::SizeFor(length);
5347   ASSERT(size <= SeqOneByteString::kMaxSize);
5348   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5349   AllocationSpace retry_space = OLD_DATA_SPACE;
5350
5351   if (size > Page::kMaxNonCodeHeapObjectSize) {
5352     // Allocate in large object space; the retry space will be ignored.
5353     space = LO_SPACE;
5354   }
5355
5356   Object* result;
5357   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5358     if (!maybe_result->ToObject(&result)) return maybe_result;
5359   }
5360
5361   // Partially initialize the object.
5362   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5363   String::cast(result)->set_length(length);
5364   String::cast(result)->set_hash_field(String::kEmptyHashField);
5365   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5366
5367   return result;
5368 }
5369
5370
5371 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5372                                             PretenureFlag pretenure) {
5373   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5374     return Failure::OutOfMemoryException(0xc);
5375   }
5376   int size = SeqTwoByteString::SizeFor(length);
5377   ASSERT(size <= SeqTwoByteString::kMaxSize);
5378   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5379   AllocationSpace retry_space = OLD_DATA_SPACE;
5380
5381   if (size > Page::kMaxNonCodeHeapObjectSize) {
5382     // Allocate in large object space; the retry space will be ignored.
5383     space = LO_SPACE;
5384   }
5385
5386   Object* result;
5387   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5388     if (!maybe_result->ToObject(&result)) return maybe_result;
5389   }
5390
5391   // Partially initialize the object.
5392   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5393   String::cast(result)->set_length(length);
5394   String::cast(result)->set_hash_field(String::kEmptyHashField);
5395   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5396   return result;
5397 }
5398
5399
5400 MaybeObject* Heap::AllocateJSArray(
5401     ElementsKind elements_kind,
5402     PretenureFlag pretenure) {
5403   Context* native_context = isolate()->context()->native_context();
5404   JSFunction* array_function = native_context->array_function();
5405   Map* map = array_function->initial_map();
5406   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5407   if (transition_map != NULL) map = transition_map;
5408   return AllocateJSObjectFromMap(map, pretenure);
5409 }
5410
5411
5412 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5413     ElementsKind elements_kind,
5414     Handle<AllocationSite> allocation_site) {
5415   Context* native_context = isolate()->context()->native_context();
5416   JSFunction* array_function = native_context->array_function();
5417   Map* map = array_function->initial_map();
5418   Object* maybe_map_array = native_context->js_array_maps();
5419   if (!maybe_map_array->IsUndefined()) {
5420     Object* maybe_transitioned_map =
5421         FixedArray::cast(maybe_map_array)->get(elements_kind);
5422     if (!maybe_transitioned_map->IsUndefined()) {
5423       map = Map::cast(maybe_transitioned_map);
5424     }
5425   }
5426   return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5427 }
5428
5429
5430 MaybeObject* Heap::AllocateEmptyFixedArray() {
5431   int size = FixedArray::SizeFor(0);
5432   Object* result;
5433   { MaybeObject* maybe_result =
5434         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5435     if (!maybe_result->ToObject(&result)) return maybe_result;
5436   }
5437   // Initialize the object.
5438   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5439       fixed_array_map());
5440   reinterpret_cast<FixedArray*>(result)->set_length(0);
5441   return result;
5442 }
5443
5444
5445 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5446   return AllocateExternalArray(0, array_type, NULL, TENURED);
5447 }
5448
5449
5450 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5451   if (length < 0 || length > FixedArray::kMaxLength) {
5452     return Failure::OutOfMemoryException(0xd);
5453   }
5454   ASSERT(length > 0);
5455   // Use the general function if we're forced to always allocate.
5456   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5457   // Allocate the raw data for a fixed array.
5458   int size = FixedArray::SizeFor(length);
5459   return size <= Page::kMaxNonCodeHeapObjectSize
5460       ? new_space_.AllocateRaw(size)
5461       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5462 }
5463
5464
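// Copies |src| into a fresh fixed array that uses |map|.  If the copy lands
// in new space, the body is block-copied without write barriers; otherwise
// the elements are copied one by one using the write barrier mode the
// destination requires.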
5465 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5466   int len = src->length();
5467   Object* obj;
5468   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5469     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5470   }
5471   if (InNewSpace(obj)) {
5472     HeapObject* dst = HeapObject::cast(obj);
5473     dst->set_map_no_write_barrier(map);
5474     CopyBlock(dst->address() + kPointerSize,
5475               src->address() + kPointerSize,
5476               FixedArray::SizeFor(len) - kPointerSize);
5477     return obj;
5478   }
5479   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5480   FixedArray* result = FixedArray::cast(obj);
5481   result->set_length(len);
5482
5483   // Copy the content
5484   DisallowHeapAllocation no_gc;
5485   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5486   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5487   return result;
5488 }
5489
5490
5491 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5492                                                Map* map) {
5493   int len = src->length();
5494   Object* obj;
5495   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5496     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5497   }
5498   HeapObject* dst = HeapObject::cast(obj);
5499   dst->set_map_no_write_barrier(map);
5500   CopyBlock(
5501       dst->address() + FixedDoubleArray::kLengthOffset,
5502       src->address() + FixedDoubleArray::kLengthOffset,
5503       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5504   return obj;
5505 }
5506
5507
5508 MaybeObject* Heap::AllocateFixedArray(int length) {
5509   ASSERT(length >= 0);
5510   if (length == 0) return empty_fixed_array();
5511   Object* result;
5512   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5513     if (!maybe_result->ToObject(&result)) return maybe_result;
5514   }
5515   // Initialize header.
5516   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5517   array->set_map_no_write_barrier(fixed_array_map());
5518   array->set_length(length);
5519   // Initialize body.
5520   ASSERT(!InNewSpace(undefined_value()));
5521   MemsetPointer(array->data_start(), undefined_value(), length);
5522   return result;
5523 }
5524
5525
5526 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5527   if (length < 0 || length > FixedArray::kMaxLength) {
5528     return Failure::OutOfMemoryException(0xe);
5529   }
5530   int size = FixedArray::SizeFor(length);
5531   AllocationSpace space =
5532       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5533   AllocationSpace retry_space = OLD_POINTER_SPACE;
5534
5535   if (size > Page::kMaxNonCodeHeapObjectSize) {
5536     // Allocate in large object space; the retry space will be ignored.
5537     space = LO_SPACE;
5538   }
5539
5540   return AllocateRaw(size, space, retry_space);
5541 }
5542
5543
5544 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5545     Heap* heap,
5546     int length,
5547     PretenureFlag pretenure,
5548     Object* filler) {
5549   ASSERT(length >= 0);
5550   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5551   if (length == 0) return heap->empty_fixed_array();
5552
5553   ASSERT(!heap->InNewSpace(filler));
5554   Object* result;
5555   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5556     if (!maybe_result->ToObject(&result)) return maybe_result;
5557   }
5558
5559   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5560   FixedArray* array = FixedArray::cast(result);
5561   array->set_length(length);
5562   MemsetPointer(array->data_start(), filler, length);
5563   return array;
5564 }
5565
5566
5567 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5568   return AllocateFixedArrayWithFiller(this,
5569                                       length,
5570                                       pretenure,
5571                                       undefined_value());
5572 }
5573
5574
5575 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5576                                                PretenureFlag pretenure) {
5577   return AllocateFixedArrayWithFiller(this,
5578                                       length,
5579                                       pretenure,
5580                                       the_hole_value());
5581 }
5582
5583
5584 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5585   if (length == 0) return empty_fixed_array();
5586
5587   Object* obj;
5588   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5589     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5590   }
5591
5592   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5593       fixed_array_map());
5594   FixedArray::cast(obj)->set_length(length);
5595   return obj;
5596 }
5597
5598
5599 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5600   int size = FixedDoubleArray::SizeFor(0);
5601   Object* result;
5602   { MaybeObject* maybe_result =
5603         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5604     if (!maybe_result->ToObject(&result)) return maybe_result;
5605   }
5606   // Initialize the object.
5607   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5608       fixed_double_array_map());
5609   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5610   return result;
5611 }
5612
5613
5614 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5615     int length,
5616     PretenureFlag pretenure) {
5617   if (length == 0) return empty_fixed_array();
5618
5619   Object* elements_object;
5620   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5621   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5622   FixedDoubleArray* elements =
5623       reinterpret_cast<FixedDoubleArray*>(elements_object);
5624
5625   elements->set_map_no_write_barrier(fixed_double_array_map());
5626   elements->set_length(length);
5627   return elements;
5628 }
5629
5630
5631 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5632     int length,
5633     PretenureFlag pretenure) {
5634   if (length == 0) return empty_fixed_array();
5635
5636   Object* elements_object;
5637   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5638   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5639   FixedDoubleArray* elements =
5640       reinterpret_cast<FixedDoubleArray*>(elements_object);
5641
5642   for (int i = 0; i < length; ++i) {
5643     elements->set_the_hole(i);
5644   }
5645
5646   elements->set_map_no_write_barrier(fixed_double_array_map());
5647   elements->set_length(length);
5648   return elements;
5649 }
5650
5651
5652 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5653                                                PretenureFlag pretenure) {
5654   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5655     return Failure::OutOfMemoryException(0xf);
5656   }
5657   int size = FixedDoubleArray::SizeFor(length);
5658   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5659   AllocationSpace retry_space = OLD_DATA_SPACE;
5660
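  // On 32-bit hosts, reserve one extra pointer-sized word so that
  // EnsureDoubleAligned() below can align the double payload to 8 bytes.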
5661 #ifndef V8_HOST_ARCH_64_BIT
5662   size += kPointerSize;
5663 #endif
5664
5665   if (size > Page::kMaxNonCodeHeapObjectSize) {
5666     // Allocate in large object space; the retry space will be ignored.
5667     space = LO_SPACE;
5668   }
5669
5670   HeapObject* object;
5671   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5672     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5673   }
5674
5675   return EnsureDoubleAligned(this, object, size);
5676 }
5677
5678
5679 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5680   Object* result;
5681   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5682     if (!maybe_result->ToObject(&result)) return maybe_result;
5683   }
5684   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5685       hash_table_map());
5686   ASSERT(result->IsHashTable());
5687   return result;
5688 }
5689
5690
5691 MaybeObject* Heap::AllocateSymbol() {
5692   // Statically ensure that it is safe to allocate symbols in paged spaces.
5693   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5694
5695   Object* result;
5696   MaybeObject* maybe =
5697       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5698   if (!maybe->ToObject(&result)) return maybe;
5699
5700   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5701
5702   // Generate a random hash value.
5703   int hash;
5704   int attempts = 0;
5705   do {
5706     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5707     attempts++;
5708   } while (hash == 0 && attempts < 30);
5709   if (hash == 0) hash = 1;  // never return 0
5710
5711   Symbol::cast(result)->set_hash_field(
5712       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5713   Symbol::cast(result)->set_name(undefined_value());
5714
5715   ASSERT(result->IsSymbol());
5716   return result;
5717 }
5718
5719
5720 MaybeObject* Heap::AllocateNativeContext() {
5721   Object* result;
5722   { MaybeObject* maybe_result =
5723         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5724     if (!maybe_result->ToObject(&result)) return maybe_result;
5725   }
5726   Context* context = reinterpret_cast<Context*>(result);
5727   context->set_map_no_write_barrier(native_context_map());
5728   context->set_js_array_maps(undefined_value());
5729   ASSERT(context->IsNativeContext());
5730   ASSERT(result->IsContext());
5731   return result;
5732 }
5733
5734
5735 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5736                                          ScopeInfo* scope_info) {
5737   Object* result;
5738   { MaybeObject* maybe_result =
5739         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5740     if (!maybe_result->ToObject(&result)) return maybe_result;
5741   }
5742   Context* context = reinterpret_cast<Context*>(result);
5743   context->set_map_no_write_barrier(global_context_map());
5744   context->set_closure(function);
5745   context->set_previous(function->context());
5746   context->set_extension(scope_info);
5747   context->set_global_object(function->context()->global_object());
5748   ASSERT(context->IsGlobalContext());
5749   ASSERT(result->IsContext());
5750   return context;
5751 }
5752
5753
5754 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5755   Object* result;
5756   { MaybeObject* maybe_result =
5757         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5758     if (!maybe_result->ToObject(&result)) return maybe_result;
5759   }
5760   Context* context = reinterpret_cast<Context*>(result);
5761   context->set_map_no_write_barrier(module_context_map());
5762   // Instance link will be set later.
5763   context->set_extension(Smi::FromInt(0));
5764   return context;
5765 }
5766
5767
5768 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5769   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5770   Object* result;
5771   { MaybeObject* maybe_result = AllocateFixedArray(length);
5772     if (!maybe_result->ToObject(&result)) return maybe_result;
5773   }
5774   Context* context = reinterpret_cast<Context*>(result);
5775   context->set_map_no_write_barrier(function_context_map());
5776   context->set_closure(function);
5777   context->set_previous(function->context());
5778   context->set_extension(Smi::FromInt(0));
5779   context->set_global_object(function->context()->global_object());
5780   return context;
5781 }
5782
5783
5784 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5785                                         Context* previous,
5786                                         String* name,
5787                                         Object* thrown_object) {
5788   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
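  // A catch context stores the thrown object in one extra slot placed right
  // after the minimal context slots; the assert above guards that layout.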
5789   Object* result;
5790   { MaybeObject* maybe_result =
5791         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5792     if (!maybe_result->ToObject(&result)) return maybe_result;
5793   }
5794   Context* context = reinterpret_cast<Context*>(result);
5795   context->set_map_no_write_barrier(catch_context_map());
5796   context->set_closure(function);
5797   context->set_previous(previous);
5798   context->set_extension(name);
5799   context->set_global_object(previous->global_object());
5800   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5801   return context;
5802 }
5803
5804
5805 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5806                                        Context* previous,
5807                                        JSReceiver* extension) {
5808   Object* result;
5809   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5810     if (!maybe_result->ToObject(&result)) return maybe_result;
5811   }
5812   Context* context = reinterpret_cast<Context*>(result);
5813   context->set_map_no_write_barrier(with_context_map());
5814   context->set_closure(function);
5815   context->set_previous(previous);
5816   context->set_extension(extension);
5817   context->set_global_object(previous->global_object());
5818   return context;
5819 }
5820
5821
5822 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5823                                         Context* previous,
5824                                         ScopeInfo* scope_info) {
5825   Object* result;
5826   { MaybeObject* maybe_result =
5827         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5828     if (!maybe_result->ToObject(&result)) return maybe_result;
5829   }
5830   Context* context = reinterpret_cast<Context*>(result);
5831   context->set_map_no_write_barrier(block_context_map());
5832   context->set_closure(function);
5833   context->set_previous(previous);
5834   context->set_extension(scope_info);
5835   context->set_global_object(previous->global_object());
5836   return context;
5837 }
5838
5839
5840 MaybeObject* Heap::AllocateScopeInfo(int length) {
5841   FixedArray* scope_info;
5842   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5843   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5844   scope_info->set_map_no_write_barrier(scope_info_map());
5845   return scope_info;
5846 }
5847
5848
5849 MaybeObject* Heap::AllocateExternal(void* value) {
5850   Foreign* foreign;
5851   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5852     if (!maybe_result->To(&foreign)) return maybe_result;
5853   }
5854   JSObject* external;
5855   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5856     if (!maybe_result->To(&external)) return maybe_result;
5857   }
5858   external->SetInternalField(0, foreign);
5859   return external;
5860 }
5861
5862
5863 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5864   Map* map;
5865   switch (type) {
5866 #define MAKE_CASE(NAME, Name, name) \
5867     case NAME##_TYPE: map = name##_map(); break;
5868 STRUCT_LIST(MAKE_CASE)
5869 #undef MAKE_CASE
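    // Each STRUCT_LIST entry (NAME, Name, name) expands to
    //   case NAME##_TYPE: map = name##_map(); break;
    // e.g. an entry (SCRIPT, Script, script) would pick script_map().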
5870     default:
5871       UNREACHABLE();
5872       return Failure::InternalError();
5873   }
5874   int size = map->instance_size();
5875   AllocationSpace space =
5876       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5877   Object* result;
5878   { MaybeObject* maybe_result = Allocate(map, space);
5879     if (!maybe_result->ToObject(&result)) return maybe_result;
5880   }
5881   Struct::cast(result)->InitializeBody(size);
5882   return result;
5883 }
5884
5885
5886 bool Heap::IsHeapIterable() {
5887   return (!old_pointer_space()->was_swept_conservatively() &&
5888           !old_data_space()->was_swept_conservatively());
5889 }
5890
5891
5892 void Heap::EnsureHeapIsIterable() {
5893   ASSERT(AllowHeapAllocation::IsAllowed());
5894   if (!IsHeapIterable()) {
5895     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5896   }
5897   ASSERT(IsHeapIterable());
5898 }
5899
5900
5901 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5902   incremental_marking()->Step(step_size,
5903                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5904
5905   if (incremental_marking()->IsComplete()) {
5906     bool uncommit = false;
5907     if (gc_count_at_last_idle_gc_ == gc_count_) {
5908       // No GC since the last full GC; the mutator is probably not active.
5909       isolate_->compilation_cache()->Clear();
5910       uncommit = true;
5911     }
5912     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5913     mark_sweeps_since_idle_round_started_++;
5914     gc_count_at_last_idle_gc_ = gc_count_;
5915     if (uncommit) {
5916       new_space_.Shrink();
5917       UncommitFromSpace();
5918     }
5919   }
5920 }
5921
5922
5923 bool Heap::IdleNotification(int hint) {
5924   // Hints greater than this value indicate that
5925   // the embedder is requesting a lot of GC work.
5926   const int kMaxHint = 1000;
5927   const int kMinHintForIncrementalMarking = 10;
5928   // Minimal hint that allows a full GC.
5929   const int kMinHintForFullGC = 100;
5930   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5931   // The size factor is in range [5..250]. The numbers here are chosen from
5932   // experiments. If you change them, make sure to test with
5933   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*"
5934   intptr_t step_size =
5935       size_factor * IncrementalMarking::kAllocatedThreshold;
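  // Worked example (illustrative arithmetic only): a hint of 100 gives
  // size_factor = Min(Max(100, 20), 1000) / 4 = 25, so each idle step covers
  // 25 * IncrementalMarking::kAllocatedThreshold bytes worth of marking work.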
5936
5937   if (contexts_disposed_ > 0) {
5938     if (hint >= kMaxHint) {
5939       // The embedder is requesting a lot of GC work after context disposal;
5940       // we age inline caches so that they don't keep objects from
5941       // the old context alive.
5942       AgeInlineCaches();
5943     }
5944     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5945     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5946         incremental_marking()->IsStopped()) {
5947       HistogramTimerScope scope(isolate_->counters()->gc_context());
5948       CollectAllGarbage(kReduceMemoryFootprintMask,
5949                         "idle notification: contexts disposed");
5950     } else {
5951       AdvanceIdleIncrementalMarking(step_size);
5952       contexts_disposed_ = 0;
5953     }
5954     // After context disposal there is likely a lot of garbage remaining, so reset
5955     // the idle notification counters in order to trigger more incremental GCs
5956     // on subsequent idle notifications.
5957     StartIdleRound();
5958     return false;
5959   }
5960
5961   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5962     return IdleGlobalGC();
5963   }
5964
5965   // Each IdleNotification performs a small chunk of GC work. Together these
5966   // chunks complete a round of incremental GCs; after that we wait until
5967   // the mutator creates enough garbage to justify a new round.
5968   // An incremental GC progresses as follows:
5969   // 1. many incremental marking steps,
5970   // 2. one old space mark-sweep-compact,
5971   // 3. many lazy sweep steps.
5972   // Use mark-sweep-compact events to count incremental GCs in a round.
5973
5974   if (incremental_marking()->IsStopped()) {
5975     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5976         !IsSweepingComplete() &&
5977         !AdvanceSweepers(static_cast<int>(step_size))) {
5978       return false;
5979     }
5980   }
5981
5982   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5983     if (EnoughGarbageSinceLastIdleRound()) {
5984       StartIdleRound();
5985     } else {
5986       return true;
5987     }
5988   }
5989
5990   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5991                               mark_sweeps_since_idle_round_started_;
5992
5993   if (incremental_marking()->IsStopped()) {
5994     // If there are no more than two GCs left in this idle round and we are
5995     // allowed to do a full GC, then make those GCs full in order to compact
5996     // the code space.
5997     // TODO(ulan): Once we enable code compaction for incremental marking,
5998     // we can get rid of this special case and always start incremental marking.
5999     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6000       CollectAllGarbage(kReduceMemoryFootprintMask,
6001                         "idle notification: finalize idle round");
6002       mark_sweeps_since_idle_round_started_++;
6003     } else if (hint > kMinHintForIncrementalMarking) {
6004       incremental_marking()->Start();
6005     }
6006   }
6007   if (!incremental_marking()->IsStopped() &&
6008       hint > kMinHintForIncrementalMarking) {
6009     AdvanceIdleIncrementalMarking(step_size);
6010   }
6011
6012   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6013     FinishIdleRound();
6014     return true;
6015   }
6016
6017   return false;
6018 }
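
// Illustrative embedder-side sketch (an assumption for illustration, relying
// on the public v8::V8::IdleNotification(int) API; StopPostingIdleTasks() is
// a hypothetical embedder helper):
//
//   void OnMessageLoopIdle() {
//     // Hint at roughly how much idle GC work is welcome right now.
//     bool round_finished = v8::V8::IdleNotification(100);
//     if (round_finished) StopPostingIdleTasks();
//   }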
6019
6020
6021 bool Heap::IdleGlobalGC() {
6022   static const int kIdlesBeforeScavenge = 4;
6023   static const int kIdlesBeforeMarkSweep = 7;
6024   static const int kIdlesBeforeMarkCompact = 8;
6025   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6026   static const unsigned int kGCsBetweenCleanup = 4;
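  // Roughly: the 4th consecutive idle notification triggers a scavenge, the
  // 7th a full collection (after clearing the compilation cache), and the 8th
  // another full collection that also finishes the idle round.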
6027
6028   if (!last_idle_notification_gc_count_init_) {
6029     last_idle_notification_gc_count_ = gc_count_;
6030     last_idle_notification_gc_count_init_ = true;
6031   }
6032
6033   bool uncommit = true;
6034   bool finished = false;
6035
6036   // Reset the number of idle notifications received when a number of
6037   // GCs have taken place. This allows another round of cleanup based
6038   // on idle notifications if enough work has been carried out to
6039   // provoke a number of garbage collections.
6040   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6041     number_idle_notifications_ =
6042         Min(number_idle_notifications_ + 1, kMaxIdleCount);
6043   } else {
6044     number_idle_notifications_ = 0;
6045     last_idle_notification_gc_count_ = gc_count_;
6046   }
6047
6048   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6049     CollectGarbage(NEW_SPACE, "idle notification");
6050     new_space_.Shrink();
6051     last_idle_notification_gc_count_ = gc_count_;
6052   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6053     // Before doing the mark-sweep collections we clear the
6054     // compilation cache to avoid hanging on to source code and
6055     // generated code for cached functions.
6056     isolate_->compilation_cache()->Clear();
6057
6058     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6059     new_space_.Shrink();
6060     last_idle_notification_gc_count_ = gc_count_;
6061
6062   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6063     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6064     new_space_.Shrink();
6065     last_idle_notification_gc_count_ = gc_count_;
6066     number_idle_notifications_ = 0;
6067     finished = true;
6068   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6069     // If we have received more than kIdlesBeforeMarkCompact idle
6070     // notifications we do not perform any cleanup because we don't
6071     // expect to gain much by doing so.
6072     finished = true;
6073   }
6074
6075   if (uncommit) UncommitFromSpace();
6076
6077   return finished;
6078 }
6079
6080
6081 #ifdef DEBUG
6082
6083 void Heap::Print() {
6084   if (!HasBeenSetUp()) return;
6085   isolate()->PrintStack(stdout);
6086   AllSpaces spaces(this);
6087   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6088     space->Print();
6089   }
6090 }
6091
6092
6093 void Heap::ReportCodeStatistics(const char* title) {
6094   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6095   PagedSpace::ResetCodeStatistics();
6096   // We do not look for code in new space, map space, or old space.  If code
6097   // somehow ends up in those spaces, we would miss it here.
6098   code_space_->CollectCodeStatistics();
6099   lo_space_->CollectCodeStatistics();
6100   PagedSpace::ReportCodeStatistics();
6101 }
6102
6103
6104 // This function expects that NewSpace's allocated objects histogram is
6105 // populated (via a call to CollectStatistics or else as a side effect of a
6106 // just-completed scavenge collection).
6107 void Heap::ReportHeapStatistics(const char* title) {
6108   USE(title);
6109   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6110          title, gc_count_);
6111   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6112          old_generation_allocation_limit_);
6113
6114   PrintF("\n");
6115   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6116   isolate_->global_handles()->PrintStats();
6117   PrintF("\n");
6118
6119   PrintF("Heap statistics : ");
6120   isolate_->memory_allocator()->ReportStatistics();
6121   PrintF("To space : ");
6122   new_space_.ReportStatistics();
6123   PrintF("Old pointer space : ");
6124   old_pointer_space_->ReportStatistics();
6125   PrintF("Old data space : ");
6126   old_data_space_->ReportStatistics();
6127   PrintF("Code space : ");
6128   code_space_->ReportStatistics();
6129   PrintF("Map space : ");
6130   map_space_->ReportStatistics();
6131   PrintF("Cell space : ");
6132   cell_space_->ReportStatistics();
6133   PrintF("PropertyCell space : ");
6134   property_cell_space_->ReportStatistics();
6135   PrintF("Large object space : ");
6136   lo_space_->ReportStatistics();
6137   PrintF(">>>>>> ========================================= >>>>>>\n");
6138 }
6139
6140 #endif  // DEBUG
6141
6142 bool Heap::Contains(HeapObject* value) {
6143   return Contains(value->address());
6144 }
6145
6146
6147 bool Heap::Contains(Address addr) {
6148   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6149   return HasBeenSetUp() &&
6150     (new_space_.ToSpaceContains(addr) ||
6151      old_pointer_space_->Contains(addr) ||
6152      old_data_space_->Contains(addr) ||
6153      code_space_->Contains(addr) ||
6154      map_space_->Contains(addr) ||
6155      cell_space_->Contains(addr) ||
6156      property_cell_space_->Contains(addr) ||
6157      lo_space_->SlowContains(addr));
6158 }
6159
6160
6161 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6162   return InSpace(value->address(), space);
6163 }
6164
6165
6166 bool Heap::InSpace(Address addr, AllocationSpace space) {
6167   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6168   if (!HasBeenSetUp()) return false;
6169
6170   switch (space) {
6171     case NEW_SPACE:
6172       return new_space_.ToSpaceContains(addr);
6173     case OLD_POINTER_SPACE:
6174       return old_pointer_space_->Contains(addr);
6175     case OLD_DATA_SPACE:
6176       return old_data_space_->Contains(addr);
6177     case CODE_SPACE:
6178       return code_space_->Contains(addr);
6179     case MAP_SPACE:
6180       return map_space_->Contains(addr);
6181     case CELL_SPACE:
6182       return cell_space_->Contains(addr);
6183     case PROPERTY_CELL_SPACE:
6184       return property_cell_space_->Contains(addr);
6185     case LO_SPACE:
6186       return lo_space_->SlowContains(addr);
6187   }
6188
6189   return false;
6190 }
6191
6192
6193 #ifdef VERIFY_HEAP
6194 void Heap::Verify() {
6195   CHECK(HasBeenSetUp());
6196
6197   store_buffer()->Verify();
6198
6199   VerifyPointersVisitor visitor;
6200   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6201
6202   new_space_.Verify();
6203
6204   old_pointer_space_->Verify(&visitor);
6205   map_space_->Verify(&visitor);
6206
6207   VerifyPointersVisitor no_dirty_regions_visitor;
6208   old_data_space_->Verify(&no_dirty_regions_visitor);
6209   code_space_->Verify(&no_dirty_regions_visitor);
6210   cell_space_->Verify(&no_dirty_regions_visitor);
6211   property_cell_space_->Verify(&no_dirty_regions_visitor);
6212
6213   lo_space_->Verify();
6214 }
6215 #endif
6216
6217
6218 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6219   Object* result = NULL;
6220   Object* new_table;
6221   { MaybeObject* maybe_new_table =
6222         string_table()->LookupUtf8String(string, &result);
6223     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6224   }
6225   // Can't use set_string_table because StringTable::cast knows that
6226   // StringTable is a singleton and checks for identity.
6227   roots_[kStringTableRootIndex] = new_table;
6228   ASSERT(result != NULL);
6229   return result;
6230 }
6231
6232
6233 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6234   Object* result = NULL;
6235   Object* new_table;
6236   { MaybeObject* maybe_new_table =
6237         string_table()->LookupOneByteString(string, &result);
6238     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6239   }
6240   // Can't use set_string_table because StringTable::cast knows that
6241   // StringTable is a singleton and checks for identity.
6242   roots_[kStringTableRootIndex] = new_table;
6243   ASSERT(result != NULL);
6244   return result;
6245 }
6246
6247
6248 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6249                                      int from,
6250                                      int length) {
6251   Object* result = NULL;
6252   Object* new_table;
6253   { MaybeObject* maybe_new_table =
6254         string_table()->LookupSubStringOneByteString(string,
6255                                                    from,
6256                                                    length,
6257                                                    &result);
6258     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6259   }
6260   // Can't use set_string_table because StringTable::cast knows that
6261   // StringTable is a singleton and checks for identity.
6262   roots_[kStringTableRootIndex] = new_table;
6263   ASSERT(result != NULL);
6264   return result;
6265 }
6266
6267
6268 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6269   Object* result = NULL;
6270   Object* new_table;
6271   { MaybeObject* maybe_new_table =
6272         string_table()->LookupTwoByteString(string, &result);
6273     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6274   }
6275   // Can't use set_string_table because StringTable::cast knows that
6276   // StringTable is a singleton and checks for identity.
6277   roots_[kStringTableRootIndex] = new_table;
6278   ASSERT(result != NULL);
6279   return result;
6280 }
6281
6282
6283 MaybeObject* Heap::InternalizeString(String* string) {
6284   if (string->IsInternalizedString()) return string;
6285   Object* result = NULL;
6286   Object* new_table;
6287   { MaybeObject* maybe_new_table =
6288         string_table()->LookupString(string, &result);
6289     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6290   }
6291   // Can't use set_string_table because StringTable::cast knows that
6292   // StringTable is a singleton and checks for identity.
6293   roots_[kStringTableRootIndex] = new_table;
6294   ASSERT(result != NULL);
6295   return result;
6296 }
6297
6298
6299 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6300   if (string->IsInternalizedString()) {
6301     *result = string;
6302     return true;
6303   }
6304   return string_table()->LookupStringIfExists(string, result);
6305 }
6306
6307
6308 void Heap::ZapFromSpace() {
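// Overwrite from-space with kFromSpaceZapValue; the recognizable pattern makes
// stale pointers into the evacuated semispace easy to spot when debugging.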
6309   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6310                           new_space_.FromSpaceEnd());
6311   while (it.has_next()) {
6312     NewSpacePage* page = it.next();
6313     for (Address cursor = page->area_start(), limit = page->area_end();
6314          cursor < limit;
6315          cursor += kPointerSize) {
6316       Memory::Address_at(cursor) = kFromSpaceZapValue;
6317     }
6318   }
6319 }
6320
6321
6322 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6323                                              Address end,
6324                                              ObjectSlotCallback callback) {
6325   Address slot_address = start;
6326
6327   // We are not collecting slots on new space objects during mutation,
6328   // thus we have to scan for pointers to evacuation candidates when we
6329   // promote objects. But we should not record any slots in non-black
6330   // objects. Grey objects' slots would be rescanned anyway.
6331   // White objects might not survive until the end of the collection, so
6332   // it would be a violation of the invariant to record their slots.
6333   bool record_slots = false;
6334   if (incremental_marking()->IsCompacting()) {
6335     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6336     record_slots = Marking::IsBlack(mark_bit);
6337   }
6338
6339   while (slot_address < end) {
6340     Object** slot = reinterpret_cast<Object**>(slot_address);
6341     Object* object = *slot;
6342     // If the store buffer becomes overfull we mark pages as being exempt from
6343     // the store buffer.  These pages are scanned to find pointers that point
6344     // to the new space.  In that case we may hit newly promoted objects and
6345     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6346     if (object->IsHeapObject()) {
6347       if (Heap::InFromSpace(object)) {
6348         callback(reinterpret_cast<HeapObject**>(slot),
6349                  HeapObject::cast(object));
6350         Object* new_object = *slot;
6351         if (InNewSpace(new_object)) {
6352           SLOW_ASSERT(Heap::InToSpace(new_object));
6353           SLOW_ASSERT(new_object->IsHeapObject());
6354           store_buffer_.EnterDirectlyIntoStoreBuffer(
6355               reinterpret_cast<Address>(slot));
6356         }
6357         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6358       } else if (record_slots &&
6359                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6360         mark_compact_collector()->RecordSlot(slot, slot, object);
6361       }
6362     }
6363     slot_address += kPointerSize;
6364   }
6365 }
6366
6367
6368 #ifdef DEBUG
6369 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6370
6371
6372 bool IsAMapPointerAddress(Object** addr) {
6373   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6374   int mod = a % Map::kSize;
6375   return mod >= Map::kPointerFieldsBeginOffset &&
6376          mod < Map::kPointerFieldsEndOffset;
6377 }
6378
6379
6380 bool EverythingsAPointer(Object** addr) {
6381   return true;
6382 }
6383
6384
6385 static void CheckStoreBuffer(Heap* heap,
6386                              Object** current,
6387                              Object** limit,
6388                              Object**** store_buffer_position,
6389                              Object*** store_buffer_top,
6390                              CheckStoreBufferFilter filter,
6391                              Address special_garbage_start,
6392                              Address special_garbage_end) {
6393   Map* free_space_map = heap->free_space_map();
6394   for ( ; current < limit; current++) {
6395     Object* o = *current;
6396     Address current_address = reinterpret_cast<Address>(current);
6397     // Skip free space.
6398     if (o == free_space_map) {
6400       FreeSpace* free_space =
6401           FreeSpace::cast(HeapObject::FromAddress(current_address));
6402       int skip = free_space->Size();
6403       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6404       ASSERT(skip > 0);
6405       current_address += skip - kPointerSize;
6406       current = reinterpret_cast<Object**>(current_address);
6407       continue;
6408     }
6409     // Skip the current linear allocation space between top and limit which is
6410     // unmarked with the free space map, but can contain junk.
6411     if (current_address == special_garbage_start &&
6412         special_garbage_end != special_garbage_start) {
6413       current_address = special_garbage_end - kPointerSize;
6414       current = reinterpret_cast<Object**>(current_address);
6415       continue;
6416     }
6417     if (!(*filter)(current)) continue;
6418     ASSERT(current_address < special_garbage_start ||
6419            current_address >= special_garbage_end);
6420     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6421     // We have to check that the pointer does not point into new space
6422     // without trying to cast it to a heap object since the hash field of
6423     // a string can contain values like 1 and 3, which are tagged null
6424     // pointers.
6425     if (!heap->InNewSpace(o)) continue;
6426     while (**store_buffer_position < current &&
6427            *store_buffer_position < store_buffer_top) {
6428       (*store_buffer_position)++;
6429     }
6430     if (**store_buffer_position != current ||
6431         *store_buffer_position == store_buffer_top) {
6432       Object** obj_start = current;
6433       while (!(*obj_start)->IsMap()) obj_start--;
6434       UNREACHABLE();
6435     }
6436   }
6437 }
6438
6439
6440 // Check that the store buffer contains all intergenerational pointers by
6441 // scanning a page and ensuring that all pointers to young space are in the
6442 // store buffer.
6443 void Heap::OldPointerSpaceCheckStoreBuffer() {
6444   OldSpace* space = old_pointer_space();
6445   PageIterator pages(space);
6446
6447   store_buffer()->SortUniq();
6448
6449   while (pages.has_next()) {
6450     Page* page = pages.next();
6451     Object** current = reinterpret_cast<Object**>(page->area_start());
6452
6453     Address end = page->area_end();
6454
6455     Object*** store_buffer_position = store_buffer()->Start();
6456     Object*** store_buffer_top = store_buffer()->Top();
6457
6458     Object** limit = reinterpret_cast<Object**>(end);
6459     CheckStoreBuffer(this,
6460                      current,
6461                      limit,
6462                      &store_buffer_position,
6463                      store_buffer_top,
6464                      &EverythingsAPointer,
6465                      space->top(),
6466                      space->limit());
6467   }
6468 }
6469
6470
6471 void Heap::MapSpaceCheckStoreBuffer() {
6472   MapSpace* space = map_space();
6473   PageIterator pages(space);
6474
6475   store_buffer()->SortUniq();
6476
6477   while (pages.has_next()) {
6478     Page* page = pages.next();
6479     Object** current = reinterpret_cast<Object**>(page->area_start());
6480
6481     Address end = page->area_end();
6482
6483     Object*** store_buffer_position = store_buffer()->Start();
6484     Object*** store_buffer_top = store_buffer()->Top();
6485
6486     Object** limit = reinterpret_cast<Object**>(end);
6487     CheckStoreBuffer(this,
6488                      current,
6489                      limit,
6490                      &store_buffer_position,
6491                      store_buffer_top,
6492                      &IsAMapPointerAddress,
6493                      space->top(),
6494                      space->limit());
6495   }
6496 }
6497
6498
6499 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6500   LargeObjectIterator it(lo_space());
6501   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6502     // We only have code, sequential strings, or fixed arrays in large
6503     // object space, and only fixed arrays can possibly contain pointers to
6504     // the young generation.
6505     if (object->IsFixedArray()) {
6506       Object*** store_buffer_position = store_buffer()->Start();
6507       Object*** store_buffer_top = store_buffer()->Top();
6508       Object** current = reinterpret_cast<Object**>(object->address());
6509       Object** limit =
6510           reinterpret_cast<Object**>(object->address() + object->Size());
6511       CheckStoreBuffer(this,
6512                        current,
6513                        limit,
6514                        &store_buffer_position,
6515                        store_buffer_top,
6516                        &EverythingsAPointer,
6517                        NULL,
6518                        NULL);
6519     }
6520   }
6521 }
6522 #endif
6523
6524
6525 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6526   IterateStrongRoots(v, mode);
6527   IterateWeakRoots(v, mode);
6528 }
6529
6530
6531 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6532   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6533   v->Synchronize(VisitorSynchronization::kStringTable);
6534   if (mode != VISIT_ALL_IN_SCAVENGE &&
6535       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6536     // Scavenge collections have special processing for this.
6537     external_string_table_.Iterate(v);
6538   }
6539   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6540 }
6541
6542
6543 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6544   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6545   v->Synchronize(VisitorSynchronization::kStrongRootList);
6546
6547   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6548   v->Synchronize(VisitorSynchronization::kInternalizedString);
6549
6550   isolate_->bootstrapper()->Iterate(v);
6551   v->Synchronize(VisitorSynchronization::kBootstrapper);
6552   isolate_->Iterate(v);
6553   v->Synchronize(VisitorSynchronization::kTop);
6554   Relocatable::Iterate(v);
6555   v->Synchronize(VisitorSynchronization::kRelocatable);
6556
6557 #ifdef ENABLE_DEBUGGER_SUPPORT
6558   isolate_->debug()->Iterate(v);
6559   if (isolate_->deoptimizer_data() != NULL) {
6560     isolate_->deoptimizer_data()->Iterate(v);
6561   }
6562 #endif
6563   v->Synchronize(VisitorSynchronization::kDebug);
6564   isolate_->compilation_cache()->Iterate(v);
6565   v->Synchronize(VisitorSynchronization::kCompilationCache);
6566
6567   // Iterate over local handles in handle scopes.
6568   isolate_->handle_scope_implementer()->Iterate(v);
6569   isolate_->IterateDeferredHandles(v);
6570   v->Synchronize(VisitorSynchronization::kHandleScope);
6571
6572   // Iterate over the builtin code objects and code stubs in the
6573   // heap. Note that it is not necessary to iterate over code objects
6574   // on scavenge collections.
6575   if (mode != VISIT_ALL_IN_SCAVENGE) {
6576     isolate_->builtins()->IterateBuiltins(v);
6577   }
6578   v->Synchronize(VisitorSynchronization::kBuiltins);
6579
6580   // Iterate over global handles.
6581   switch (mode) {
6582     case VISIT_ONLY_STRONG:
6583       isolate_->global_handles()->IterateStrongRoots(v);
6584       break;
6585     case VISIT_ALL_IN_SCAVENGE:
6586       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6587       break;
6588     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6589     case VISIT_ALL:
6590       isolate_->global_handles()->IterateAllRoots(v);
6591       break;
6592   }
6593   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6594
6595   // Iterate over eternal handles.
6596   if (mode == VISIT_ALL_IN_SCAVENGE) {
6597     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6598   } else {
6599     isolate_->eternal_handles()->IterateAllRoots(v);
6600   }
6601   v->Synchronize(VisitorSynchronization::kEternalHandles);
6602
6603   // Iterate over pointers being held by inactive threads.
6604   isolate_->thread_manager()->Iterate(v);
6605   v->Synchronize(VisitorSynchronization::kThreadManager);
6606
6607   // Iterate over the pointers the Serialization/Deserialization code is
6608   // holding.
6609   // During garbage collection this keeps the partial snapshot cache alive.
6610   // During deserialization of the startup snapshot this creates the partial
6611   // snapshot cache and deserializes the objects it refers to.  During
6612   // serialization this does nothing, since the partial snapshot cache is
6613   // empty.  However the next thing we do is create the partial snapshot,
6614   // filling up the partial snapshot cache with objects it needs as we go.
6615   SerializerDeserializer::Iterate(v);
6616   // We don't do a v->Synchronize call here, because in debug mode that will
6617   // output a flag to the snapshot.  However at this point the serializer and
6618   // deserializer are deliberately a little unsynchronized (see above) so the
6619   // checking of the sync flag in the snapshot would fail.
6620 }
6621
6622
6623 // TODO(1236194): Since the heap size is configurable on the command line
6624 // and through the API, we should gracefully handle the case that the heap
6625 // size is not big enough to fit all the initial objects.
6626 bool Heap::ConfigureHeap(int max_semispace_size,
6627                          intptr_t max_old_gen_size,
6628                          intptr_t max_executable_size) {
6629   if (HasBeenSetUp()) return false;
6630
6631   if (FLAG_stress_compaction) {
6632     // This will cause more frequent GCs when stressing.
6633     max_semispace_size_ = Page::kPageSize;
6634   }
6635
6636   if (max_semispace_size > 0) {
6637     if (max_semispace_size < Page::kPageSize) {
6638       max_semispace_size = Page::kPageSize;
6639       if (FLAG_trace_gc) {
6640         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6641                  Page::kPageSize >> 10);
6642       }
6643     }
6644     max_semispace_size_ = max_semispace_size;
6645   }
6646
6647   if (Snapshot::IsEnabled()) {
6648     // If we are using a snapshot we always reserve the default amount
6649     // of memory for each semispace because code in the snapshot has
6650     // write-barrier code that relies on the size and alignment of new
6651     // space.  We therefore cannot use a larger max semispace size
6652     // than the default reserved semispace size.
6653     if (max_semispace_size_ > reserved_semispace_size_) {
6654       max_semispace_size_ = reserved_semispace_size_;
6655       if (FLAG_trace_gc) {
6656         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6657                  reserved_semispace_size_ >> 10);
6658       }
6659     }
6660   } else {
6661     // If we are not using snapshots we reserve space for the actual
6662     // max semispace size.
6663     reserved_semispace_size_ = max_semispace_size_;
6664   }
6665
6666   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6667   if (max_executable_size > 0) {
6668     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6669   }
6670
6671   // The max executable size must be less than or equal to the max old
6672   // generation size.
6673   if (max_executable_size_ > max_old_generation_size_) {
6674     max_executable_size_ = max_old_generation_size_;
6675   }
6676
6677   // The new space size must be a power of two to support single-bit testing
6678   // for containment.
6679   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6680   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
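  // For example, a requested 3 MB semispace is rounded up to 4 MB here.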
6681   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6682
6683   // The external allocation limit should be below 256 MB on all architectures
6684   // to avoid unnecessary low memory notifications, as that is the threshold
6685   // for some embedders.
6686   external_allocation_limit_ = 12 * max_semispace_size_;
6687   ASSERT(external_allocation_limit_ <= 256 * MB);
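  // For example, a 16 MB max semispace yields a 192 MB external allocation
  // limit, comfortably under the 256 MB bound asserted above.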
6688
6689   // The old generation is paged and needs at least one page for each space.
6690   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6691   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6692                                                        Page::kPageSize),
6693                                  RoundUp(max_old_generation_size_,
6694                                          Page::kPageSize));
6695
6696   configured_ = true;
6697   return true;
6698 }
6699
6700
6701 bool Heap::ConfigureHeapDefault() {
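  // As the unit conversions below indicate, FLAG_max_new_space_size is given
  // in KB and covers both semispaces (hence the division by two), while the
  // old-space and executable-size flags are given in MB.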
6702   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6703                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6704                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6705 }
6706
6707
6708 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6709   *stats->start_marker = HeapStats::kStartMarker;
6710   *stats->end_marker = HeapStats::kEndMarker;
6711   *stats->new_space_size = new_space_.SizeAsInt();
6712   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6713   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6714   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6715   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6716   *stats->old_data_space_capacity = old_data_space_->Capacity();
6717   *stats->code_space_size = code_space_->SizeOfObjects();
6718   *stats->code_space_capacity = code_space_->Capacity();
6719   *stats->map_space_size = map_space_->SizeOfObjects();
6720   *stats->map_space_capacity = map_space_->Capacity();
6721   *stats->cell_space_size = cell_space_->SizeOfObjects();
6722   *stats->cell_space_capacity = cell_space_->Capacity();
6723   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6724   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6725   *stats->lo_space_size = lo_space_->Size();
6726   isolate_->global_handles()->RecordStats(stats);
6727   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6728   *stats->memory_allocator_capacity =
6729       isolate()->memory_allocator()->Size() +
6730       isolate()->memory_allocator()->Available();
6731   *stats->os_error = OS::GetLastError();
6733   if (take_snapshot) {
6734     HeapIterator iterator(this);
6735     for (HeapObject* obj = iterator.next();
6736          obj != NULL;
6737          obj = iterator.next()) {
6738       InstanceType type = obj->map()->instance_type();
6739       ASSERT(0 <= type && type <= LAST_TYPE);
6740       stats->objects_per_type[type]++;
6741       stats->size_per_type[type] += obj->Size();
6742     }
6743   }
6744 }
6745
6746
6747 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6748   return old_pointer_space_->SizeOfObjects()
6749       + old_data_space_->SizeOfObjects()
6750       + code_space_->SizeOfObjects()
6751       + map_space_->SizeOfObjects()
6752       + cell_space_->SizeOfObjects()
6753       + property_cell_space_->SizeOfObjects()
6754       + lo_space_->SizeOfObjects();
6755 }
6756
6757
6758 intptr_t Heap::PromotedExternalMemorySize() {
6759   if (amount_of_external_allocated_memory_
6760       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6761   return amount_of_external_allocated_memory_
6762       - amount_of_external_allocated_memory_at_last_global_gc_;
6763 }
6764
6765
6766 V8_DECLARE_ONCE(initialize_gc_once);
6767
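// Runs at most once per process (via CallOnce in Heap::SetUp below) and sets
// up process-wide GC tables shared by all isolates.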
6768 static void InitializeGCOnce() {
6769   InitializeScavengingVisitorsTables();
6770   NewSpaceScavenger::Initialize();
6771   MarkCompactCollector::Initialize();
6772 }
6773
6774
6775 bool Heap::SetUp() {
6776 #ifdef DEBUG
6777   allocation_timeout_ = FLAG_gc_interval;
6778 #endif
6779
6780   // Initialize heap spaces and initial maps and objects. Whenever something
6781   // goes wrong, just return false. The caller should check the results and
6782   // call Heap::TearDown() to release allocated memory.
6783   //
6784   // If the heap is not yet configured (e.g. through the API), configure it.
6785   // Configuration is based on the flags new-space-size (really the semispace
6786   // size) and old-space-size if set, or on the initial values of
6787   // semispace_size_ and old_generation_size_ otherwise.
6788   if (!configured_) {
6789     if (!ConfigureHeapDefault()) return false;
6790   }
6791
6792   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6793
6794   MarkMapPointersAsEncoded(false);
6795
6796   // Set up memory allocator.
6797   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6798       return false;
6799
6800   // Set up new space.
6801   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6802     return false;
6803   }
6804
6805   // Initialize old pointer space.
6806   old_pointer_space_ =
6807       new OldSpace(this,
6808                    max_old_generation_size_,
6809                    OLD_POINTER_SPACE,
6810                    NOT_EXECUTABLE);
6811   if (old_pointer_space_ == NULL) return false;
6812   if (!old_pointer_space_->SetUp()) return false;
6813
6814   // Initialize old data space.
6815   old_data_space_ =
6816       new OldSpace(this,
6817                    max_old_generation_size_,
6818                    OLD_DATA_SPACE,
6819                    NOT_EXECUTABLE);
6820   if (old_data_space_ == NULL) return false;
6821   if (!old_data_space_->SetUp()) return false;
6822
6823   // Initialize the code space and set its maximum capacity to the old
6824   // generation size. It needs executable memory.
6825   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6826   // virtual address space, so that they can call each other with near calls.
6827   if (code_range_size_ > 0) {
6828     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6829       return false;
6830     }
6831   }
6832
6833   code_space_ =
6834       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6835   if (code_space_ == NULL) return false;
6836   if (!code_space_->SetUp()) return false;
6837
6838   // Initialize map space.
6839   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6840   if (map_space_ == NULL) return false;
6841   if (!map_space_->SetUp()) return false;
6842
6843   // Initialize simple cell space.
6844   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6845   if (cell_space_ == NULL) return false;
6846   if (!cell_space_->SetUp()) return false;
6847
6848   // Initialize global property cell space.
6849   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6850                                                PROPERTY_CELL_SPACE);
6851   if (property_cell_space_ == NULL) return false;
6852   if (!property_cell_space_->SetUp()) return false;
6853
6854   // The large object space may contain code or data.  We set the memory
6855   // to be non-executable here for safety, but this means we need to enable it
6856   // explicitly when allocating large code objects.
6857   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6858   if (lo_space_ == NULL) return false;
6859   if (!lo_space_->SetUp()) return false;
6860
6861   // Set up the seed that is used to randomize the string hash function.
6862   ASSERT(hash_seed() == 0);
6863   if (FLAG_randomize_hashes) {
6864     if (FLAG_hash_seed == 0) {
6865       set_hash_seed(
6866           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6867     } else {
6868       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6869     }
6870   }
6871
6872   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6873   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6874
6875   store_buffer()->SetUp();
6876
6877   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6878 #ifdef DEBUG
6879   relocation_mutex_locked_by_optimizer_thread_ = false;
6880 #endif  // DEBUG
6881
6882   return true;
6883 }
6884
6885
6886 bool Heap::CreateHeapObjects() {
6887   // Create initial maps.
6888   if (!CreateInitialMaps()) return false;
6889   if (!CreateApiObjects()) return false;
6890
6891   // Create initial objects
6892   if (!CreateInitialObjects()) return false;
6893
6894   native_contexts_list_ = undefined_value();
6895   array_buffers_list_ = undefined_value();
6896   allocation_sites_list_ = undefined_value();
6897   return true;
6898 }
6899
6900
6901 void Heap::SetStackLimits() {
6902   ASSERT(isolate_ != NULL);
6903   ASSERT(isolate_ == isolate());
6904   // On 64-bit machines, pointers are generally out of range of Smis.  We write
6905   // something that looks like an out-of-range Smi to the GC.
6906
6907   // Set up the special root array entries containing the stack limits.
6908   // These are actually addresses, but the tag makes the GC ignore them.
6909   roots_[kStackLimitRootIndex] =
6910       reinterpret_cast<Object*>(
6911           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6912   roots_[kRealStackLimitRootIndex] =
6913       reinterpret_cast<Object*>(
6914           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6915 }
6916
6917
6918 void Heap::TearDown() {
6919 #ifdef VERIFY_HEAP
6920   if (FLAG_verify_heap) {
6921     Verify();
6922   }
6923 #endif
6924
6925   if (FLAG_print_cumulative_gc_stat) {
6926     PrintF("\n");
6927     PrintF("gc_count=%d ", gc_count_);
6928     PrintF("mark_sweep_count=%d ", ms_count_);
6929     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6930     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6931     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6932     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6933            get_max_alive_after_gc());
6934     PrintF("total_marking_time=%.1f ", marking_time());
6935     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6936     PrintF("\n\n");
6937   }
6938
6939   TearDownArrayBuffers();
6940
6941   isolate_->global_handles()->TearDown();
6942
6943   external_string_table_.TearDown();
6944
6945   mark_compact_collector()->TearDown();
6946
6947   new_space_.TearDown();
6948
6949   if (old_pointer_space_ != NULL) {
6950     old_pointer_space_->TearDown();
6951     delete old_pointer_space_;
6952     old_pointer_space_ = NULL;
6953   }
6954
6955   if (old_data_space_ != NULL) {
6956     old_data_space_->TearDown();
6957     delete old_data_space_;
6958     old_data_space_ = NULL;
6959   }
6960
6961   if (code_space_ != NULL) {
6962     code_space_->TearDown();
6963     delete code_space_;
6964     code_space_ = NULL;
6965   }
6966
6967   if (map_space_ != NULL) {
6968     map_space_->TearDown();
6969     delete map_space_;
6970     map_space_ = NULL;
6971   }
6972
6973   if (cell_space_ != NULL) {
6974     cell_space_->TearDown();
6975     delete cell_space_;
6976     cell_space_ = NULL;
6977   }
6978
6979   if (property_cell_space_ != NULL) {
6980     property_cell_space_->TearDown();
6981     delete property_cell_space_;
6982     property_cell_space_ = NULL;
6983   }
6984
6985   if (lo_space_ != NULL) {
6986     lo_space_->TearDown();
6987     delete lo_space_;
6988     lo_space_ = NULL;
6989   }
6990
6991   store_buffer()->TearDown();
6992   incremental_marking()->TearDown();
6993
6994   isolate_->memory_allocator()->TearDown();
6995
6996   delete relocation_mutex_;
6997 }
6998
6999
7000 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7001   ASSERT(callback != NULL);
7002   GCPrologueCallbackPair pair(callback, gc_type);
7003   ASSERT(!gc_prologue_callbacks_.Contains(pair));
7004   return gc_prologue_callbacks_.Add(pair);
7005 }
7006
7007
7008 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7009   ASSERT(callback != NULL);
7010   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7011     if (gc_prologue_callbacks_[i].callback == callback) {
7012       gc_prologue_callbacks_.Remove(i);
7013       return;
7014     }
7015   }
7016   UNREACHABLE();
7017 }
7018
7019
7020 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7021   ASSERT(callback != NULL);
7022   GCEpilogueCallbackPair pair(callback, gc_type);
7023   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7024   return gc_epilogue_callbacks_.Add(pair);
7025 }
7026
7027
7028 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7029   ASSERT(callback != NULL);
7030   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7031     if (gc_epilogue_callbacks_[i].callback == callback) {
7032       gc_epilogue_callbacks_.Remove(i);
7033       return;
7034     }
7035   }
7036   UNREACHABLE();
7037 }
7038
7039
7040 #ifdef DEBUG
7041
7042 class PrintHandleVisitor: public ObjectVisitor {
7043  public:
7044   void VisitPointers(Object** start, Object** end) {
7045     for (Object** p = start; p < end; p++)
7046       PrintF("  handle %p to %p\n",
7047              reinterpret_cast<void*>(p),
7048              reinterpret_cast<void*>(*p));
7049   }
7050 };
7051
7052
7053 void Heap::PrintHandles() {
7054   PrintF("Handles:\n");
7055   PrintHandleVisitor v;
7056   isolate_->handle_scope_implementer()->Iterate(&v);
7057 }
7058
7059 #endif
7060
7061
7062 Space* AllSpaces::next() {
7063   switch (counter_++) {
7064     case NEW_SPACE:
7065       return heap_->new_space();
7066     case OLD_POINTER_SPACE:
7067       return heap_->old_pointer_space();
7068     case OLD_DATA_SPACE:
7069       return heap_->old_data_space();
7070     case CODE_SPACE:
7071       return heap_->code_space();
7072     case MAP_SPACE:
7073       return heap_->map_space();
7074     case CELL_SPACE:
7075       return heap_->cell_space();
7076     case PROPERTY_CELL_SPACE:
7077       return heap_->property_cell_space();
7078     case LO_SPACE:
7079       return heap_->lo_space();
7080     default:
7081       return NULL;
7082   }
7083 }
7084
7085
7086 PagedSpace* PagedSpaces::next() {
7087   switch (counter_++) {
7088     case OLD_POINTER_SPACE:
7089       return heap_->old_pointer_space();
7090     case OLD_DATA_SPACE:
7091       return heap_->old_data_space();
7092     case CODE_SPACE:
7093       return heap_->code_space();
7094     case MAP_SPACE:
7095       return heap_->map_space();
7096     case CELL_SPACE:
7097       return heap_->cell_space();
7098     case PROPERTY_CELL_SPACE:
7099       return heap_->property_cell_space();
7100     default:
7101       return NULL;
7102   }
7103 }
7104
7105
7107 OldSpace* OldSpaces::next() {
7108   switch (counter_++) {
7109     case OLD_POINTER_SPACE:
7110       return heap_->old_pointer_space();
7111     case OLD_DATA_SPACE:
7112       return heap_->old_data_space();
7113     case CODE_SPACE:
7114       return heap_->code_space();
7115     default:
7116       return NULL;
7117   }
7118 }
7119
7120
7121 SpaceIterator::SpaceIterator(Heap* heap)
7122     : heap_(heap),
7123       current_space_(FIRST_SPACE),
7124       iterator_(NULL),
7125       size_func_(NULL) {
7126 }
7127
7128
7129 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7130     : heap_(heap),
7131       current_space_(FIRST_SPACE),
7132       iterator_(NULL),
7133       size_func_(size_func) {
7134 }
7135
7136
7137 SpaceIterator::~SpaceIterator() {
7138   // Delete active iterator if any.
7139   delete iterator_;
7140 }
7141
7142
7143 bool SpaceIterator::has_next() {
7144   // Iterate until no more spaces.
7145   return current_space_ != LAST_SPACE;
7146 }
7147
7148
7149 ObjectIterator* SpaceIterator::next() {
7150   if (iterator_ != NULL) {
7151     delete iterator_;
7152     iterator_ = NULL;
7153     // Move to the next space
7154     current_space_++;
7155     if (current_space_ > LAST_SPACE) {
7156       return NULL;
7157     }
7158   }
7159
7160   // Return iterator for the new current space.
7161   return CreateIterator();
7162 }
7163
7164
7165 // Create an iterator for the space to iterate.
7166 ObjectIterator* SpaceIterator::CreateIterator() {
7167   ASSERT(iterator_ == NULL);
7168
7169   switch (current_space_) {
7170     case NEW_SPACE:
7171       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7172       break;
7173     case OLD_POINTER_SPACE:
7174       iterator_ =
7175           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7176       break;
7177     case OLD_DATA_SPACE:
7178       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7179       break;
7180     case CODE_SPACE:
7181       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7182       break;
7183     case MAP_SPACE:
7184       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7185       break;
7186     case CELL_SPACE:
7187       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7188       break;
7189     case PROPERTY_CELL_SPACE:
7190       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7191                                          size_func_);
7192       break;
7193     case LO_SPACE:
7194       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7195       break;
7196   }
7197
7198   // Return the newly allocated iterator.
7199   ASSERT(iterator_ != NULL);
7200   return iterator_;
7201 }
7202
7203
7204 class HeapObjectsFilter {
7205  public:
7206   virtual ~HeapObjectsFilter() {}
7207   virtual bool SkipObject(HeapObject* object) = 0;
7208 };
7209
7210
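// Marks every object reachable from the roots when constructed and reports
// all unmarked (i.e. unreachable) objects as skippable; the destructor
// clears the mark bits again. HeapIterator installs this filter when it is
// constructed with kFilterUnreachable.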
7211 class UnreachableObjectsFilter : public HeapObjectsFilter {
7212  public:
7213   UnreachableObjectsFilter() {
7214     MarkReachableObjects();
7215   }
7216
7217   ~UnreachableObjectsFilter() {
7218     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7219   }
7220
7221   bool SkipObject(HeapObject* object) {
7222     MarkBit mark_bit = Marking::MarkBitFrom(object);
7223     return !mark_bit.Get();
7224   }
7225
7226  private:
7227   class MarkingVisitor : public ObjectVisitor {
7228    public:
7229     MarkingVisitor() : marking_stack_(10) {}
7230
7231     void VisitPointers(Object** start, Object** end) {
7232       for (Object** p = start; p < end; p++) {
7233         if (!(*p)->IsHeapObject()) continue;
7234         HeapObject* obj = HeapObject::cast(*p);
7235         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7236         if (!mark_bit.Get()) {
7237           mark_bit.Set();
7238           marking_stack_.Add(obj);
7239         }
7240       }
7241     }
7242
7243     void TransitiveClosure() {
7244       while (!marking_stack_.is_empty()) {
7245         HeapObject* obj = marking_stack_.RemoveLast();
7246         obj->Iterate(this);
7247       }
7248     }
7249
7250    private:
7251     List<HeapObject*> marking_stack_;
7252   };
7253
7254   void MarkReachableObjects() {
7255     Heap* heap = Isolate::Current()->heap();
7256     MarkingVisitor visitor;
7257     heap->IterateRoots(&visitor, VISIT_ALL);
7258     visitor.TransitiveClosure();
7259   }
7260
7261   DisallowHeapAllocation no_allocation_;
7262 };
7263
7264
7265 HeapIterator::HeapIterator(Heap* heap)
7266     : heap_(heap),
7267       filtering_(HeapIterator::kNoFiltering),
7268       filter_(NULL) {
7269   Init();
7270 }
7271
7272
7273 HeapIterator::HeapIterator(Heap* heap,
7274                            HeapIterator::HeapObjectsFiltering filtering)
7275     : heap_(heap),
7276       filtering_(filtering),
7277       filter_(NULL) {
7278   Init();
7279 }
7280
7281
7282 HeapIterator::~HeapIterator() {
7283   Shutdown();
7284 }
7285
7286
7287 void HeapIterator::Init() {
7288   // Start the iteration.
7289   space_iterator_ = new SpaceIterator(heap_);
7290   switch (filtering_) {
7291     case kFilterUnreachable:
7292       filter_ = new UnreachableObjectsFilter;
7293       break;
7294     default:
7295       break;
7296   }
7297   object_iterator_ = space_iterator_->next();
7298 }
7299
7300
7301 void HeapIterator::Shutdown() {
7302 #ifdef DEBUG
7303   // Assert that in filtering mode we have iterated through all
7304   // objects. Otherwise, the heap will be left in an inconsistent state.
7305   if (filtering_ != kNoFiltering) {
7306     ASSERT(object_iterator_ == NULL);
7307   }
7308 #endif
7309   // Make sure the last iterator is deallocated.
7310   delete space_iterator_;
7311   space_iterator_ = NULL;
7312   object_iterator_ = NULL;
7313   delete filter_;
7314   filter_ = NULL;
7315 }
7316
7317
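// A minimal caller-side usage sketch (not part of this file), assuming a
// Heap* named heap is in scope:
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // ... examine obj ...
//   }
//
// next() returns NULL once all objects in all spaces have been visited.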
7318 HeapObject* HeapIterator::next() {
7319   if (filter_ == NULL) return NextObject();
7320
7321   HeapObject* obj = NextObject();
7322   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7323   return obj;
7324 }
7325
7326
7327 HeapObject* HeapIterator::NextObject() {
7328   // No iterator means we are done.
7329   if (object_iterator_ == NULL) return NULL;
7330
7331   if (HeapObject* obj = object_iterator_->next_object()) {
7332     // If the current iterator has more objects we are fine.
7333     return obj;
7334   } else {
7335     // Go through the spaces looking for one that has objects.
7336     while (space_iterator_->has_next()) {
7337       object_iterator_ = space_iterator_->next();
7338       if (HeapObject* obj = object_iterator_->next_object()) {
7339         return obj;
7340       }
7341     }
7342   }
7343   // Done with the last space.
7344   object_iterator_ = NULL;
7345   return NULL;
7346 }
7347
7348
7349 void HeapIterator::reset() {
7350   // Restart the iterator.
7351   Shutdown();
7352   Init();
7353 }
7354
7355
7356 #ifdef DEBUG
7357
7358 Object* const PathTracer::kAnyGlobalObject = NULL;
7359
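// The tracer marks a visited object by overwriting its map word with the
// map's address plus kMarkTag (see MarkRecursively) and restores the
// original map pointers in a second pass (see UnmarkRecursively), so the
// object graph is back in its original state afterwards. DEBUG-only code.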
7360 class PathTracer::MarkVisitor: public ObjectVisitor {
7361  public:
7362   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7363   void VisitPointers(Object** start, Object** end) {
7364     // Scan all HeapObject pointers in [start, end)
7365     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7366       if ((*p)->IsHeapObject())
7367         tracer_->MarkRecursively(p, this);
7368     }
7369   }
7370
7371  private:
7372   PathTracer* tracer_;
7373 };
7374
7375
7376 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7377  public:
7378   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7379   void VisitPointers(Object** start, Object** end) {
7380     // Scan all HeapObject pointers in [start, end)
7381     for (Object** p = start; p < end; p++) {
7382       if ((*p)->IsHeapObject())
7383         tracer_->UnmarkRecursively(p, this);
7384     }
7385   }
7386
7387  private:
7388   PathTracer* tracer_;
7389 };
7390
7391
7392 void PathTracer::VisitPointers(Object** start, Object** end) {
7393   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7394   // Visit all HeapObject pointers in [start, end)
7395   for (Object** p = start; !done && (p < end); p++) {
7396     if ((*p)->IsHeapObject()) {
7397       TracePathFrom(p);
7398       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7399     }
7400   }
7401 }
7402
7403
7404 void PathTracer::Reset() {
7405   found_target_ = false;
7406   object_stack_.Clear();
7407 }
7408
7409
7410 void PathTracer::TracePathFrom(Object** root) {
7411   ASSERT((search_target_ == kAnyGlobalObject) ||
7412          search_target_->IsHeapObject());
7413   found_target_in_trace_ = false;
7414   Reset();
7415
7416   MarkVisitor mark_visitor(this);
7417   MarkRecursively(root, &mark_visitor);
7418
7419   UnmarkVisitor unmark_visitor(this);
7420   UnmarkRecursively(root, &unmark_visitor);
7421
7422   ProcessResults();
7423 }
7424
7425
7426 static bool SafeIsNativeContext(HeapObject* obj) {
7427   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7428 }
7429
7430
7431 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7432   if (!(*p)->IsHeapObject()) return;
7433
7434   HeapObject* obj = HeapObject::cast(*p);
7435
7436   Object* map = obj->map();
7437
7438   if (!map->IsHeapObject()) return;  // visited before
7439
7440   if (found_target_in_trace_) return;  // stop if target found
7441   object_stack_.Add(obj);
7442   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7443       (obj == search_target_)) {
7444     found_target_in_trace_ = true;
7445     found_target_ = true;
7446     return;
7447   }
7448
7449   bool is_native_context = SafeIsNativeContext(obj);
7450
7451   // Not visited yet: mark the object by tagging its map word.
7452   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7453
7454   Address map_addr = map_p->address();
7455
7456   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7457
7458   // Scan the object body.
7459   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7460     // This is specialized to scan Contexts properly.
7461     Object** start = reinterpret_cast<Object**>(obj->address() +
7462                                                 Context::kHeaderSize);
7463     Object** end = reinterpret_cast<Object**>(obj->address() +
7464         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7465     mark_visitor->VisitPointers(start, end);
7466   } else {
7467     obj->IterateBody(map_p->instance_type(),
7468                      obj->SizeFromMap(map_p),
7469                      mark_visitor);
7470   }
7471
7472   // Scan the map after the body because the body is a lot more interesting
7473   // when doing leak detection.
7474   MarkRecursively(&map, mark_visitor);
7475
7476   if (!found_target_in_trace_)  // don't pop if found the target
7477     object_stack_.RemoveLast();
7478 }
7479
7480
7481 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7482   if (!(*p)->IsHeapObject()) return;
7483
7484   HeapObject* obj = HeapObject::cast(*p);
7485
7486   Object* map = obj->map();
7487
7488   if (map->IsHeapObject()) return;  // unmarked already
7489
7490   Address map_addr = reinterpret_cast<Address>(map);
7491
7492   map_addr -= kMarkTag;
7493
7494   ASSERT_TAG_ALIGNED(map_addr);
7495
7496   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7497
7498   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7499
7500   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7501
7502   obj->IterateBody(Map::cast(map_p)->instance_type(),
7503                    obj->SizeFromMap(Map::cast(map_p)),
7504                    unmark_visitor);
7505 }
7506
7507
7508 void PathTracer::ProcessResults() {
7509   if (found_target_) {
7510     PrintF("=====================================\n");
7511     PrintF("====        Path to object       ====\n");
7512     PrintF("=====================================\n\n");
7513
7514     ASSERT(!object_stack_.is_empty());
7515     for (int i = 0; i < object_stack_.length(); i++) {
7516       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7517       Object* obj = object_stack_[i];
7518       obj->Print();
7519     }
7520     PrintF("=====================================\n");
7521   }
7522 }
7523
7524
7525 // Triggers a depth-first traversal of reachable objects from one
7526 // given root object and finds a path to a specific heap object and
7527 // prints it.
7528 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7529   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7530   tracer.VisitPointer(&root);
7531 }
7532
7533
7534 // Triggers a depth-first traversal of reachable objects from roots
7535 // and finds a path to a specific heap object and prints it.
7536 void Heap::TracePathToObject(Object* target) {
7537   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7538   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7539 }
7540
7541
7542 // Triggers a depth-first traversal of reachable objects from roots
7543 // and finds a path to any global object and prints it. Useful for
7544 // determining the source for leaks of global objects.
7545 void Heap::TracePathToGlobal() {
7546   PathTracer tracer(PathTracer::kAnyGlobalObject,
7547                     PathTracer::FIND_ALL,
7548                     VISIT_ALL);
7549   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7550 }
7551 #endif
7552
7553
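// Sums the wasted and still-available bytes of every old space. The result
// is reported by GCTracer as holes_size_before/holes_size_after when
// FLAG_trace_gc_nvp is set.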
7554 static intptr_t CountTotalHolesSize(Heap* heap) {
7555   intptr_t holes_size = 0;
7556   OldSpaces spaces(heap);
7557   for (OldSpace* space = spaces.next();
7558        space != NULL;
7559        space = spaces.next()) {
7560     holes_size += space->Waste() + space->Available();
7561   }
7562   return holes_size;
7563 }
7564
7565
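// A GCTracer is scoped around a single collection: the constructor samples
// the object size, memory size and incremental marking statistics at the
// start of the GC, and the destructor updates the heap's cumulative GC
// statistics and prints one trace line (human-readable by default, or
// name=value pairs when FLAG_trace_gc_nvp is set).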
7566 GCTracer::GCTracer(Heap* heap,
7567                    const char* gc_reason,
7568                    const char* collector_reason)
7569     : start_time_(0.0),
7570       start_object_size_(0),
7571       start_memory_size_(0),
7572       gc_count_(0),
7573       full_gc_count_(0),
7574       allocated_since_last_gc_(0),
7575       spent_in_mutator_(0),
7576       promoted_objects_size_(0),
7577       nodes_died_in_new_space_(0),
7578       nodes_copied_in_new_space_(0),
7579       nodes_promoted_(0),
7580       heap_(heap),
7581       gc_reason_(gc_reason),
7582       collector_reason_(collector_reason) {
7583   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7584   start_time_ = OS::TimeCurrentMillis();
7585   start_object_size_ = heap_->SizeOfObjects();
7586   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7587
7588   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7589     scopes_[i] = 0;
7590   }
7591
7592   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7593
7594   allocated_since_last_gc_ =
7595       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7596
7597   if (heap_->last_gc_end_timestamp_ > 0) {
7598     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7599   }
7600
7601   steps_count_ = heap_->incremental_marking()->steps_count();
7602   steps_took_ = heap_->incremental_marking()->steps_took();
7603   longest_step_ = heap_->incremental_marking()->longest_step();
7604   steps_count_since_last_gc_ =
7605       heap_->incremental_marking()->steps_count_since_last_gc();
7606   steps_took_since_last_gc_ =
7607       heap_->incremental_marking()->steps_took_since_last_gc();
7608 }
7609
7610
7611 GCTracer::~GCTracer() {
7612   // Print ONE trace line iff one of the GC tracing flags is set.
7613   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7614
7615   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7616
7617   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7618   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7619
7620   double time = heap_->last_gc_end_timestamp_ - start_time_;
7621
7622   // Update cumulative GC statistics if required.
7623   if (FLAG_print_cumulative_gc_stat) {
7624     heap_->total_gc_time_ms_ += time;
7625     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7626     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7627                                      heap_->alive_after_last_gc_);
7628     if (!first_gc) {
7629       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7630                                    spent_in_mutator_);
7631     }
7632   } else if (FLAG_trace_gc_verbose) {
7633     heap_->total_gc_time_ms_ += time;
7634   }
7635
7636   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7637
7638   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7639
7640   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7641   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7642
7643   if (!FLAG_trace_gc_nvp) {
7644     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7645
7646     double end_memory_size_mb =
7647         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7648
7649     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7650            CollectorString(),
7651            static_cast<double>(start_object_size_) / MB,
7652            static_cast<double>(start_memory_size_) / MB,
7653            SizeOfHeapObjects(),
7654            end_memory_size_mb);
7655
7656     if (external_time > 0) PrintF("%d / ", external_time);
7657     PrintF("%.1f ms", time);
7658     if (steps_count_ > 0) {
7659       if (collector_ == SCAVENGER) {
7660         PrintF(" (+ %.1f ms in %d steps since last GC)",
7661                steps_took_since_last_gc_,
7662                steps_count_since_last_gc_);
7663       } else {
7664         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7665                    "biggest step %.1f ms)",
7666                steps_took_,
7667                steps_count_,
7668                longest_step_);
7669       }
7670     }
7671
7672     if (gc_reason_ != NULL) {
7673       PrintF(" [%s]", gc_reason_);
7674     }
7675
7676     if (collector_reason_ != NULL) {
7677       PrintF(" [%s]", collector_reason_);
7678     }
7679
7680     PrintF(".\n");
7681   } else {
7682     PrintF("pause=%.1f ", time);
7683     PrintF("mutator=%.1f ", spent_in_mutator_);
7684     PrintF("gc=");
7685     switch (collector_) {
7686       case SCAVENGER:
7687         PrintF("s");
7688         break;
7689       case MARK_COMPACTOR:
7690         PrintF("ms");
7691         break;
7692       default:
7693         UNREACHABLE();
7694     }
7695     PrintF(" ");
7696
7697     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7698     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7699     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7700     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7701     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7702     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7703     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7704     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7705     PrintF("compaction_ptrs=%.1f ",
7706         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7707     PrintF("intracompaction_ptrs=%.1f ",
7708         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7709     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7710     PrintF("weakcollection_process=%.1f ",
7711         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7712     PrintF("weakcollection_clear=%.1f ",
7713         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7714
7715     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7716     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7717     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7718            in_free_list_or_wasted_before_gc_);
7719     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7720
7721     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7722     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7723     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7724     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7725     PrintF("nodes_promoted=%d ", nodes_promoted_);
7726
7727     if (collector_ == SCAVENGER) {
7728       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7729       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7730     } else {
7731       PrintF("stepscount=%d ", steps_count_);
7732       PrintF("stepstook=%.1f ", steps_took_);
7733       PrintF("longeststep=%.1f ", longest_step_);
7734     }
7735
7736     PrintF("\n");
7737   }
7738
7739   heap_->PrintShortHeapStatistics();
7740 }
7741
7742
7743 const char* GCTracer::CollectorString() {
7744   switch (collector_) {
7745     case SCAVENGER:
7746       return "Scavenge";
7747     case MARK_COMPACTOR:
7748       return "Mark-sweep";
7749   }
7750   return "Unknown GC";
7751 }
7752
7753
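// The keys_ and field_offsets_ arrays are treated as buckets of
// kEntriesPerBucket consecutive slots: Hash() (masked with kHashMask) picks
// the start of a bucket, Lookup() scans that bucket linearly, and Update()
// fills a free slot if one exists, otherwise shifts the bucket down by one
// entry and inserts the new entry at the front.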
7754 int KeyedLookupCache::Hash(Map* map, Name* name) {
7755   // Uses only lower 32 bits if pointers are larger.
7756   uintptr_t addr_hash =
7757       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7758   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7759 }
7760
7761
7762 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7763   int index = (Hash(map, name) & kHashMask);
7764   for (int i = 0; i < kEntriesPerBucket; i++) {
7765     Key& key = keys_[index + i];
7766     if ((key.map == map) && key.name->Equals(name)) {
7767       return field_offsets_[index + i];
7768     }
7769   }
7770   return kNotFound;
7771 }
7772
7773
7774 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7775   if (!name->IsUniqueName()) {
7776     String* internalized_string;
7777     if (!HEAP->InternalizeStringIfExists(
7778             String::cast(name), &internalized_string)) {
7779       return;
7780     }
7781     name = internalized_string;
7782   }
7783   // This cache is cleared only between mark compact passes, so we expect the
7784   // cache to only contain old space names.
7785   ASSERT(!HEAP->InNewSpace(name));
7786
7787   int index = (Hash(map, name) & kHashMask);
7788   // After a GC there will be free slots, so we use them in order (this may
7789   // help to get the most frequently used one in position 0).
7790   for (int i = 0; i < kEntriesPerBucket; i++) {
7791     Key& key = keys_[index + i];
7792     Object* free_entry_indicator = NULL;
7793     if (key.map == free_entry_indicator) {
7794       key.map = map;
7795       key.name = name;
7796       field_offsets_[index + i] = field_offset;
7797       return;
7798     }
7799   }
7800   // No free entry found in this bucket, so we move them all down one and
7801   // put the new entry at position zero.
7802   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7803     Key& key = keys_[index + i];
7804     Key& key2 = keys_[index + i - 1];
7805     key = key2;
7806     field_offsets_[index + i] = field_offsets_[index + i - 1];
7807   }
7808
7809   // Write the new first entry.
7810   Key& key = keys_[index];
7811   key.map = map;
7812   key.name = name;
7813   field_offsets_[index] = field_offset;
7814 }
7815
7816
7817 void KeyedLookupCache::Clear() {
7818   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7819 }
7820
7821
7822 void DescriptorLookupCache::Clear() {
7823   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7824 }
7825
7826
7827 #ifdef DEBUG
7828 void Heap::GarbageCollectionGreedyCheck() {
7829   ASSERT(FLAG_gc_greedy);
7830   if (isolate_->bootstrapper()->IsActive()) return;
7831   if (disallow_allocation_failure()) return;
7832   CollectGarbage(NEW_SPACE);
7833 }
7834 #endif
7835
7836
7837 TranscendentalCache::SubCache::SubCache(Type t)
7838   : type_(t),
7839     isolate_(Isolate::Current()) {
7840   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7841   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7842   for (int i = 0; i < kCacheSize; i++) {
7843     elements_[i].in[0] = in0;
7844     elements_[i].in[1] = in1;
7845     elements_[i].output = NULL;
7846   }
7847 }
7848
7849
7850 void TranscendentalCache::Clear() {
7851   for (int i = 0; i < kNumberOfCaches; i++) {
7852     if (caches_[i] != NULL) {
7853       delete caches_[i];
7854       caches_[i] = NULL;
7855     }
7856   }
7857 }
7858
7859
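// Removes entries that have been cleared to the hole value, moves strings
// that are no longer in new space over to the old space list, and trims
// both lists.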
7860 void ExternalStringTable::CleanUp() {
7861   int last = 0;
7862   for (int i = 0; i < new_space_strings_.length(); ++i) {
7863     if (new_space_strings_[i] == heap_->the_hole_value()) {
7864       continue;
7865     }
7866     if (heap_->InNewSpace(new_space_strings_[i])) {
7867       new_space_strings_[last++] = new_space_strings_[i];
7868     } else {
7869       old_space_strings_.Add(new_space_strings_[i]);
7870     }
7871   }
7872   new_space_strings_.Rewind(last);
7873   new_space_strings_.Trim();
7874
7875   last = 0;
7876   for (int i = 0; i < old_space_strings_.length(); ++i) {
7877     if (old_space_strings_[i] == heap_->the_hole_value()) {
7878       continue;
7879     }
7880     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7881     old_space_strings_[last++] = old_space_strings_[i];
7882   }
7883   old_space_strings_.Rewind(last);
7884   old_space_strings_.Trim();
7885 #ifdef VERIFY_HEAP
7886   if (FLAG_verify_heap) {
7887     Verify();
7888   }
7889 #endif
7890 }
7891
7892
7893 void ExternalStringTable::TearDown() {
7894   new_space_strings_.Free();
7895   old_space_strings_.Free();
7896 }
7897
7898
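// Chunks are not released immediately: they are queued here and freed later
// by FreeQueuedChunks(), which first compacts the store buffer and filters
// out slots on chunks flagged ABOUT_TO_BE_FREED, so that no stale store
// buffer entries point into released memory.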
7899 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7900   chunk->set_next_chunk(chunks_queued_for_free_);
7901   chunks_queued_for_free_ = chunk;
7902 }
7903
7904
7905 void Heap::FreeQueuedChunks() {
7906   if (chunks_queued_for_free_ == NULL) return;
7907   MemoryChunk* next;
7908   MemoryChunk* chunk;
7909   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7910     next = chunk->next_chunk();
7911     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7912
7913     if (chunk->owner()->identity() == LO_SPACE) {
7914       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7915       // If FromAnyPointerAddress encounters a slot that belongs to a large
7916       // chunk queued for deletion, it will fail to find the chunk, because
7917       // it searches the list of pages owned by the large object space, and
7918       // the queued chunks have been detached from that list. To work around
7919       // this, we split the large chunk into normal, kPageSize-aligned pieces
7920       // and initialize the size, owner and flags fields of every piece. If
7921       // FromAnyPointerAddress encounters a slot that belongs to one of these
7922       // smaller pieces, it will treat it as a slot on a normal Page.
7923       Address chunk_end = chunk->address() + chunk->size();
7924       MemoryChunk* inner = MemoryChunk::FromAddress(
7925           chunk->address() + Page::kPageSize);
7926       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7927       while (inner <= inner_last) {
7928         // Size of a large chunk is always a multiple of
7929         // OS::AllocateAlignment() so there is always
7930         // enough space for a fake MemoryChunk header.
7931         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7932         // Guard against overflow.
7933         if (area_end < inner->address()) area_end = chunk_end;
7934         inner->SetArea(inner->address(), area_end);
7935         inner->set_size(Page::kPageSize);
7936         inner->set_owner(lo_space());
7937         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7938         inner = MemoryChunk::FromAddress(
7939             inner->address() + Page::kPageSize);
7940       }
7941     }
7942   }
7943   isolate_->heap()->store_buffer()->Compact();
7944   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7945   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7946     next = chunk->next_chunk();
7947     isolate_->memory_allocator()->Free(chunk);
7948   }
7949   chunks_queued_for_free_ = NULL;
7950 }
7951
7952
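// Records the address of an unmapped page in a small ring buffer of
// kRememberedUnmappedPages entries so that recently unmapped pages remain
// findable in crash dumps; the XOR-ed tag below distinguishes pages freed
// by compaction from other unmapped pages.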
7953 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7954   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7955   // Tag the page pointer to make it findable in the dump file.
7956   if (compacted) {
7957     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7958   } else {
7959     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7960   }
7961   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7962       reinterpret_cast<Address>(p);
7963   remembered_unmapped_pages_index_++;
7964   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7965 }
7966
7967
7968 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7969   memset(object_counts_, 0, sizeof(object_counts_));
7970   memset(object_sizes_, 0, sizeof(object_sizes_));
7971   if (clear_last_time_stats) {
7972     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7973     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7974   }
7975 }
7976
7977
7978 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7979
7980
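// Publishes the object counts and sizes gathered since the last checkpoint
// to the isolate's counters: each counter is incremented by the newly
// gathered value and decremented by the value recorded at the previous
// checkpoint. The current values then become the new baseline and the
// working arrays are cleared.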
7981 void Heap::CheckpointObjectStats() {
7982   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7983   Counters* counters = isolate()->counters();
7984 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7985   counters->count_of_##name()->Increment(                                      \
7986       static_cast<int>(object_counts_[name]));                                 \
7987   counters->count_of_##name()->Decrement(                                      \
7988       static_cast<int>(object_counts_last_time_[name]));                       \
7989   counters->size_of_##name()->Increment(                                       \
7990       static_cast<int>(object_sizes_[name]));                                  \
7991   counters->size_of_##name()->Decrement(                                       \
7992       static_cast<int>(object_sizes_last_time_[name]));
7993   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
7994 #undef ADJUST_LAST_TIME_OBJECT_COUNT
7995   int index;
7996 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
7997   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
7998   counters->count_of_CODE_TYPE_##name()->Increment(       \
7999       static_cast<int>(object_counts_[index]));           \
8000   counters->count_of_CODE_TYPE_##name()->Decrement(       \
8001       static_cast<int>(object_counts_last_time_[index])); \
8002   counters->size_of_CODE_TYPE_##name()->Increment(        \
8003       static_cast<int>(object_sizes_[index]));            \
8004   counters->size_of_CODE_TYPE_##name()->Decrement(        \
8005       static_cast<int>(object_sizes_last_time_[index]));
8006   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8007 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8008 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8009   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8010   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8011       static_cast<int>(object_counts_[index]));           \
8012   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8013       static_cast<int>(object_counts_last_time_[index])); \
8014   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8015       static_cast<int>(object_sizes_[index]));            \
8016   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8017       static_cast<int>(object_sizes_last_time_[index]));
8018   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8019 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8020
8021   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8022   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8023   ClearObjectStats();
8024 }
8025
8026
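// When parallel recompilation is enabled, this acquires the heap's
// relocation mutex; in debug builds it also records whether the mutex was
// taken from the optimizer thread.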
8027 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8028   if (FLAG_parallel_recompilation) {
8029     heap_->relocation_mutex_->Lock();
8030 #ifdef DEBUG
8031     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8032         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8033 #endif  // DEBUG
8034   }
8035 }
8036
8037 } }  // namespace v8::internal