v8: upgrade to 3.20.17
deps/v8/src/heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // The reserved space will be 4 * reserved_semispace_size_ to ensure that the
94 // young generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       property_cell_space_(NULL),
110       lo_space_(NULL),
111       gc_state_(NOT_IN_GC),
112       gc_post_processing_depth_(0),
113       ms_count_(0),
114       gc_count_(0),
115       remembered_unmapped_pages_index_(0),
116       unflattened_strings_length_(0),
117 #ifdef DEBUG
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       gc_count_at_last_idle_gc_(0),
157       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158       gcs_since_last_deopt_(0),
159 #ifdef VERIFY_HEAP
160       no_weak_embedded_maps_verification_scope_depth_(0),
161 #endif
162       promotion_queue_(this),
163       configured_(false),
164       chunks_queued_for_free_(NULL),
165       relocation_mutex_(NULL) {
166   // Allow build-time customization of the max semispace size. Building
167   // V8 with snapshots and a non-default max semispace size is much
168   // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171 #endif
172
173   intptr_t max_virtual = OS::MaxVirtualMemory();
174
175   if (max_virtual > 0) {
176     if (code_range_size_ > 0) {
177       // Reserve no more than 1/8 of the memory for the code range.
178       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179     }
180   }
181
182   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183   native_contexts_list_ = NULL;
184   array_buffers_list_ = Smi::FromInt(0);
185   allocation_sites_list_ = Smi::FromInt(0);
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list in
189   // the minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity() +
205       property_cell_space_->Capacity();
206 }
207
208
209 intptr_t Heap::CommittedMemory() {
210   if (!HasBeenSetUp()) return 0;
211
212   return new_space_.CommittedMemory() +
213       old_pointer_space_->CommittedMemory() +
214       old_data_space_->CommittedMemory() +
215       code_space_->CommittedMemory() +
216       map_space_->CommittedMemory() +
217       cell_space_->CommittedMemory() +
218       property_cell_space_->CommittedMemory() +
219       lo_space_->Size();
220 }
221
222
223 size_t Heap::CommittedPhysicalMemory() {
224   if (!HasBeenSetUp()) return 0;
225
226   return new_space_.CommittedPhysicalMemory() +
227       old_pointer_space_->CommittedPhysicalMemory() +
228       old_data_space_->CommittedPhysicalMemory() +
229       code_space_->CommittedPhysicalMemory() +
230       map_space_->CommittedPhysicalMemory() +
231       cell_space_->CommittedPhysicalMemory() +
232       property_cell_space_->CommittedPhysicalMemory() +
233       lo_space_->CommittedPhysicalMemory();
234 }
235
236
237 intptr_t Heap::CommittedMemoryExecutable() {
238   if (!HasBeenSetUp()) return 0;
239
240   return isolate()->memory_allocator()->SizeExecutable();
241 }
242
243
244 intptr_t Heap::Available() {
245   if (!HasBeenSetUp()) return 0;
246
247   return new_space_.Available() +
248       old_pointer_space_->Available() +
249       old_data_space_->Available() +
250       code_space_->Available() +
251       map_space_->Available() +
252       cell_space_->Available() +
253       property_cell_space_->Available();
254 }
255
256
257 bool Heap::HasBeenSetUp() {
258   return old_pointer_space_ != NULL &&
259          old_data_space_ != NULL &&
260          code_space_ != NULL &&
261          map_space_ != NULL &&
262          cell_space_ != NULL &&
263          property_cell_space_ != NULL &&
264          lo_space_ != NULL;
265 }
266
267
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269   if (IntrusiveMarking::IsMarked(object)) {
270     return IntrusiveMarking::SizeOfMarkedObject(object);
271   }
272   return object->SizeFromMap(object->map());
273 }
274
275
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277                                               const char** reason) {
278   // Is global GC requested?
279   if (space != NEW_SPACE) {
280     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281     *reason = "GC in old space requested";
282     return MARK_COMPACTOR;
283   }
284
285   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286     *reason = "GC in old space forced by flags";
287     return MARK_COMPACTOR;
288   }
289
290   // Is enough data promoted to justify a global GC?
291   if (OldGenerationAllocationLimitReached()) {
292     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293     *reason = "promotion limit reached";
294     return MARK_COMPACTOR;
295   }
296
297   // Have allocation in OLD and LO failed?
298   if (old_gen_exhausted_) {
299     isolate_->counters()->
300         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301     *reason = "old generations exhausted";
302     return MARK_COMPACTOR;
303   }
304
305   // Is there enough space left in OLD to guarantee that a scavenge can
306   // succeed?
307   //
308   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309   // for object promotion. It counts only the bytes that the memory
310   // allocator has not yet allocated from the OS and assigned to any space,
311   // and does not count available bytes already in the old space or code
312   // space.  Undercounting is safe---we may get an unrequested full GC when
313   // a scavenge would have succeeded.
314   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315     isolate_->counters()->
316         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317     *reason = "scavenge might not succeed";
318     return MARK_COMPACTOR;
319   }
320
321   // Default
322   *reason = NULL;
323   return SCAVENGER;
324 }
325
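// A minimal sketch of the selection policy above, restated over plain
// values.  The parameter names are illustrative stand-ins for the heap
// state queried above, not real heap fields.
static GarbageCollector SelectGarbageCollectorSketch(
    bool new_space_request,        // space == NEW_SPACE
    bool forced_by_flags,          // --gc-global or --stress-compaction
    bool promotion_limit_reached,  // OldGenerationAllocationLimitReached()
    bool old_gen_exhausted,        // allocation in OLD/LO spaces failed
    intptr_t unassigned_memory,    // memory not yet handed to any space
    intptr_t new_space_size) {
  if (!new_space_request) return MARK_COMPACTOR;
  if (forced_by_flags) return MARK_COMPACTOR;
  if (promotion_limit_reached || old_gen_exhausted) return MARK_COMPACTOR;
  // A scavenge may promote the entire new space in the worst case, so it is
  // only attempted when at least that much unassigned memory remains.
  if (unassigned_memory <= new_space_size) return MARK_COMPACTOR;
  return SCAVENGER;
}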
326
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330   // Heap::ReportHeapStatistics will also log NewSpace statistics when
331   // --log-gc is set, so the following logic is used to avoid double
332   // logging.
333 #ifdef DEBUG
334   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335   if (FLAG_heap_stats) {
336     ReportHeapStatistics("Before GC");
337   } else if (FLAG_log_gc) {
338     new_space_.ReportStatistics();
339   }
340   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
341 #else
342   if (FLAG_log_gc) {
343     new_space_.CollectStatistics();
344     new_space_.ReportStatistics();
345     new_space_.ClearHistograms();
346   }
347 #endif  // DEBUG
348 }
349
350
351 void Heap::PrintShortHeapStatistics() {
352   if (!FLAG_trace_gc_verbose) return;
353   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
354                ", available: %6" V8_PTR_PREFIX "d KB\n",
355            isolate_->memory_allocator()->Size() / KB,
356            isolate_->memory_allocator()->Available() / KB);
357   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB"
359                ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            new_space_.Size() / KB,
361            new_space_.Available() / KB,
362            new_space_.CommittedMemory() / KB);
363   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
364                ", available: %6" V8_PTR_PREFIX "d KB"
365                ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            old_pointer_space_->SizeOfObjects() / KB,
367            old_pointer_space_->Available() / KB,
368            old_pointer_space_->CommittedMemory() / KB);
369   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
370                ", available: %6" V8_PTR_PREFIX "d KB"
371                ", committed: %6" V8_PTR_PREFIX "d KB\n",
372            old_data_space_->SizeOfObjects() / KB,
373            old_data_space_->Available() / KB,
374            old_data_space_->CommittedMemory() / KB);
375   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
376                ", available: %6" V8_PTR_PREFIX "d KB"
377                ", committed: %6" V8_PTR_PREFIX "d KB\n",
378            code_space_->SizeOfObjects() / KB,
379            code_space_->Available() / KB,
380            code_space_->CommittedMemory() / KB);
381   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
382                ", available: %6" V8_PTR_PREFIX "d KB"
383                ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            map_space_->SizeOfObjects() / KB,
385            map_space_->Available() / KB,
386            map_space_->CommittedMemory() / KB);
387   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
388                ", available: %6" V8_PTR_PREFIX "d KB"
389                ", committed: %6" V8_PTR_PREFIX "d KB\n",
390            cell_space_->SizeOfObjects() / KB,
391            cell_space_->Available() / KB,
392            cell_space_->CommittedMemory() / KB);
393   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394                ", available: %6" V8_PTR_PREFIX "d KB"
395                ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            property_cell_space_->SizeOfObjects() / KB,
397            property_cell_space_->Available() / KB,
398            property_cell_space_->CommittedMemory() / KB);
399   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400                ", available: %6" V8_PTR_PREFIX "d KB"
401                ", committed: %6" V8_PTR_PREFIX "d KB\n",
402            lo_space_->SizeOfObjects() / KB,
403            lo_space_->Available() / KB,
404            lo_space_->CommittedMemory() / KB);
405   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
406                ", available: %6" V8_PTR_PREFIX "d KB"
407                ", committed: %6" V8_PTR_PREFIX "d KB\n",
408            this->SizeOfObjects() / KB,
409            this->Available() / KB,
410            this->CommittedMemory() / KB);
411   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412            amount_of_external_allocated_memory_ / KB);
413   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
414 }
415
416
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420   // As before the GC, we use some complicated logic to ensure that
421   // NewSpace statistics are logged exactly once when --log-gc is turned on.
422 #if defined(DEBUG)
423   if (FLAG_heap_stats) {
424     new_space_.CollectStatistics();
425     ReportHeapStatistics("After GC");
426   } else if (FLAG_log_gc) {
427     new_space_.ReportStatistics();
428   }
429 #else
430   if (FLAG_log_gc) new_space_.ReportStatistics();
431 #endif  // DEBUG
432 }
433
434
435 void Heap::GarbageCollectionPrologue() {
436   {  AllowHeapAllocation for_the_first_part_of_prologue;
437     isolate_->transcendental_cache()->Clear();
438     ClearJSFunctionResultCaches();
439     gc_count_++;
440     unflattened_strings_length_ = 0;
441
442     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443       mark_compact_collector()->EnableCodeFlushing(true);
444     }
445
446 #ifdef VERIFY_HEAP
447     if (FLAG_verify_heap) {
448       Verify();
449     }
450 #endif
451   }
452
453 #ifdef DEBUG
454   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455
456   if (FLAG_gc_verbose) Print();
457
458   ReportStatisticsBeforeGC();
459 #endif  // DEBUG
460
461   store_buffer()->GCPrologue();
462 }
463
464
465 intptr_t Heap::SizeOfObjects() {
466   intptr_t total = 0;
467   AllSpaces spaces(this);
468   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469     total += space->SizeOfObjects();
470   }
471   return total;
472 }
473
474
475 void Heap::RepairFreeListsAfterBoot() {
476   PagedSpaces spaces(this);
477   for (PagedSpace* space = spaces.next();
478        space != NULL;
479        space = spaces.next()) {
480     space->RepairFreeListsAfterBoot();
481   }
482 }
483
484
485 void Heap::GarbageCollectionEpilogue() {
486   store_buffer()->GCEpilogue();
487
488   // In release mode, we only zap the from space under heap verification.
489   if (Heap::ShouldZapGarbage()) {
490     ZapFromSpace();
491   }
492
493 #ifdef VERIFY_HEAP
494   if (FLAG_verify_heap) {
495     Verify();
496   }
497 #endif
498
499   AllowHeapAllocation for_the_rest_of_the_epilogue;
500
501 #ifdef DEBUG
502   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503   if (FLAG_print_handles) PrintHandles();
504   if (FLAG_gc_verbose) Print();
505   if (FLAG_code_stats) ReportCodeStatistics("After GC");
506 #endif
507   if (FLAG_deopt_every_n_garbage_collections > 0) {
508     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509       Deoptimizer::DeoptimizeAll(isolate());
510       gcs_since_last_deopt_ = 0;
511     }
512   }
513
514   isolate_->counters()->alive_after_last_gc()->Set(
515       static_cast<int>(SizeOfObjects()));
516
517   isolate_->counters()->string_table_capacity()->Set(
518       string_table()->Capacity());
519   isolate_->counters()->number_of_symbols()->Set(
520       string_table()->NumberOfElements());
521
522   if (CommittedMemory() > 0) {
523     isolate_->counters()->external_fragmentation_total()->AddSample(
524         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
525
526     isolate_->counters()->heap_fraction_map_space()->AddSample(
527         static_cast<int>(
528             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529     isolate_->counters()->heap_fraction_cell_space()->AddSample(
530         static_cast<int>(
531             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532     isolate_->counters()->heap_fraction_property_cell_space()->
533         AddSample(static_cast<int>(
534             (property_cell_space()->CommittedMemory() * 100.0) /
535             CommittedMemory()));
536
537     isolate_->counters()->heap_sample_total_committed()->AddSample(
538         static_cast<int>(CommittedMemory() / KB));
539     isolate_->counters()->heap_sample_total_used()->AddSample(
540         static_cast<int>(SizeOfObjects() / KB));
541     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542         static_cast<int>(map_space()->CommittedMemory() / KB));
543     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544         static_cast<int>(cell_space()->CommittedMemory() / KB));
545     isolate_->counters()->
546         heap_sample_property_cell_space_committed()->
547             AddSample(static_cast<int>(
548                 property_cell_space()->CommittedMemory() / KB));
549   }
550
551 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
552   isolate_->counters()->space##_bytes_available()->Set(                        \
553       static_cast<int>(space()->Available()));                                 \
554   isolate_->counters()->space##_bytes_committed()->Set(                        \
555       static_cast<int>(space()->CommittedMemory()));                           \
556   isolate_->counters()->space##_bytes_used()->Set(                             \
557       static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
559   if (space()->CommittedMemory() > 0) {                                        \
560     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
561         static_cast<int>(100 -                                                 \
562             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563   }
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
565   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
566   UPDATE_FRAGMENTATION_FOR_SPACE(space)
567
568   UPDATE_COUNTERS_FOR_SPACE(new_space)
569   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
579
580 #if defined(DEBUG)
581   ReportStatisticsAfterGC();
582 #endif  // DEBUG
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584   isolate_->debug()->AfterGarbageCollection();
585 #endif  // ENABLE_DEBUGGER_SUPPORT
586 }
587
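// A minimal sketch of the fragmentation metric sampled above: the percentage
// of a space's committed memory that is not occupied by live objects.  The
// parameter names are stand-ins for space()->SizeOfObjects() and
// space()->CommittedMemory().
static inline int ExternalFragmentationPercentSketch(intptr_t used_bytes,
                                                     intptr_t committed_bytes) {
  if (committed_bytes <= 0) return 0;
  return static_cast<int>(100 - (used_bytes * 100.0) / committed_bytes);
}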
588
589 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590   // Since we are ignoring the return value, the exact choice of space does
591   // not matter, so long as we do not specify NEW_SPACE, which would not
592   // cause a full GC.
593   mark_compact_collector_.SetFlags(flags);
594   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595   mark_compact_collector_.SetFlags(kNoGCFlags);
596 }
597
598
599 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600   // Since we are ignoring the return value, the exact choice of space does
601   // not matter, so long as we do not specify NEW_SPACE, which would not
602   // cause a full GC.
603   // A major GC invokes weak handle callbacks on weakly reachable
604   // handles, but it does not collect the weakly reachable objects until
605   // the next major GC.  Therefore, if we collect aggressively and a weak
606   // handle callback has been invoked, we rerun the major GC to release
607   // the objects that have become garbage.
608   // Note: as weak callbacks can execute arbitrary code, we cannot hope
609   // that eventually there will be no weak callback invocations.
610   // Therefore we stop recollecting after several attempts.
611   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612                                      kReduceMemoryFootprintMask);
613   isolate_->compilation_cache()->Clear();
614   const int kMaxNumberOfAttempts = 7;
615   const int kMinNumberOfAttempts = 2;
616   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618         attempt + 1 >= kMinNumberOfAttempts) {
619       break;
620     }
621   }
622   mark_compact_collector()->SetFlags(kNoGCFlags);
623   new_space_.Shrink();
624   UncommitFromSpace();
625   incremental_marking()->UncommitMarkingDeque();
626 }
627
628
629 bool Heap::CollectGarbage(AllocationSpace space,
630                           GarbageCollector collector,
631                           const char* gc_reason,
632                           const char* collector_reason) {
633   // The VM is in the GC state until exiting this function.
634   VMState<GC> state(isolate_);
635
636 #ifdef DEBUG
637   // Reset the allocation timeout to the GC interval, but make sure to
638   // allow at least a few allocations after a collection. The reason
639   // for this is that we have a lot of allocation sequences and we
640   // assume that a garbage collection will allow the subsequent
641   // allocation attempts to go through.
642   allocation_timeout_ = Max(6, FLAG_gc_interval);
643 #endif
644
645   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646     if (FLAG_trace_incremental_marking) {
647       PrintF("[IncrementalMarking] Scavenge during marking.\n");
648     }
649   }
650
651   if (collector == MARK_COMPACTOR &&
652       !mark_compact_collector()->abort_incremental_marking() &&
653       !incremental_marking()->IsStopped() &&
654       !incremental_marking()->should_hurry() &&
655       FLAG_incremental_marking_steps) {
656     // Make progress in incremental marking.
657     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660     if (!incremental_marking()->IsComplete()) {
661       if (FLAG_trace_incremental_marking) {
662         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
663       }
664       collector = SCAVENGER;
665       collector_reason = "incremental marking delaying mark-sweep";
666     }
667   }
668
669   bool next_gc_likely_to_collect_more = false;
670
671   { GCTracer tracer(this, gc_reason, collector_reason);
672     ASSERT(AllowHeapAllocation::IsAllowed());
673     DisallowHeapAllocation no_allocation_during_gc;
674     GarbageCollectionPrologue();
675     // The GC count was incremented in the prologue.  Tell the tracer about
676     // it.
677     tracer.set_gc_count(gc_count_);
678
679     // Tell the tracer which collector we've selected.
680     tracer.set_collector(collector);
681
682     {
683       HistogramTimerScope histogram_timer_scope(
684           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685                                    : isolate_->counters()->gc_compactor());
686       next_gc_likely_to_collect_more =
687           PerformGarbageCollection(collector, &tracer);
688     }
689
690     GarbageCollectionEpilogue();
691   }
692
693   // Start incremental marking for the next cycle. The heap snapshot
694   // generator needs incremental marking to stay off after it aborted.
695   if (!mark_compact_collector()->abort_incremental_marking() &&
696       incremental_marking()->IsStopped() &&
697       incremental_marking()->WorthActivating() &&
698       NextGCIsLikelyToBeFull()) {
699     incremental_marking()->Start();
700   }
701
702   return next_gc_likely_to_collect_more;
703 }
704
705
706 int Heap::NotifyContextDisposed() {
707   if (FLAG_parallel_recompilation) {
708     // Flush the queued recompilation tasks.
709     isolate()->optimizing_compiler_thread()->Flush();
710   }
711   flush_monomorphic_ics_ = true;
712   return ++contexts_disposed_;
713 }
714
715
716 void Heap::PerformScavenge() {
717   GCTracer tracer(this, NULL, NULL);
718   if (incremental_marking()->IsStopped()) {
719     PerformGarbageCollection(SCAVENGER, &tracer);
720   } else {
721     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
722   }
723 }
724
725
726 void Heap::MoveElements(FixedArray* array,
727                         int dst_index,
728                         int src_index,
729                         int len) {
730   if (len == 0) return;
731
732   ASSERT(array->map() != HEAP->fixed_cow_array_map());
733   Object** dst_objects = array->data_start() + dst_index;
734   OS::MemMove(dst_objects,
735               array->data_start() + src_index,
736               len * kPointerSize);
737   if (!InNewSpace(array)) {
738     for (int i = 0; i < len; i++) {
739       // TODO(hpayer): check store buffer for entries
740       if (InNewSpace(dst_objects[i])) {
741         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
742       }
743     }
744   }
745   incremental_marking()->RecordWrites(array);
746 }
747
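// A minimal sketch of the condition checked above: after moving elements
// within an array that lives outside the new space, any slot that now holds
// a new-space pointer is an old-to-new reference and has to be recorded
// (via RecordWrite above) so that the next scavenge can find it without
// scanning all of old space.
static inline bool NeedsWriteBarrierSketch(Heap* heap,
                                           HeapObject* host_array,
                                           Object* stored_value) {
  return !heap->InNewSpace(host_array) && heap->InNewSpace(stored_value);
}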
748
749 #ifdef VERIFY_HEAP
750 // Helper class for verifying the string table.
751 class StringTableVerifier : public ObjectVisitor {
752  public:
753   void VisitPointers(Object** start, Object** end) {
754     // Visit all HeapObject pointers in [start, end).
755     for (Object** p = start; p < end; p++) {
756       if ((*p)->IsHeapObject()) {
757         // Check that the string is actually internalized.
758         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
759               (*p)->IsInternalizedString());
760       }
761     }
762   }
763 };
764
765
766 static void VerifyStringTable() {
767   StringTableVerifier verifier;
768   HEAP->string_table()->IterateElements(&verifier);
769 }
770 #endif  // VERIFY_HEAP
771
772
773 static bool AbortIncrementalMarkingAndCollectGarbage(
774     Heap* heap,
775     AllocationSpace space,
776     const char* gc_reason = NULL) {
777   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
778   bool result = heap->CollectGarbage(space, gc_reason);
779   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
780   return result;
781 }
782
783
784 void Heap::ReserveSpace(
785     int *sizes,
786     Address *locations_out) {
787   bool gc_performed = true;
788   int counter = 0;
789   static const int kThreshold = 20;
790   while (gc_performed && counter++ < kThreshold) {
791     gc_performed = false;
792     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
793     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
794       if (sizes[space] != 0) {
795         MaybeObject* allocation;
796         if (space == NEW_SPACE) {
797           allocation = new_space()->AllocateRaw(sizes[space]);
798         } else {
799           allocation = paged_space(space)->AllocateRaw(sizes[space]);
800         }
801         FreeListNode* node;
802         if (!allocation->To<FreeListNode>(&node)) {
803           if (space == NEW_SPACE) {
804             Heap::CollectGarbage(NEW_SPACE,
805                                  "failed to reserve space in the new space");
806           } else {
807             AbortIncrementalMarkingAndCollectGarbage(
808                 this,
809                 static_cast<AllocationSpace>(space),
810                 "failed to reserve space in paged space");
811           }
812           gc_performed = true;
813           break;
814         } else {
815           // Mark with a free list node, in case we have a GC before
816           // deserializing.
817           node->set_size(this, sizes[space]);
818           locations_out[space] = node->address();
819         }
820       }
821     }
822   }
823
824   if (gc_performed) {
825     // Failed to reserve the space after several attempts.
826     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
827   }
828 }
829
830
831 void Heap::EnsureFromSpaceIsCommitted() {
832   if (new_space_.CommitFromSpaceIfNeeded()) return;
833
834   // Committing memory to from space failed.
835   // Memory is exhausted and we will die.
836   V8::FatalProcessOutOfMemory("Committing semi space failed.");
837 }
838
839
840 void Heap::ClearJSFunctionResultCaches() {
841   if (isolate_->bootstrapper()->IsActive()) return;
842
843   Object* context = native_contexts_list_;
844   while (!context->IsUndefined()) {
845     // Get the caches for this context. GC can happen when the context
846     // is not fully initialized, so the caches can be undefined.
847     Object* caches_or_undefined =
848         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
849     if (!caches_or_undefined->IsUndefined()) {
850       FixedArray* caches = FixedArray::cast(caches_or_undefined);
851       // Clear the caches:
852       int length = caches->length();
853       for (int i = 0; i < length; i++) {
854         JSFunctionResultCache::cast(caches->get(i))->Clear();
855       }
856     }
857     // Get the next context:
858     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
859   }
860 }
861
862
863 void Heap::ClearNormalizedMapCaches() {
864   if (isolate_->bootstrapper()->IsActive() &&
865       !incremental_marking()->IsMarking()) {
866     return;
867   }
868
869   Object* context = native_contexts_list_;
870   while (!context->IsUndefined()) {
871     // GC can happen when the context is not fully initialized,
872     // so the cache can be undefined.
873     Object* cache =
874         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
875     if (!cache->IsUndefined()) {
876       NormalizedMapCache::cast(cache)->Clear();
877     }
878     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
879   }
880 }
881
882
883 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
884   double survival_rate =
885       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
886       start_new_space_size;
887
888   if (survival_rate > kYoungSurvivalRateHighThreshold) {
889     high_survival_rate_period_length_++;
890   } else {
891     high_survival_rate_period_length_ = 0;
892   }
893
894   if (survival_rate < kYoungSurvivalRateLowThreshold) {
895     low_survival_rate_period_length_++;
896   } else {
897     low_survival_rate_period_length_ = 0;
898   }
899
900   double survival_rate_diff = survival_rate_ - survival_rate;
901
902   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
903     set_survival_rate_trend(DECREASING);
904   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
905     set_survival_rate_trend(INCREASING);
906   } else {
907     set_survival_rate_trend(STABLE);
908   }
909
910   survival_rate_ = survival_rate;
911 }
912
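// A minimal sketch of the trend bookkeeping above.  The survival rate is the
// percentage of the new space that survived the last scavenge; a positive
// difference against the previous rate beyond the allowed deviation counts
// as a decreasing trend, a negative one as increasing.  The enum below is a
// local stand-in for the DECREASING/STABLE/INCREASING trend values used above.
enum SurvivalTrendSketch {
  kSketchDecreasing,
  kSketchStable,
  kSketchIncreasing
};

static SurvivalTrendSketch ClassifyTrendSketch(double previous_rate,
                                               double current_rate,
                                               double allowed_deviation) {
  double diff = previous_rate - current_rate;
  if (diff > allowed_deviation) return kSketchDecreasing;
  if (diff < -allowed_deviation) return kSketchIncreasing;
  return kSketchStable;
}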
913 bool Heap::PerformGarbageCollection(GarbageCollector collector,
914                                     GCTracer* tracer) {
915   bool next_gc_likely_to_collect_more = false;
916
917   if (collector != SCAVENGER) {
918     PROFILE(isolate_, CodeMovingGCEvent());
919   }
920
921 #ifdef VERIFY_HEAP
922   if (FLAG_verify_heap) {
923     VerifyStringTable();
924   }
925 #endif
926
927   GCType gc_type =
928       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
929
930   {
931     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
932     VMState<EXTERNAL> state(isolate_);
933     HandleScope handle_scope(isolate_);
934     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
935   }
936
937   EnsureFromSpaceIsCommitted();
938
939   int start_new_space_size = Heap::new_space()->SizeAsInt();
940
941   if (IsHighSurvivalRate()) {
942     // We speed up the incremental marker if it is running so that it
943     // does not fall behind the rate of promotion, which would cause a
944     // constantly growing old space.
945     incremental_marking()->NotifyOfHighPromotionRate();
946   }
947
948   if (collector == MARK_COMPACTOR) {
949     // Perform mark-sweep with optional compaction.
950     MarkCompact(tracer);
951     sweep_generation_++;
952
953     UpdateSurvivalRateTrend(start_new_space_size);
954
955     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
956
957     old_generation_allocation_limit_ =
958         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
959
960     old_gen_exhausted_ = false;
961   } else {
962     tracer_ = tracer;
963     Scavenge();
964     tracer_ = NULL;
965
966     UpdateSurvivalRateTrend(start_new_space_size);
967   }
968
969   if (!new_space_high_promotion_mode_active_ &&
970       new_space_.Capacity() == new_space_.MaximumCapacity() &&
971       IsStableOrIncreasingSurvivalTrend() &&
972       IsHighSurvivalRate()) {
973     // Stable high survival rates even though the young generation is at
974     // maximum capacity indicate that most objects will be promoted.
975     // To decrease scavenger pauses and final mark-sweep pauses, we
976     // have to limit the maximal capacity of the young generation.
977     SetNewSpaceHighPromotionModeActive(true);
978     if (FLAG_trace_gc) {
979       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
980                new_space_.InitialCapacity() / MB);
981     }
982     // Support for global pre-tenuring uses the high promotion mode as a
983     // heuristic indicator of whether to pretenure or not, so we trigger
984     // deoptimization here to take advantage of pre-tenuring as soon as
985     // possible.
986     if (FLAG_pretenuring) {
987       isolate_->stack_guard()->FullDeopt();
988     }
989   } else if (new_space_high_promotion_mode_active_ &&
990       IsStableOrDecreasingSurvivalTrend() &&
991       IsLowSurvivalRate()) {
992     // Decreasing low survival rates might indicate that the above high
993     // promotion mode is over and we should allow the young generation
994     // to grow again.
995     SetNewSpaceHighPromotionModeActive(false);
996     if (FLAG_trace_gc) {
997       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
998                new_space_.MaximumCapacity() / MB);
999     }
1000     // Trigger deoptimization here to turn off pre-tenuring as soon as
1001     // possible.
1002     if (FLAG_pretenuring) {
1003       isolate_->stack_guard()->FullDeopt();
1004     }
1005   }
1006
1007   if (new_space_high_promotion_mode_active_ &&
1008       new_space_.Capacity() > new_space_.InitialCapacity()) {
1009     new_space_.Shrink();
1010   }
1011
1012   isolate_->counters()->objs_since_last_young()->Set(0);
1013
1014   // Callbacks that fire after this point might trigger nested GCs and
1015   // restart incremental marking, so the assertion can't be moved down.
1016   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1017
1018   gc_post_processing_depth_++;
1019   { AllowHeapAllocation allow_allocation;
1020     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1021     next_gc_likely_to_collect_more =
1022         isolate_->global_handles()->PostGarbageCollectionProcessing(
1023             collector, tracer);
1024   }
1025   gc_post_processing_depth_--;
1026
1027   isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1028
1029   // Update relocatables.
1030   Relocatable::PostGarbageCollectionProcessing();
1031
1032   if (collector == MARK_COMPACTOR) {
1033     // Register the amount of external allocated memory.
1034     amount_of_external_allocated_memory_at_last_global_gc_ =
1035         amount_of_external_allocated_memory_;
1036   }
1037
1038   {
1039     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1040     VMState<EXTERNAL> state(isolate_);
1041     HandleScope handle_scope(isolate_);
1042     CallGCEpilogueCallbacks(gc_type);
1043   }
1044
1045 #ifdef VERIFY_HEAP
1046   if (FLAG_verify_heap) {
1047     VerifyStringTable();
1048   }
1049 #endif
1050
1051   return next_gc_likely_to_collect_more;
1052 }
1053
1054
1055 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1056   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1057     global_gc_prologue_callback_();
1058   }
1059   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1060     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1061       gc_prologue_callbacks_[i].callback(gc_type, flags);
1062     }
1063   }
1064 }
1065
1066
1067 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1068   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1069     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1070       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1071     }
1072   }
1073   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1074     global_gc_epilogue_callback_();
1075   }
1076 }
1077
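// A minimal sketch of the dispatch rule shared by the two callback lists
// above: gc_type is a bit mask, so a callback registered for
// kGCTypeScavenge | kGCTypeMarkSweepCompact fires on every collection,
// while one registered for a single bit fires only for that collector.
static inline bool GCCallbackMatchesSketch(GCType current_gc_type,
                                           GCType registered_gc_types) {
  return (current_gc_type & registered_gc_types) != 0;
}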
1078
1079 void Heap::MarkCompact(GCTracer* tracer) {
1080   gc_state_ = MARK_COMPACT;
1081   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1082
1083   mark_compact_collector_.Prepare(tracer);
1084
1085   ms_count_++;
1086   tracer->set_full_gc_count(ms_count_);
1087
1088   MarkCompactPrologue();
1089
1090   mark_compact_collector_.CollectGarbage();
1091
1092   LOG(isolate_, ResourceEvent("markcompact", "end"));
1093
1094   gc_state_ = NOT_IN_GC;
1095
1096   isolate_->counters()->objs_since_last_full()->Set(0);
1097
1098   contexts_disposed_ = 0;
1099
1100   flush_monomorphic_ics_ = false;
1101 }
1102
1103
1104 void Heap::MarkCompactPrologue() {
1105   // At any old GC clear the keyed lookup cache to enable collection of unused
1106   // maps.
1107   isolate_->keyed_lookup_cache()->Clear();
1108   isolate_->context_slot_cache()->Clear();
1109   isolate_->descriptor_lookup_cache()->Clear();
1110   RegExpResultsCache::Clear(string_split_cache());
1111   RegExpResultsCache::Clear(regexp_multiple_cache());
1112
1113   isolate_->compilation_cache()->MarkCompactPrologue();
1114
1115   CompletelyClearInstanceofCache();
1116
1117   FlushNumberStringCache();
1118   if (FLAG_cleanup_code_caches_at_gc) {
1119     polymorphic_code_cache()->set_cache(undefined_value());
1120   }
1121
1122   ClearNormalizedMapCaches();
1123 }
1124
1125
1126 // Helper class for copying HeapObjects
1127 class ScavengeVisitor: public ObjectVisitor {
1128  public:
1129   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1130
1131   void VisitPointer(Object** p) { ScavengePointer(p); }
1132
1133   void VisitPointers(Object** start, Object** end) {
1134     // Copy all HeapObject pointers in [start, end)
1135     for (Object** p = start; p < end; p++) ScavengePointer(p);
1136   }
1137
1138  private:
1139   void ScavengePointer(Object** p) {
1140     Object* object = *p;
1141     if (!heap_->InNewSpace(object)) return;
1142     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1143                          reinterpret_cast<HeapObject*>(object));
1144   }
1145
1146   Heap* heap_;
1147 };
1148
1149
1150 #ifdef VERIFY_HEAP
1151 // Visitor class to verify pointers in code or data space do not point into
1152 // new space.
1153 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1154  public:
1155   void VisitPointers(Object** start, Object** end) {
1156     for (Object** current = start; current < end; current++) {
1157       if ((*current)->IsHeapObject()) {
1158         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1159       }
1160     }
1161   }
1162 };
1163
1164
1165 static void VerifyNonPointerSpacePointers() {
1166   // Verify that there are no pointers to new space in spaces where we
1167   // do not expect them.
1168   VerifyNonPointerSpacePointersVisitor v;
1169   HeapObjectIterator code_it(HEAP->code_space());
1170   for (HeapObject* object = code_it.Next();
1171        object != NULL; object = code_it.Next())
1172     object->Iterate(&v);
1173
1174   // The old data space is normally swept conservatively, which means the
1175   // iterator does not work on it, so we normally skip the next bit.
1176   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1177     HeapObjectIterator data_it(HEAP->old_data_space());
1178     for (HeapObject* object = data_it.Next();
1179          object != NULL; object = data_it.Next())
1180       object->Iterate(&v);
1181   }
1182 }
1183 #endif  // VERIFY_HEAP
1184
1185
1186 void Heap::CheckNewSpaceExpansionCriteria() {
1187   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1188       survived_since_last_expansion_ > new_space_.Capacity() &&
1189       !new_space_high_promotion_mode_active_) {
1190     // Grow the size of new space if there is room to grow, enough data
1191     // has survived scavenge since the last expansion, and we are not in
1192     // high promotion mode.
1193     new_space_.Grow();
1194     survived_since_last_expansion_ = 0;
1195   }
1196 }
1197
1198
1199 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1200   return heap->InNewSpace(*p) &&
1201       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1202 }
1203
1204
1205 void Heap::ScavengeStoreBufferCallback(
1206     Heap* heap,
1207     MemoryChunk* page,
1208     StoreBufferEvent event) {
1209   heap->store_buffer_rebuilder_.Callback(page, event);
1210 }
1211
1212
1213 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1214   if (event == kStoreBufferStartScanningPagesEvent) {
1215     start_of_current_page_ = NULL;
1216     current_page_ = NULL;
1217   } else if (event == kStoreBufferScanningPageEvent) {
1218     if (current_page_ != NULL) {
1219       // If this page already overflowed the store buffer during this iteration.
1220       if (current_page_->scan_on_scavenge()) {
1221         // Then we should wipe out the entries that have been added for it.
1222         store_buffer_->SetTop(start_of_current_page_);
1223       } else if (store_buffer_->Top() - start_of_current_page_ >=
1224                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1225         // Did we find too many pointers in the previous page?  The heuristic is
1226         // that no page can take more than 1/5 of the remaining slots in the store
1227         // buffer.
1228         current_page_->set_scan_on_scavenge(true);
1229         store_buffer_->SetTop(start_of_current_page_);
1230       } else {
1231         // In this case the page we scanned took a reasonable number of slots in
1232         // the store buffer.  It has now been rehabilitated and is no longer
1233         // marked scan_on_scavenge.
1234         ASSERT(!current_page_->scan_on_scavenge());
1235       }
1236     }
1237     start_of_current_page_ = store_buffer_->Top();
1238     current_page_ = page;
1239   } else if (event == kStoreBufferFullEvent) {
1240     // The current page overflowed the store buffer again.  Wipe out its entries
1241     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1242     // several times while scanning.
1243     if (current_page_ == NULL) {
1244       // Store Buffer overflowed while scanning promoted objects.  These are not
1245       // in any particular page, though they are likely to be clustered by the
1246       // allocation routines.
1247       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1248     } else {
1249       // Store Buffer overflowed while scanning a particular old space page for
1250       // pointers to new space.
1251       ASSERT(current_page_ == page);
1252       ASSERT(page != NULL);
1253       current_page_->set_scan_on_scavenge(true);
1254       ASSERT(start_of_current_page_ != store_buffer_->Top());
1255       store_buffer_->SetTop(start_of_current_page_);
1256     }
1257   } else {
1258     UNREACHABLE();
1259   }
1260 }
1261
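// A minimal sketch of the overflow heuristic applied above.  Here "added"
// stands for the slots this page has consumed so far
// (Top() - start_of_current_page_) and "remaining" for the slots still free
// behind them (Limit() - Top()).  Testing added >= remaining / 4 is the same
// as asking whether the page has taken at least 1/5 of the slots that were
// free when scanning of the page began (added + remaining); such a page is
// demoted to scan-on-scavenge instead.
static inline bool PageOverflowedStoreBufferSketch(intptr_t added,
                                                   intptr_t remaining) {
  return added >= (remaining >> 2);
}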
1262
1263 void PromotionQueue::Initialize() {
1264   // Assumes that a NewSpacePage exactly fits a whole number of promotion
1265   // queue entries (where each is a pair of intptr_t). This allows us to
1266   // simplify the test for when to switch pages.
1267   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1268          == 0);
1269   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1270   front_ = rear_ =
1271       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1272   emergency_stack_ = NULL;
1273   guard_ = false;
1274 }
1275
1276
1277 void PromotionQueue::RelocateQueueHead() {
1278   ASSERT(emergency_stack_ == NULL);
1279
1280   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1281   intptr_t* head_start = rear_;
1282   intptr_t* head_end =
1283       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1284
1285   int entries_count =
1286       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1287
1288   emergency_stack_ = new List<Entry>(2 * entries_count);
1289
1290   while (head_start != head_end) {
1291     int size = static_cast<int>(*(head_start++));
1292     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1293     emergency_stack_->Add(Entry(obj, size));
1294   }
1295   rear_ = head_end;
1296 }
1297
1298
1299 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1300  public:
1301   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1302
1303   virtual Object* RetainAs(Object* object) {
1304     if (!heap_->InFromSpace(object)) {
1305       return object;
1306     }
1307
1308     MapWord map_word = HeapObject::cast(object)->map_word();
1309     if (map_word.IsForwardingAddress()) {
1310       return map_word.ToForwardingAddress();
1311     }
1312     return NULL;
1313   }
1314
1315  private:
1316   Heap* heap_;
1317 };
1318
1319
1320 void Heap::Scavenge() {
1321   RelocationLock relocation_lock(this);
1322
1323 #ifdef VERIFY_HEAP
1324   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1325 #endif
1326
1327   gc_state_ = SCAVENGE;
1328
1329   // Implements Cheney's copying algorithm
1330   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1331
1332   // Clear descriptor cache.
1333   isolate_->descriptor_lookup_cache()->Clear();
1334
1335   // Used for updating survived_since_last_expansion_ at function end.
1336   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1337
1338   CheckNewSpaceExpansionCriteria();
1339
1340   SelectScavengingVisitorsTable();
1341
1342   incremental_marking()->PrepareForScavenge();
1343
1344   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1345   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1346
1347   // Flip the semispaces.  After flipping, to space is empty, from space has
1348   // live objects.
1349   new_space_.Flip();
1350   new_space_.ResetAllocationInfo();
1351
1352   // We need to sweep newly copied objects which can be either in the
1353   // to space or promoted to the old generation.  For to-space
1354   // objects, we treat the bottom of the to space as a queue.  Newly
1355   // copied and unswept objects lie between a 'front' mark and the
1356   // allocation pointer.
1357   //
1358   // Promoted objects can go into various old-generation spaces, and
1359   // can be allocated internally in the spaces (from the free list).
1360   // We treat the top of the to space as a queue of addresses of
1361   // promoted objects.  The addresses of newly promoted and unswept
1362   // objects lie between a 'front' mark and a 'rear' mark that is
1363   // updated as a side effect of promoting an object.
1364   //
1365   // There is guaranteed to be enough room at the top of the to space
1366   // for the addresses of promoted objects: every object promoted
1367   // frees up its size in bytes from the top of the new space, and
1368   // objects are at least one pointer in size.
1369   Address new_space_front = new_space_.ToSpaceStart();
1370   promotion_queue_.Initialize();
1371
1372 #ifdef DEBUG
1373   store_buffer()->Clean();
1374 #endif
1375
1376   ScavengeVisitor scavenge_visitor(this);
1377   // Copy roots.
1378   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1379
1380   // Copy objects reachable from the old generation.
1381   {
1382     StoreBufferRebuildScope scope(this,
1383                                   store_buffer(),
1384                                   &ScavengeStoreBufferCallback);
1385     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1386   }
1387
1388   // Copy objects reachable from simple cells by scavenging cell values
1389   // directly.
1390   HeapObjectIterator cell_iterator(cell_space_);
1391   for (HeapObject* heap_object = cell_iterator.Next();
1392        heap_object != NULL;
1393        heap_object = cell_iterator.Next()) {
1394     if (heap_object->IsCell()) {
1395       Cell* cell = Cell::cast(heap_object);
1396       Address value_address = cell->ValueAddress();
1397       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1398     }
1399   }
1400
1401   // Copy objects reachable from global property cells by scavenging global
1402   // property cell values directly.
1403   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1404   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1405        heap_object != NULL;
1406        heap_object = js_global_property_cell_iterator.Next()) {
1407     if (heap_object->IsPropertyCell()) {
1408       PropertyCell* cell = PropertyCell::cast(heap_object);
1409       Address value_address = cell->ValueAddress();
1410       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1411       Address type_address = cell->TypeAddress();
1412       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1413     }
1414   }
1415
1416   // Copy objects reachable from the code flushing candidates list.
1417   MarkCompactCollector* collector = mark_compact_collector();
1418   if (collector->is_code_flushing_enabled()) {
1419     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1420   }
1421
1422   // Scavenge the object reachable from the native contexts list directly.
1423   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1424
1425   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1426
1427   while (isolate()->global_handles()->IterateObjectGroups(
1428       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1429     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1430   }
1431   isolate()->global_handles()->RemoveObjectGroups();
1432   isolate()->global_handles()->RemoveImplicitRefGroups();
1433
1434   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1435       &IsUnscavengedHeapObject);
1436   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1437       &scavenge_visitor);
1438   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1439
1440   UpdateNewSpaceReferencesInExternalStringTable(
1441       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1442
1443   promotion_queue_.Destroy();
1444
1445   if (!FLAG_watch_ic_patching) {
1446     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1447   }
1448   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1449
1450   ScavengeWeakObjectRetainer weak_object_retainer(this);
1451   ProcessWeakReferences(&weak_object_retainer);
1452
1453   ASSERT(new_space_front == new_space_.top());
1454
1455   // Set age mark.
1456   new_space_.set_age_mark(new_space_.top());
1457
1458   new_space_.LowerInlineAllocationLimit(
1459       new_space_.inline_allocation_limit_step());
1460
1461   // Update how much has survived scavenge.
1462   IncrementYoungSurvivorsCounter(static_cast<int>(
1463       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1464
1465   LOG(isolate_, ResourceEvent("scavenge", "end"));
1466
1467   gc_state_ = NOT_IN_GC;
1468
1469   scavenges_since_last_idle_round_++;
1470 }
1471
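// A minimal, self-contained sketch of the queue discipline described at the
// top of Heap::Scavenge() above, with to-space modelled as a flat array of
// slots.  Newly copied objects are appended at "top", while "front" trails
// behind; every slot in [front, top) still has to be scanned for further
// new-space pointers, so copying and scanning interleave until the two
// pointers meet.  This is a toy model, not the real DoScavenge() loop.
struct ToSpaceQueueSketch {
  intptr_t* slots;  // backing store standing in for to-space
  int front;        // next unscanned slot (the scan pointer)
  int top;          // allocation pointer, i.e. the end of the queue

  bool Empty() const { return front == top; }

  // Called when an object has just been copied into to-space.
  void Push(intptr_t copied_object) { slots[top++] = copied_object; }

  // Called by the scan loop; scanning the result may Push() more objects.
  intptr_t Pop() { return slots[front++]; }
};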
1472
1473 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1474                                                                 Object** p) {
1475   MapWord first_word = HeapObject::cast(*p)->map_word();
1476
1477   if (!first_word.IsForwardingAddress()) {
1478     // Unreachable external string can be finalized.
1479     heap->FinalizeExternalString(String::cast(*p));
1480     return NULL;
1481   }
1482
1483   // String is still reachable.
1484   return String::cast(first_word.ToForwardingAddress());
1485 }
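
// [Illustrative sketch, not V8 code.] The updater above relies on the
// scavenger's forwarding protocol: once an object has been evacuated, the
// map word of the old copy is overwritten with the address of the new copy,
// and stale references are repaired by following that address. Below is a
// minimal stand-alone model of the check, using a low tag bit purely for
// illustration (V8's actual MapWord encoding differs); ExampleObject and
// ExampleUpdateSlot are invented names.
struct ExampleObject {
  // Low bit set: the object has moved and the remaining bits are the new
  // address. Low bit clear: the object has not been copied (yet).
  uintptr_t first_word;

  bool IsForwarded() const { return (first_word & 1u) != 0; }
  ExampleObject* ForwardingAddress() const {
    return reinterpret_cast<ExampleObject*>(
        first_word & ~static_cast<uintptr_t>(1));
  }
  void SetForwardingAddress(ExampleObject* target) {
    first_word = reinterpret_cast<uintptr_t>(target) | 1u;
  }
};

// Follow the forwarding address if the object moved; report NULL otherwise,
// so the caller can treat the entry as dead (as the finalization above does).
static ExampleObject* ExampleUpdateSlot(ExampleObject* old_copy) {
  return old_copy->IsForwarded() ? old_copy->ForwardingAddress() : NULL;
}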
1486
1487
1488 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1489     ExternalStringTableUpdaterCallback updater_func) {
1490 #ifdef VERIFY_HEAP
1491   if (FLAG_verify_heap) {
1492     external_string_table_.Verify();
1493   }
1494 #endif
1495
1496   if (external_string_table_.new_space_strings_.is_empty()) return;
1497
1498   Object** start = &external_string_table_.new_space_strings_[0];
1499   Object** end = start + external_string_table_.new_space_strings_.length();
1500   Object** last = start;
1501
1502   for (Object** p = start; p < end; ++p) {
1503     ASSERT(InFromSpace(*p));
1504     String* target = updater_func(this, p);
1505
1506     if (target == NULL) continue;
1507
1508     ASSERT(target->IsExternalString());
1509
1510     if (InNewSpace(target)) {
1511       // String is still in new space.  Update the table entry.
1512       *last = target;
1513       ++last;
1514     } else {
1515       // String got promoted.  Move it to the old string list.
1516       external_string_table_.AddOldString(target);
1517     }
1518   }
1519
1520   ASSERT(last <= end);
1521   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1522 }
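
// [Illustrative sketch, not V8 code.] The loop above filters the new-space
// string list in place: a read pointer (p) scans every entry while a write
// pointer (last) keeps only the survivors, and the list is then shrunk to
// the surviving prefix. The same idiom on a plain int array, where zero
// marks a dead entry; ExampleCompact is an invented name.
static int ExampleCompact(int* values, int length) {
  int* last = values;
  for (int* p = values; p < values + length; ++p) {
    if (*p == 0) continue;  // Dead entry: skip it.
    *last++ = *p;           // Live entry: keep it at the write pointer.
  }
  return static_cast<int>(last - values);  // The new, possibly shorter length.
}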
1523
1524
1525 void Heap::UpdateReferencesInExternalStringTable(
1526     ExternalStringTableUpdaterCallback updater_func) {
1527
1528   // Update old space string references.
1529   if (external_string_table_.old_space_strings_.length() > 0) {
1530     Object** start = &external_string_table_.old_space_strings_[0];
1531     Object** end = start + external_string_table_.old_space_strings_.length();
1532     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1533   }
1534
1535   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1536 }
1537
1538
1539 template <class T>
1540 struct WeakListVisitor;
1541
1542
1543 template <class T>
1544 static Object* VisitWeakList(Heap* heap,
1545                              Object* list,
1546                              WeakObjectRetainer* retainer,
1547                              bool record_slots) {
1548   Object* undefined = heap->undefined_value();
1549   Object* head = undefined;
1550   T* tail = NULL;
1551   MarkCompactCollector* collector = heap->mark_compact_collector();
1552   while (list != undefined) {
1553     // Check whether to keep the candidate in the list.
1554     T* candidate = reinterpret_cast<T*>(list);
1555     Object* retained = retainer->RetainAs(list);
1556     if (retained != NULL) {
1557       if (head == undefined) {
1558         // First element in the list.
1559         head = retained;
1560       } else {
1561         // Subsequent elements in the list.
1562         ASSERT(tail != NULL);
1563         WeakListVisitor<T>::SetWeakNext(tail, retained);
1564         if (record_slots) {
1565           Object** next_slot =
1566             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1567           collector->RecordSlot(next_slot, next_slot, retained);
1568         }
1569       }
1570       // Retained object is new tail.
1571       ASSERT(!retained->IsUndefined());
1572       candidate = reinterpret_cast<T*>(retained);
1573       tail = candidate;
1574
1575
1576       // tail is a live object, visit it.
1577       WeakListVisitor<T>::VisitLiveObject(
1578           heap, tail, retainer, record_slots);
1579     } else {
1580       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1581     }
1582
1583     // Move to next element in the list.
1584     list = WeakListVisitor<T>::WeakNext(candidate);
1585   }
1586
1587   // Terminate the list if there are one or more elements.
1588   if (tail != NULL) {
1589     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1590   }
1591   return head;
1592 }
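
// [Illustrative sketch, not V8 code.] VisitWeakList above prunes an
// intrusive, weakly linked list: dead elements are dropped, live elements
// are rethreaded behind the previous survivor, and the new head is returned.
// The same shape on a toy node type, with the WeakListVisitor traits, slot
// recording and the undefined-value sentinel omitted; ExampleWeakNode and
// ExamplePruneWeakList are invented names.
struct ExampleWeakNode {
  bool alive;
  ExampleWeakNode* weak_next;
};

static ExampleWeakNode* ExamplePruneWeakList(ExampleWeakNode* list) {
  ExampleWeakNode* head = NULL;
  ExampleWeakNode* tail = NULL;
  for (ExampleWeakNode* node = list; node != NULL; node = node->weak_next) {
    if (!node->alive) continue;    // Phantom element: unlink it by skipping.
    if (head == NULL) {
      head = node;                 // First retained element becomes the head.
    } else {
      tail->weak_next = node;      // Rethread behind the previous survivor.
    }
    tail = node;
  }
  if (tail != NULL) tail->weak_next = NULL;  // Terminate the pruned list.
  return head;
}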
1593
1594
1595 template<>
1596 struct WeakListVisitor<JSFunction> {
1597   static void SetWeakNext(JSFunction* function, Object* next) {
1598     function->set_next_function_link(next);
1599   }
1600
1601   static Object* WeakNext(JSFunction* function) {
1602     return function->next_function_link();
1603   }
1604
1605   static int WeakNextOffset() {
1606     return JSFunction::kNextFunctionLinkOffset;
1607   }
1608
1609   static void VisitLiveObject(Heap*, JSFunction*,
1610                               WeakObjectRetainer*, bool) {
1611   }
1612
1613   static void VisitPhantomObject(Heap*, JSFunction*) {
1614   }
1615 };
1616
1617
1618 template<>
1619 struct WeakListVisitor<Context> {
1620   static void SetWeakNext(Context* context, Object* next) {
1621     context->set(Context::NEXT_CONTEXT_LINK,
1622                  next,
1623                  UPDATE_WRITE_BARRIER);
1624   }
1625
1626   static Object* WeakNext(Context* context) {
1627     return context->get(Context::NEXT_CONTEXT_LINK);
1628   }
1629
1630   static void VisitLiveObject(Heap* heap,
1631                               Context* context,
1632                               WeakObjectRetainer* retainer,
1633                               bool record_slots) {
1634     // Process the weak list of optimized functions for the context.
1635     Object* function_list_head =
1636         VisitWeakList<JSFunction>(
1637             heap,
1638             context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1639             retainer,
1640             record_slots);
1641     context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1642                  function_list_head,
1643                  UPDATE_WRITE_BARRIER);
1644     if (record_slots) {
1645       Object** optimized_functions =
1646           HeapObject::RawField(
1647               context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1648       heap->mark_compact_collector()->RecordSlot(
1649           optimized_functions, optimized_functions, function_list_head);
1650     }
1651   }
1652
1653   static void VisitPhantomObject(Heap*, Context*) {
1654   }
1655
1656   static int WeakNextOffset() {
1657     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1658   }
1659 };
1660
1661
1662 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1663   // We don't record weak slots during marking or scavenges.
1664   // Instead we do it once when we complete a mark-compact cycle.
1665   // Note that the write barrier has no effect if we are already in the middle
1666   // of a compacting mark-sweep cycle, so we have to record slots manually.
1667   bool record_slots =
1668       gc_state() == MARK_COMPACT &&
1669       mark_compact_collector()->is_compacting();
1670   ProcessArrayBuffers(retainer, record_slots);
1671   ProcessNativeContexts(retainer, record_slots);
1672   ProcessAllocationSites(retainer, record_slots);
1673 }
1674
1675 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1676                                  bool record_slots) {
1677   Object* head =
1678       VisitWeakList<Context>(
1679           this, native_contexts_list(), retainer, record_slots);
1680   // Update the head of the list of contexts.
1681   native_contexts_list_ = head;
1682 }
1683
1684
1685 template<>
1686 struct WeakListVisitor<JSArrayBufferView> {
1687   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1688     obj->set_weak_next(next);
1689   }
1690
1691   static Object* WeakNext(JSArrayBufferView* obj) {
1692     return obj->weak_next();
1693   }
1694
1695   static void VisitLiveObject(Heap*,
1696                               JSArrayBufferView* obj,
1697                               WeakObjectRetainer* retainer,
1698                               bool record_slots) {}
1699
1700   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1701
1702   static int WeakNextOffset() {
1703     return JSArrayBufferView::kWeakNextOffset;
1704   }
1705 };
1706
1707
1708 template<>
1709 struct WeakListVisitor<JSArrayBuffer> {
1710   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1711     obj->set_weak_next(next);
1712   }
1713
1714   static Object* WeakNext(JSArrayBuffer* obj) {
1715     return obj->weak_next();
1716   }
1717
1718   static void VisitLiveObject(Heap* heap,
1719                               JSArrayBuffer* array_buffer,
1720                               WeakObjectRetainer* retainer,
1721                               bool record_slots) {
1722     Object* typed_array_obj =
1723         VisitWeakList<JSArrayBufferView>(
1724             heap,
1725             array_buffer->weak_first_view(),
1726             retainer, record_slots);
1727     array_buffer->set_weak_first_view(typed_array_obj);
1728     if (typed_array_obj != heap->undefined_value() && record_slots) {
1729       Object** slot = HeapObject::RawField(
1730           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1731       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1732     }
1733   }
1734
1735   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1736     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1737   }
1738
1739   static int WeakNextOffset() {
1740     return JSArrayBuffer::kWeakNextOffset;
1741   }
1742 };
1743
1744
1745 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1746                                bool record_slots) {
1747   Object* array_buffer_obj =
1748       VisitWeakList<JSArrayBuffer>(this,
1749                                    array_buffers_list(),
1750                                    retainer, record_slots);
1751   set_array_buffers_list(array_buffer_obj);
1752 }
1753
1754
1755 void Heap::TearDownArrayBuffers() {
1756   Object* undefined = undefined_value();
1757   for (Object* o = array_buffers_list(); o != undefined;) {
1758     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1759     Runtime::FreeArrayBuffer(isolate(), buffer);
1760     o = buffer->weak_next();
1761   }
1762   array_buffers_list_ = undefined;
1763 }
1764
1765
1766 template<>
1767 struct WeakListVisitor<AllocationSite> {
1768   static void SetWeakNext(AllocationSite* obj, Object* next) {
1769     obj->set_weak_next(next);
1770   }
1771
1772   static Object* WeakNext(AllocationSite* obj) {
1773     return obj->weak_next();
1774   }
1775
1776   static void VisitLiveObject(Heap* heap,
1777                               AllocationSite* site,
1778                               WeakObjectRetainer* retainer,
1779                               bool record_slots) {}
1780
1781   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1782
1783   static int WeakNextOffset() {
1784     return AllocationSite::kWeakNextOffset;
1785   }
1786 };
1787
1788
1789 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1790                                   bool record_slots) {
1791   Object* allocation_site_obj =
1792       VisitWeakList<AllocationSite>(this,
1793                                     allocation_sites_list(),
1794                                     retainer, record_slots);
1795   set_allocation_sites_list(allocation_site_obj);
1796 }
1797
1798
1799 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1800   DisallowHeapAllocation no_allocation;
1801
1802   // Both the external string table and the string table may contain
1803   // external strings, but neither lists them exhaustively, nor is the
1804   // intersection set empty.  Therefore we iterate over the external string
1805   // table first, ignoring internalized strings, and then over the
1806   // internalized string table.
1807
1808   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1809    public:
1810     explicit ExternalStringTableVisitorAdapter(
1811         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1812     virtual void VisitPointers(Object** start, Object** end) {
1813       for (Object** p = start; p < end; p++) {
1814         // Visit non-internalized external strings,
1815         // since internalized strings are listed in the string table.
1816         if (!(*p)->IsInternalizedString()) {
1817           ASSERT((*p)->IsExternalString());
1818           visitor_->VisitExternalString(Utils::ToLocal(
1819               Handle<String>(String::cast(*p))));
1820         }
1821       }
1822     }
1823    private:
1824     v8::ExternalResourceVisitor* visitor_;
1825   } external_string_table_visitor(visitor);
1826
1827   external_string_table_.Iterate(&external_string_table_visitor);
1828
1829   class StringTableVisitorAdapter : public ObjectVisitor {
1830    public:
1831     explicit StringTableVisitorAdapter(
1832         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1833     virtual void VisitPointers(Object** start, Object** end) {
1834       for (Object** p = start; p < end; p++) {
1835         if ((*p)->IsExternalString()) {
1836           ASSERT((*p)->IsInternalizedString());
1837           visitor_->VisitExternalString(Utils::ToLocal(
1838               Handle<String>(String::cast(*p))));
1839         }
1840       }
1841     }
1842    private:
1843     v8::ExternalResourceVisitor* visitor_;
1844   } string_table_visitor(visitor);
1845
1846   string_table()->IterateElements(&string_table_visitor);
1847 }
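
// [Illustrative sketch, not V8 code.] Both loops above wrap the embedder's
// v8::ExternalResourceVisitor in a local ObjectVisitor adapter, so that the
// generic slot-iteration machinery can drive a callback with a different
// interface while filtering out entries the callback should not see. The
// adapter pattern in miniature, with invented interfaces
// (ExampleSlotVisitor, ExampleStringCallback, ExampleAdapter).
class ExampleStringCallback {
 public:
  virtual ~ExampleStringCallback() {}
  virtual void VisitString(const char* str) = 0;
};

class ExampleSlotVisitor {
 public:
  virtual ~ExampleSlotVisitor() {}
  virtual void VisitSlots(const char** start, const char** end) = 0;
};

class ExampleAdapter : public ExampleSlotVisitor {
 public:
  explicit ExampleAdapter(ExampleStringCallback* callback)
      : callback_(callback) {}
  virtual void VisitSlots(const char** start, const char** end) {
    for (const char** p = start; p < end; ++p) {
      if (*p != NULL) callback_->VisitString(*p);  // Filter out empty slots.
    }
  }
 private:
  ExampleStringCallback* callback_;
};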
1848
1849
1850 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1851  public:
1852   static inline void VisitPointer(Heap* heap, Object** p) {
1853     Object* object = *p;
1854     if (!heap->InNewSpace(object)) return;
1855     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1856                          reinterpret_cast<HeapObject*>(object));
1857   }
1858 };
1859
1860
1861 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1862                          Address new_space_front) {
1863   do {
1864     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1865     // The addresses new_space_front and new_space_.top() define a
1866     // queue of unprocessed copied objects.  Process them until the
1867     // queue is empty.
1868     while (new_space_front != new_space_.top()) {
1869       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1870         HeapObject* object = HeapObject::FromAddress(new_space_front);
1871         new_space_front +=
1872           NewSpaceScavenger::IterateBody(object->map(), object);
1873       } else {
1874         new_space_front =
1875             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1876       }
1877     }
1878
1879     // Promote and process all the to-be-promoted objects.
1880     {
1881       StoreBufferRebuildScope scope(this,
1882                                     store_buffer(),
1883                                     &ScavengeStoreBufferCallback);
1884       while (!promotion_queue()->is_empty()) {
1885         HeapObject* target;
1886         int size;
1887         promotion_queue()->remove(&target, &size);
1888
1889         // A promoted object might already have been partially visited
1890         // during old space pointer iteration. Thus we search specifically
1891         // for pointers into the from semispace instead of looking for
1892         // pointers to new space.
1893         ASSERT(!target->IsMap());
1894         IterateAndMarkPointersToFromSpace(target->address(),
1895                                           target->address() + size,
1896                                           &ScavengeObject);
1897       }
1898     }
1899
1900     // Take another spin if there are now unswept objects in new space
1901     // (there are currently no more unswept promoted objects).
1902   } while (new_space_front != new_space_.top());
1903
1904   return new_space_front;
1905 }
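
// [Illustrative sketch, not V8 code.] DoScavenge above is a Cheney-style
// breadth-first copying loop: the region between new_space_front and the
// current allocation top is an implicit queue of copied-but-unscanned
// objects, and scanning an object may copy further objects, which simply
// extends the queue. A compact model over an array of one-cell "objects",
// ignoring pages and the promotion queue; ExampleToSpace and
// ExampleDrainScavengeQueue are invented names.
struct ExampleToSpace {
  static const int kCapacity = 256;
  int cells[kCapacity];  // Each copied "object" occupies exactly one cell.
  int top;               // Allocation top: index of the next free cell.
};

// |scan| plays the role of new_space_front; space->top plays the role of
// new_space_.top(). |process| may allocate, i.e. advance space->top.
static void ExampleDrainScavengeQueue(ExampleToSpace* space,
                                      void (*process)(ExampleToSpace*, int)) {
  int scan = 0;
  while (scan != space->top) {
    process(space, scan);
    ++scan;
  }
}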
1906
1907
1908 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1909
1910
1911 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1912                                               HeapObject* object,
1913                                               int size));
1914
1915 static HeapObject* EnsureDoubleAligned(Heap* heap,
1916                                        HeapObject* object,
1917                                        int size) {
1918   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1919     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1920     return HeapObject::FromAddress(object->address() + kPointerSize);
1921   } else {
1922     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1923                                kPointerSize);
1924     return object;
1925   }
1926 }
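
// [Illustrative sketch, not V8 code.] EnsureDoubleAligned above assumes the
// caller over-allocated by one pointer-sized word (see EvacuateObject below)
// and spends that word as a filler either in front of the object, when the
// raw address is misaligned, or behind it, when it is already aligned, so
// the object starts on a double boundary and the heap stays iterable. The
// underlying address arithmetic in isolation; kExampleDoubleAlignment and
// the helper names are invented.
static const uintptr_t kExampleDoubleAlignment = 8;

// True if a one-word filler must go in front of the object; false if the
// spare word should be left as a filler behind it instead.
static bool ExampleNeedsFrontFiller(uintptr_t raw_address) {
  return (raw_address & (kExampleDoubleAlignment - 1)) != 0;
}

// Rounds an address up to the next double-aligned boundary.
static uintptr_t ExampleAlignUp(uintptr_t raw_address) {
  uintptr_t mask = kExampleDoubleAlignment - 1;
  return (raw_address + mask) & ~mask;
}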
1927
1928
1929 enum LoggingAndProfiling {
1930   LOGGING_AND_PROFILING_ENABLED,
1931   LOGGING_AND_PROFILING_DISABLED
1932 };
1933
1934
1935 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1936
1937
1938 template<MarksHandling marks_handling,
1939          LoggingAndProfiling logging_and_profiling_mode>
1940 class ScavengingVisitor : public StaticVisitorBase {
1941  public:
1942   static void Initialize() {
1943     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1944     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1945     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1946     table_.Register(kVisitByteArray, &EvacuateByteArray);
1947     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1948     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1949
1950     table_.Register(kVisitNativeContext,
1951                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1952                         template VisitSpecialized<Context::kSize>);
1953
1954     table_.Register(kVisitConsString,
1955                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1956                         template VisitSpecialized<ConsString::kSize>);
1957
1958     table_.Register(kVisitSlicedString,
1959                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1960                         template VisitSpecialized<SlicedString::kSize>);
1961
1962     table_.Register(kVisitSymbol,
1963                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1964                         template VisitSpecialized<Symbol::kSize>);
1965
1966     table_.Register(kVisitSharedFunctionInfo,
1967                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1968                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1969
1970     table_.Register(kVisitJSWeakMap,
1971                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1972                     Visit);
1973
1974     table_.Register(kVisitJSWeakSet,
1975                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1976                     Visit);
1977
1978     table_.Register(kVisitJSArrayBuffer,
1979                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1980                     Visit);
1981
1982     table_.Register(kVisitJSTypedArray,
1983                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1984                     Visit);
1985
1986     table_.Register(kVisitJSDataView,
1987                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1988                     Visit);
1989
1990     table_.Register(kVisitJSRegExp,
1991                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1992                     Visit);
1993
1994     if (marks_handling == IGNORE_MARKS) {
1995       table_.Register(kVisitJSFunction,
1996                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1997                           template VisitSpecialized<JSFunction::kSize>);
1998     } else {
1999       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
2000     }
2001
2002     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2003                                    kVisitDataObject,
2004                                    kVisitDataObjectGeneric>();
2005
2006     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2007                                    kVisitJSObject,
2008                                    kVisitJSObjectGeneric>();
2009
2010     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2011                                    kVisitStruct,
2012                                    kVisitStructGeneric>();
2013   }
2014
2015   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2016     return &table_;
2017   }
2018
2019  private:
2020   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
2021
2022   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2023     bool should_record = false;
2024 #ifdef DEBUG
2025     should_record = FLAG_heap_stats;
2026 #endif
2027     should_record = should_record || FLAG_log_gc;
2028     if (should_record) {
2029       if (heap->new_space()->Contains(obj)) {
2030         heap->new_space()->RecordAllocation(obj);
2031       } else {
2032         heap->new_space()->RecordPromotion(obj);
2033       }
2034     }
2035   }
2036
2037   // Helper function used during evacuation to copy a source object to an
2038   // allocated target object and to update the forwarding pointer in the
2039   // source object.
2040   INLINE(static void MigrateObject(Heap* heap,
2041                                    HeapObject* source,
2042                                    HeapObject* target,
2043                                    int size)) {
2044     // Copy the content of source to target.
2045     heap->CopyBlock(target->address(), source->address(), size);
2046
2047     // Set the forwarding address.
2048     source->set_map_word(MapWord::FromForwardingAddress(target));
2049
2050     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2051       // Update NewSpace stats if necessary.
2052       RecordCopiedObject(heap, target);
2053       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2054       Isolate* isolate = heap->isolate();
2055       if (isolate->logger()->is_logging_code_events() ||
2056           isolate->cpu_profiler()->is_profiling()) {
2057         if (target->IsSharedFunctionInfo()) {
2058           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2059               source->address(), target->address()));
2060         }
2061       }
2062     }
2063
2064     if (marks_handling == TRANSFER_MARKS) {
2065       if (Marking::TransferColor(source, target)) {
2066         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2067       }
2068     }
2069   }
2070
2071
2072   template<ObjectContents object_contents, int alignment>
2073   static inline void EvacuateObject(Map* map,
2074                                     HeapObject** slot,
2075                                     HeapObject* object,
2076                                     int object_size) {
2077     SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2078     SLOW_ASSERT(object->Size() == object_size);
2079
2080     int allocation_size = object_size;
2081     if (alignment != kObjectAlignment) {
2082       ASSERT(alignment == kDoubleAlignment);
2083       allocation_size += kPointerSize;
2084     }
2085
2086     Heap* heap = map->GetHeap();
2087     if (heap->ShouldBePromoted(object->address(), object_size)) {
2088       MaybeObject* maybe_result;
2089
2090       if (object_contents == DATA_OBJECT) {
2091         maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2092       } else {
2093         maybe_result =
2094             heap->old_pointer_space()->AllocateRaw(allocation_size);
2095       }
2096
2097       Object* result = NULL;  // Initialization to please compiler.
2098       if (maybe_result->ToObject(&result)) {
2099         HeapObject* target = HeapObject::cast(result);
2100
2101         if (alignment != kObjectAlignment) {
2102           target = EnsureDoubleAligned(heap, target, allocation_size);
2103         }
2104
2105         // Order is important: slot might be inside of the target if target
2106         // was allocated over a dead object and slot comes from the store
2107         // buffer.
2108         *slot = target;
2109         MigrateObject(heap, object, target, object_size);
2110
2111         if (object_contents == POINTER_OBJECT) {
2112           if (map->instance_type() == JS_FUNCTION_TYPE) {
2113             heap->promotion_queue()->insert(
2114                 target, JSFunction::kNonWeakFieldsEndOffset);
2115           } else {
2116             heap->promotion_queue()->insert(target, object_size);
2117           }
2118         }
2119
2120         heap->tracer()->increment_promoted_objects_size(object_size);
2121         return;
2122       }
2123     }
2124     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2125     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2126     Object* result = allocation->ToObjectUnchecked();
2127     HeapObject* target = HeapObject::cast(result);
2128
2129     if (alignment != kObjectAlignment) {
2130       target = EnsureDoubleAligned(heap, target, allocation_size);
2131     }
2132
2133     // Order is important: slot might be inside of the target if target
2134     // was allocated over a dead object and slot comes from the store
2135     // buffer.
2136     *slot = target;
2137     MigrateObject(heap, object, target, object_size);
2138     return;
2139   }
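
  // [Illustrative sketch, not V8 code.] EvacuateObject above first tries to
  // promote an object that is considered old enough (ShouldBePromoted) into
  // old space and only falls back to copying it within new space; in either
  // case the referring slot is updated before the copy is used further,
  // because the slot itself may lie inside freshly allocated memory. A
  // stripped-down model of that decision with invented types (ExampleChunk,
  // ExampleTryAllocate, ExampleEvacuate); alignment, the promotion queue and
  // allocation-failure propagation are left out.
  struct ExampleChunk { char* next; char* limit; };

  static char* ExampleTryAllocate(ExampleChunk* chunk, int size) {
    if (chunk->limit - chunk->next < size) return NULL;  // Chunk exhausted.
    char* result = chunk->next;
    chunk->next += size;
    return result;
  }

  static char* ExampleEvacuate(ExampleChunk* old_space,
                               ExampleChunk* new_space,
                               bool should_be_promoted,
                               int size) {
    if (should_be_promoted) {
      char* target = ExampleTryAllocate(old_space, size);
      if (target != NULL) return target;  // Promotion succeeded.
    }
    // Not old enough, or old space is full: keep the object in new space.
    return ExampleTryAllocate(new_space, size);
  }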
2140
2141
2142   static inline void EvacuateJSFunction(Map* map,
2143                                         HeapObject** slot,
2144                                         HeapObject* object) {
2145     ObjectEvacuationStrategy<POINTER_OBJECT>::
2146         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2147
2148     HeapObject* target = *slot;
2149     MarkBit mark_bit = Marking::MarkBitFrom(target);
2150     if (Marking::IsBlack(mark_bit)) {
2151       // This object is black, and it might not be rescanned by the marker.
2152       // We should explicitly record the code entry slot for compaction because
2153       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2154       // miss it, as it is not HeapObject-tagged.
2155       Address code_entry_slot =
2156           target->address() + JSFunction::kCodeEntryOffset;
2157       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2158       map->GetHeap()->mark_compact_collector()->
2159           RecordCodeEntrySlot(code_entry_slot, code);
2160     }
2161   }
2162
2163
2164   static inline void EvacuateFixedArray(Map* map,
2165                                         HeapObject** slot,
2166                                         HeapObject* object) {
2167     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2168     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2169         map, slot, object, object_size);
2170   }
2171
2172
2173   static inline void EvacuateFixedDoubleArray(Map* map,
2174                                               HeapObject** slot,
2175                                               HeapObject* object) {
2176     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2177     int object_size = FixedDoubleArray::SizeFor(length);
2178     EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
2179         map, slot, object, object_size);
2180   }
2181
2182
2183   static inline void EvacuateByteArray(Map* map,
2184                                        HeapObject** slot,
2185                                        HeapObject* object) {
2186     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2187     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2188         map, slot, object, object_size);
2189   }
2190
2191
2192   static inline void EvacuateSeqOneByteString(Map* map,
2193                                             HeapObject** slot,
2194                                             HeapObject* object) {
2195     int object_size = SeqOneByteString::cast(object)->
2196         SeqOneByteStringSize(map->instance_type());
2197     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2198         map, slot, object, object_size);
2199   }
2200
2201
2202   static inline void EvacuateSeqTwoByteString(Map* map,
2203                                               HeapObject** slot,
2204                                               HeapObject* object) {
2205     int object_size = SeqTwoByteString::cast(object)->
2206         SeqTwoByteStringSize(map->instance_type());
2207     EvacuateObject<DATA_OBJECT, kObjectAlignment>(
2208         map, slot, object, object_size);
2209   }
2210
2211
2212   static inline bool IsShortcutCandidate(int type) {
2213     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2214   }
2215
2216   static inline void EvacuateShortcutCandidate(Map* map,
2217                                                HeapObject** slot,
2218                                                HeapObject* object) {
2219     ASSERT(IsShortcutCandidate(map->instance_type()));
2220
2221     Heap* heap = map->GetHeap();
2222
2223     if (marks_handling == IGNORE_MARKS &&
2224         ConsString::cast(object)->unchecked_second() ==
2225         heap->empty_string()) {
2226       HeapObject* first =
2227           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2228
2229       *slot = first;
2230
2231       if (!heap->InNewSpace(first)) {
2232         object->set_map_word(MapWord::FromForwardingAddress(first));
2233         return;
2234       }
2235
2236       MapWord first_word = first->map_word();
2237       if (first_word.IsForwardingAddress()) {
2238         HeapObject* target = first_word.ToForwardingAddress();
2239
2240         *slot = target;
2241         object->set_map_word(MapWord::FromForwardingAddress(target));
2242         return;
2243       }
2244
2245       heap->DoScavengeObject(first->map(), slot, first);
2246       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2247       return;
2248     }
2249
2250     int object_size = ConsString::kSize;
2251     EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
2252         map, slot, object, object_size);
2253   }
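
  // [Illustrative sketch, not V8 code.] EvacuateShortcutCandidate above
  // short-circuits a cons string whose second part is the empty string:
  // rather than copying the wrapper, the referring slot is redirected to the
  // first part (and the wrapper's map word is turned into a forwarding
  // pointer so later visits take the same shortcut). The shape of that
  // rewrite on a toy pair type; ExampleCons and ExampleTryShortcut are
  // invented names, and the forwarding-word update is omitted.
  struct ExampleCons {
    const char* first;
    const char* second;  // An empty string means the cons is really |first|.
  };

  static bool ExampleTryShortcut(ExampleCons* cons, const char** slot) {
    if (cons->second != NULL && cons->second[0] == '\0') {
      *slot = cons->first;  // Redirect the referring slot past the wrapper.
      return true;
    }
    return false;  // Not a shortcut candidate; evacuate the cons normally.
  }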
2254
2255   template<ObjectContents object_contents>
2256   class ObjectEvacuationStrategy {
2257    public:
2258     template<int object_size>
2259     static inline void VisitSpecialized(Map* map,
2260                                         HeapObject** slot,
2261                                         HeapObject* object) {
2262       EvacuateObject<object_contents, kObjectAlignment>(
2263           map, slot, object, object_size);
2264     }
2265
2266     static inline void Visit(Map* map,
2267                              HeapObject** slot,
2268                              HeapObject* object) {
2269       int object_size = map->instance_size();
2270       EvacuateObject<object_contents, kObjectAlignment>(
2271           map, slot, object, object_size);
2272     }
2273   };
2274
2275   static VisitorDispatchTable<ScavengingCallback> table_;
2276 };
2277
2278
2279 template<MarksHandling marks_handling,
2280          LoggingAndProfiling logging_and_profiling_mode>
2281 VisitorDispatchTable<ScavengingCallback>
2282     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2283
2284
2285 static void InitializeScavengingVisitorsTables() {
2286   ScavengingVisitor<TRANSFER_MARKS,
2287                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2288   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2289   ScavengingVisitor<TRANSFER_MARKS,
2290                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2291   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2292 }
2293
2294
2295 void Heap::SelectScavengingVisitorsTable() {
2296   bool logging_and_profiling =
2297       isolate()->logger()->is_logging() ||
2298       isolate()->cpu_profiler()->is_profiling() ||
2299       (isolate()->heap_profiler() != NULL &&
2300        isolate()->heap_profiler()->is_profiling());
2301
2302   if (!incremental_marking()->IsMarking()) {
2303     if (!logging_and_profiling) {
2304       scavenging_visitors_table_.CopyFrom(
2305           ScavengingVisitor<IGNORE_MARKS,
2306                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2307     } else {
2308       scavenging_visitors_table_.CopyFrom(
2309           ScavengingVisitor<IGNORE_MARKS,
2310                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2311     }
2312   } else {
2313     if (!logging_and_profiling) {
2314       scavenging_visitors_table_.CopyFrom(
2315           ScavengingVisitor<TRANSFER_MARKS,
2316                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2317     } else {
2318       scavenging_visitors_table_.CopyFrom(
2319           ScavengingVisitor<TRANSFER_MARKS,
2320                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2321     }
2322
2323     if (incremental_marking()->IsCompacting()) {
2324       // When compacting, forbid short-circuiting of cons strings.
2325       // Scavenging code relies on the fact that a new space object
2326       // cannot be evacuated into an evacuation candidate, but
2327       // short-circuiting would violate this assumption.
2328       scavenging_visitors_table_.Register(
2329           StaticVisitorBase::kVisitShortcutCandidate,
2330           scavenging_visitors_table_.GetVisitorById(
2331               StaticVisitorBase::kVisitConsString));
2332     }
2333   }
2334 }
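
// [Illustrative sketch, not V8 code.] SelectScavengingVisitorsTable above
// chooses one of four statically initialized dispatch tables up front so
// that the per-object scavenging path never re-checks the logging or
// marking state. The same pattern reduced to two flags and a table of
// function pointers; ExampleCallback, ExampleSelectVisitor and the table
// entries are invented for this sketch.
typedef void (*ExampleCallback)();

static void ExamplePlainVisit() {}
static void ExampleLoggingVisit() {}
static void ExampleMarkingVisit() {}
static void ExampleMarkingLoggingVisit() {}

static ExampleCallback ExampleSelectVisitor(bool marking, bool logging) {
  // Index bit 0 encodes logging, bit 1 encodes marking.
  static const ExampleCallback kExampleTable[4] = {
    &ExamplePlainVisit, &ExampleLoggingVisit,
    &ExampleMarkingVisit, &ExampleMarkingLoggingVisit
  };
  return kExampleTable[(marking ? 2 : 0) | (logging ? 1 : 0)];
}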
2335
2336
2337 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2338   SLOW_ASSERT(HEAP->InFromSpace(object));
2339   MapWord first_word = object->map_word();
2340   SLOW_ASSERT(!first_word.IsForwardingAddress());
2341   Map* map = first_word.ToMap();
2342   map->GetHeap()->DoScavengeObject(map, p, object);
2343 }
2344
2345
2346 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2347                                       int instance_size) {
2348   Object* result;
2349   MaybeObject* maybe_result = AllocateRawMap();
2350   if (!maybe_result->ToObject(&result)) return maybe_result;
2351
2352   // Map::cast cannot be used due to uninitialized map field.
2353   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2354   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2355   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2356   reinterpret_cast<Map*>(result)->set_visitor_id(
2357         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2358   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2359   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2360   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2361   reinterpret_cast<Map*>(result)->set_bit_field(0);
2362   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2363   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2364                    Map::OwnsDescriptors::encode(true);
2365   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2366   return result;
2367 }
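
// [Illustrative sketch, not V8 code.] AllocatePartialMap and the allocators
// that follow all use the same failure-propagation idiom: a raw allocation
// either yields an object or a retry-after-GC failure, and a failure is
// returned to the caller unchanged rather than handled locally. The control
// flow in isolation, with an invented ExampleResult type standing in for
// MaybeObject and an invented ExampleAllocateAndInit wrapper.
struct ExampleResult {
  void* value;  // NULL plays the role of a retry-after-GC failure here.

  bool To(void** out) const {  // Mirrors the MaybeObject::To/ToObject shape.
    if (value == NULL) return false;
    *out = value;
    return true;
  }
};

static ExampleResult ExampleAllocateAndInit(ExampleResult (*allocate_raw)()) {
  ExampleResult maybe = allocate_raw();
  void* raw = NULL;
  if (!maybe.To(&raw)) return maybe;  // Propagate the failure to the caller.
  // ... initialize the fields of |raw| here before handing it out ...
  ExampleResult ok = { raw };
  return ok;
}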
2368
2369
2370 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2371                                int instance_size,
2372                                ElementsKind elements_kind) {
2373   Object* result;
2374   MaybeObject* maybe_result = AllocateRawMap();
2375   if (!maybe_result->To(&result)) return maybe_result;
2376
2377   Map* map = reinterpret_cast<Map*>(result);
2378   map->set_map_no_write_barrier(meta_map());
2379   map->set_instance_type(instance_type);
2380   map->set_visitor_id(
2381       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2382   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2383   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2384   map->set_instance_size(instance_size);
2385   map->set_inobject_properties(0);
2386   map->set_pre_allocated_property_fields(0);
2387   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2388   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2389                           SKIP_WRITE_BARRIER);
2390   map->init_back_pointer(undefined_value());
2391   map->set_unused_property_fields(0);
2392   map->set_instance_descriptors(empty_descriptor_array());
2393   map->set_bit_field(0);
2394   map->set_bit_field2(1 << Map::kIsExtensible);
2395   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2396                    Map::OwnsDescriptors::encode(true);
2397   map->set_bit_field3(bit_field3);
2398   map->set_elements_kind(elements_kind);
2399
2400   return map;
2401 }
2402
2403
2404 MaybeObject* Heap::AllocateCodeCache() {
2405   CodeCache* code_cache;
2406   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2407     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2408   }
2409   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2410   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2411   return code_cache;
2412 }
2413
2414
2415 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2416   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2417 }
2418
2419
2420 MaybeObject* Heap::AllocateAccessorPair() {
2421   AccessorPair* accessors;
2422   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2423     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2424   }
2425   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2426   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2427   return accessors;
2428 }
2429
2430
2431 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2432   TypeFeedbackInfo* info;
2433   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2434     if (!maybe_info->To(&info)) return maybe_info;
2435   }
2436   info->initialize_storage();
2437   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2438                                 SKIP_WRITE_BARRIER);
2439   return info;
2440 }
2441
2442
2443 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2444   AliasedArgumentsEntry* entry;
2445   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2446     if (!maybe_entry->To(&entry)) return maybe_entry;
2447   }
2448   entry->set_aliased_context_slot(aliased_context_slot);
2449   return entry;
2450 }
2451
2452
2453 const Heap::StringTypeTable Heap::string_type_table[] = {
2454 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2455   {type, size, k##camel_name##MapRootIndex},
2456   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2457 #undef STRING_TYPE_ELEMENT
2458 };
2459
2460
2461 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2462 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2463   {contents, k##name##RootIndex},
2464   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2465 #undef CONSTANT_STRING_ELEMENT
2466 };
2467
2468
2469 const Heap::StructTable Heap::struct_table[] = {
2470 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2471   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2472   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2473 #undef STRUCT_TABLE_ELEMENT
2474 };
2475
2476
2477 bool Heap::CreateInitialMaps() {
2478   Object* obj;
2479   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2480     if (!maybe_obj->ToObject(&obj)) return false;
2481   }
2482   // Map::cast cannot be used due to uninitialized map field.
2483   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2484   set_meta_map(new_meta_map);
2485   new_meta_map->set_map(new_meta_map);
2486
2487   { MaybeObject* maybe_obj =
2488         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2489     if (!maybe_obj->ToObject(&obj)) return false;
2490   }
2491   set_fixed_array_map(Map::cast(obj));
2492
2493   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2494     if (!maybe_obj->ToObject(&obj)) return false;
2495   }
2496   set_oddball_map(Map::cast(obj));
2497
2498   // Allocate the empty array.
2499   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2500     if (!maybe_obj->ToObject(&obj)) return false;
2501   }
2502   set_empty_fixed_array(FixedArray::cast(obj));
2503
2504   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2505     if (!maybe_obj->ToObject(&obj)) return false;
2506   }
2507   set_null_value(Oddball::cast(obj));
2508   Oddball::cast(obj)->set_kind(Oddball::kNull);
2509
2510   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2511     if (!maybe_obj->ToObject(&obj)) return false;
2512   }
2513   set_undefined_value(Oddball::cast(obj));
2514   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2515   ASSERT(!InNewSpace(undefined_value()));
2516
2517   // Allocate the empty descriptor array.
2518   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2519     if (!maybe_obj->ToObject(&obj)) return false;
2520   }
2521   set_empty_descriptor_array(DescriptorArray::cast(obj));
2522
2523   // Fix the instance_descriptors for the existing maps.
2524   meta_map()->set_code_cache(empty_fixed_array());
2525   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2526   meta_map()->init_back_pointer(undefined_value());
2527   meta_map()->set_instance_descriptors(empty_descriptor_array());
2528
2529   fixed_array_map()->set_code_cache(empty_fixed_array());
2530   fixed_array_map()->set_dependent_code(
2531       DependentCode::cast(empty_fixed_array()));
2532   fixed_array_map()->init_back_pointer(undefined_value());
2533   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2534
2535   oddball_map()->set_code_cache(empty_fixed_array());
2536   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2537   oddball_map()->init_back_pointer(undefined_value());
2538   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2539
2540   // Fix prototype object for existing maps.
2541   meta_map()->set_prototype(null_value());
2542   meta_map()->set_constructor(null_value());
2543
2544   fixed_array_map()->set_prototype(null_value());
2545   fixed_array_map()->set_constructor(null_value());
2546
2547   oddball_map()->set_prototype(null_value());
2548   oddball_map()->set_constructor(null_value());
2549
2550   { MaybeObject* maybe_obj =
2551         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2552     if (!maybe_obj->ToObject(&obj)) return false;
2553   }
2554   set_fixed_cow_array_map(Map::cast(obj));
2555   ASSERT(fixed_array_map() != fixed_cow_array_map());
2556
2557   { MaybeObject* maybe_obj =
2558         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2559     if (!maybe_obj->ToObject(&obj)) return false;
2560   }
2561   set_scope_info_map(Map::cast(obj));
2562
2563   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2564     if (!maybe_obj->ToObject(&obj)) return false;
2565   }
2566   set_heap_number_map(Map::cast(obj));
2567
2568   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2569     if (!maybe_obj->ToObject(&obj)) return false;
2570   }
2571   set_symbol_map(Map::cast(obj));
2572
2573   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2574     if (!maybe_obj->ToObject(&obj)) return false;
2575   }
2576   set_foreign_map(Map::cast(obj));
2577
2578   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2579     const StringTypeTable& entry = string_type_table[i];
2580     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2581       if (!maybe_obj->ToObject(&obj)) return false;
2582     }
2583     roots_[entry.index] = Map::cast(obj);
2584   }
2585
2586   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2587     if (!maybe_obj->ToObject(&obj)) return false;
2588   }
2589   set_undetectable_string_map(Map::cast(obj));
2590   Map::cast(obj)->set_is_undetectable();
2591
2592   { MaybeObject* maybe_obj =
2593         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2594     if (!maybe_obj->ToObject(&obj)) return false;
2595   }
2596   set_undetectable_ascii_string_map(Map::cast(obj));
2597   Map::cast(obj)->set_is_undetectable();
2598
2599   { MaybeObject* maybe_obj =
2600         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2601     if (!maybe_obj->ToObject(&obj)) return false;
2602   }
2603   set_fixed_double_array_map(Map::cast(obj));
2604
2605   { MaybeObject* maybe_obj =
2606         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2607     if (!maybe_obj->ToObject(&obj)) return false;
2608   }
2609   set_byte_array_map(Map::cast(obj));
2610
2611   { MaybeObject* maybe_obj =
2612         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2613     if (!maybe_obj->ToObject(&obj)) return false;
2614   }
2615   set_free_space_map(Map::cast(obj));
2616
2617   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2618     if (!maybe_obj->ToObject(&obj)) return false;
2619   }
2620   set_empty_byte_array(ByteArray::cast(obj));
2621
2622   { MaybeObject* maybe_obj =
2623         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2624     if (!maybe_obj->ToObject(&obj)) return false;
2625   }
2626   set_external_pixel_array_map(Map::cast(obj));
2627
2628   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2629                                          ExternalArray::kAlignedSize);
2630     if (!maybe_obj->ToObject(&obj)) return false;
2631   }
2632   set_external_byte_array_map(Map::cast(obj));
2633
2634   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2635                                          ExternalArray::kAlignedSize);
2636     if (!maybe_obj->ToObject(&obj)) return false;
2637   }
2638   set_external_unsigned_byte_array_map(Map::cast(obj));
2639
2640   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2641                                          ExternalArray::kAlignedSize);
2642     if (!maybe_obj->ToObject(&obj)) return false;
2643   }
2644   set_external_short_array_map(Map::cast(obj));
2645
2646   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2647                                          ExternalArray::kAlignedSize);
2648     if (!maybe_obj->ToObject(&obj)) return false;
2649   }
2650   set_external_unsigned_short_array_map(Map::cast(obj));
2651
2652   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2653                                          ExternalArray::kAlignedSize);
2654     if (!maybe_obj->ToObject(&obj)) return false;
2655   }
2656   set_external_int_array_map(Map::cast(obj));
2657
2658   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2659                                          ExternalArray::kAlignedSize);
2660     if (!maybe_obj->ToObject(&obj)) return false;
2661   }
2662   set_external_unsigned_int_array_map(Map::cast(obj));
2663
2664   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2665                                          ExternalArray::kAlignedSize);
2666     if (!maybe_obj->ToObject(&obj)) return false;
2667   }
2668   set_external_float_array_map(Map::cast(obj));
2669
2670   { MaybeObject* maybe_obj =
2671         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2672     if (!maybe_obj->ToObject(&obj)) return false;
2673   }
2674   set_non_strict_arguments_elements_map(Map::cast(obj));
2675
2676   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2677                                          ExternalArray::kAlignedSize);
2678     if (!maybe_obj->ToObject(&obj)) return false;
2679   }
2680   set_external_double_array_map(Map::cast(obj));
2681
2682   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2683     if (!maybe_obj->ToObject(&obj)) return false;
2684   }
2685   set_empty_external_byte_array(ExternalArray::cast(obj));
2686
2687   { MaybeObject* maybe_obj =
2688         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2689     if (!maybe_obj->ToObject(&obj)) return false;
2690   }
2691   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2692
2693   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2694     if (!maybe_obj->ToObject(&obj)) return false;
2695   }
2696   set_empty_external_short_array(ExternalArray::cast(obj));
2697
2698   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2699       kExternalUnsignedShortArray);
2700     if (!maybe_obj->ToObject(&obj)) return false;
2701   }
2702   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2703
2704   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2705     if (!maybe_obj->ToObject(&obj)) return false;
2706   }
2707   set_empty_external_int_array(ExternalArray::cast(obj));
2708
2709   { MaybeObject* maybe_obj =
2710         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2711     if (!maybe_obj->ToObject(&obj)) return false;
2712   }
2713   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2714
2715   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2716     if (!maybe_obj->ToObject(&obj)) return false;
2717   }
2718   set_empty_external_float_array(ExternalArray::cast(obj));
2719
2720   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2721     if (!maybe_obj->ToObject(&obj)) return false;
2722   }
2723   set_empty_external_double_array(ExternalArray::cast(obj));
2724
2725   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2726     if (!maybe_obj->ToObject(&obj)) return false;
2727   }
2728   set_empty_external_pixel_array(ExternalArray::cast(obj));
2729
2730   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2731     if (!maybe_obj->ToObject(&obj)) return false;
2732   }
2733   set_code_map(Map::cast(obj));
2734
2735   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2736     if (!maybe_obj->ToObject(&obj)) return false;
2737   }
2738   set_cell_map(Map::cast(obj));
2739
2740   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2741                                          PropertyCell::kSize);
2742     if (!maybe_obj->ToObject(&obj)) return false;
2743   }
2744   set_global_property_cell_map(Map::cast(obj));
2745
2746   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2747     if (!maybe_obj->ToObject(&obj)) return false;
2748   }
2749   set_one_pointer_filler_map(Map::cast(obj));
2750
2751   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2752     if (!maybe_obj->ToObject(&obj)) return false;
2753   }
2754   set_two_pointer_filler_map(Map::cast(obj));
2755
2756   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2757     const StructTable& entry = struct_table[i];
2758     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2759       if (!maybe_obj->ToObject(&obj)) return false;
2760     }
2761     roots_[entry.index] = Map::cast(obj);
2762   }
2763
2764   { MaybeObject* maybe_obj =
2765         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2766     if (!maybe_obj->ToObject(&obj)) return false;
2767   }
2768   set_hash_table_map(Map::cast(obj));
2769
2770   { MaybeObject* maybe_obj =
2771         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2772     if (!maybe_obj->ToObject(&obj)) return false;
2773   }
2774   set_function_context_map(Map::cast(obj));
2775
2776   { MaybeObject* maybe_obj =
2777         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2778     if (!maybe_obj->ToObject(&obj)) return false;
2779   }
2780   set_catch_context_map(Map::cast(obj));
2781
2782   { MaybeObject* maybe_obj =
2783         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2784     if (!maybe_obj->ToObject(&obj)) return false;
2785   }
2786   set_with_context_map(Map::cast(obj));
2787
2788   { MaybeObject* maybe_obj =
2789         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2790     if (!maybe_obj->ToObject(&obj)) return false;
2791   }
2792   set_block_context_map(Map::cast(obj));
2793
2794   { MaybeObject* maybe_obj =
2795         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2796     if (!maybe_obj->ToObject(&obj)) return false;
2797   }
2798   set_module_context_map(Map::cast(obj));
2799
2800   { MaybeObject* maybe_obj =
2801         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2802     if (!maybe_obj->ToObject(&obj)) return false;
2803   }
2804   set_global_context_map(Map::cast(obj));
2805
2806   { MaybeObject* maybe_obj =
2807         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2808     if (!maybe_obj->ToObject(&obj)) return false;
2809   }
2810   Map* native_context_map = Map::cast(obj);
2811   native_context_map->set_dictionary_map(true);
2812   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2813   set_native_context_map(native_context_map);
2814
2815   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2816                                          SharedFunctionInfo::kAlignedSize);
2817     if (!maybe_obj->ToObject(&obj)) return false;
2818   }
2819   set_shared_function_info_map(Map::cast(obj));
2820
2821   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2822                                          JSMessageObject::kSize);
2823     if (!maybe_obj->ToObject(&obj)) return false;
2824   }
2825   set_message_object_map(Map::cast(obj));
2826
2827   Map* external_map;
2828   { MaybeObject* maybe_obj =
2829         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2830     if (!maybe_obj->To(&external_map)) return false;
2831   }
2832   external_map->set_is_extensible(false);
2833   set_external_map(external_map);
2834
2835   ASSERT(!InNewSpace(empty_fixed_array()));
2836   return true;
2837 }
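
// [Illustrative sketch, not V8 code.] CreateInitialMaps above has to break a
// chicken-and-egg cycle: every heap object needs a map, including maps
// themselves, so the meta map is allocated in a partially initialized state
// and pointed at itself before the remaining maps are created and patched
// up. The self-describing bootstrap in miniature; ExampleMap and
// ExampleBootstrapMetaMap are invented names.
struct ExampleMap {
  ExampleMap* map;  // Every object, even a map, is described by a map.
  int instance_size;
};

static void ExampleBootstrapMetaMap(ExampleMap* storage) {
  storage->map = storage;  // The meta map describes itself.
  storage->instance_size = static_cast<int>(sizeof(ExampleMap));
}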
2838
2839
2840 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2841   // Statically ensure that it is safe to allocate heap numbers in paged
2842   // spaces.
2843   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2844   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2845
2846   Object* result;
2847   { MaybeObject* maybe_result =
2848         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2849     if (!maybe_result->ToObject(&result)) return maybe_result;
2850   }
2851
2852   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2853   HeapNumber::cast(result)->set_value(value);
2854   return result;
2855 }
2856
2857
2858 MaybeObject* Heap::AllocateHeapNumber(double value) {
2859   // Use the general version if we're forced to always allocate.
2860   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2861
2862   // This version of AllocateHeapNumber is optimized for
2863   // allocation in new space.
2864   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2865   Object* result;
2866   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2867     if (!maybe_result->ToObject(&result)) return maybe_result;
2868   }
2869   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2870   HeapNumber::cast(result)->set_value(value);
2871   return result;
2872 }
2873
2874
2875 MaybeObject* Heap::AllocateCell(Object* value) {
2876   Object* result;
2877   { MaybeObject* maybe_result = AllocateRawCell();
2878     if (!maybe_result->ToObject(&result)) return maybe_result;
2879   }
2880   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2881   Cell::cast(result)->set_value(value);
2882   return result;
2883 }
2884
2885
2886 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2887   Object* result;
2888   MaybeObject* maybe_result = AllocateRawPropertyCell();
2889   if (!maybe_result->ToObject(&result)) return maybe_result;
2890
2891   HeapObject::cast(result)->set_map_no_write_barrier(
2892       global_property_cell_map());
2893   PropertyCell* cell = PropertyCell::cast(result);
2894   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2895                            SKIP_WRITE_BARRIER);
2896   cell->set_value(value);
2897   cell->set_type(Type::None());
2898   maybe_result = cell->SetValueInferType(value);
2899   if (maybe_result->IsFailure()) return maybe_result;
2900   return result;
2901 }
2902
2903
2904 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2905   Box* result;
2906   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2907   if (!maybe_result->To(&result)) return maybe_result;
2908   result->set_value(value);
2909   return result;
2910 }
2911
2912
2913 MaybeObject* Heap::AllocateAllocationSite() {
2914   Object* result;
2915   MaybeObject* maybe_result = Allocate(allocation_site_map(),
2916                                        OLD_POINTER_SPACE);
2917   if (!maybe_result->ToObject(&result)) return maybe_result;
2918   AllocationSite* site = AllocationSite::cast(result);
2919   site->Initialize();
2920
2921   // Link the new site into the list of allocation sites.
2922   site->set_weak_next(allocation_sites_list());
2923   set_allocation_sites_list(site);
2924   return result;
2925 }
2926
2927
2928 MaybeObject* Heap::CreateOddball(const char* to_string,
2929                                  Object* to_number,
2930                                  byte kind) {
2931   Object* result;
2932   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2933     if (!maybe_result->ToObject(&result)) return maybe_result;
2934   }
2935   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2936 }
2937
2938
2939 bool Heap::CreateApiObjects() {
2940   Object* obj;
2941
2942   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2943     if (!maybe_obj->ToObject(&obj)) return false;
2944   }
2945   // Don't use Smi-only elements optimizations for objects with the neander
2946   // map. There are too many cases where element values are set directly with a
2947   // bottleneck to trap the Smi-only -> fast elements transition, and there
2948   // appears to be no benefit in optimizing this case.
2949   Map* new_neander_map = Map::cast(obj);
2950   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2951   set_neander_map(new_neander_map);
2952
2953   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2954     if (!maybe_obj->ToObject(&obj)) return false;
2955   }
2956   Object* elements;
2957   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2958     if (!maybe_elements->ToObject(&elements)) return false;
2959   }
2960   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2961   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2962   set_message_listeners(JSObject::cast(obj));
2963
2964   return true;
2965 }
2966
2967
2968 void Heap::CreateJSEntryStub() {
2969   JSEntryStub stub;
2970   set_js_entry_code(*stub.GetCode(isolate()));
2971 }
2972
2973
2974 void Heap::CreateJSConstructEntryStub() {
2975   JSConstructEntryStub stub;
2976   set_js_construct_entry_code(*stub.GetCode(isolate()));
2977 }
2978
2979
2980 void Heap::CreateFixedStubs() {
2981   // Here we create roots for fixed stubs. They are needed at GC
2982   // for cooking and uncooking (check out frames.cc).
2983   // This eliminates the need for doing a dictionary lookup in the
2984   // stub cache for these stubs.
2985   HandleScope scope(isolate());
2986   // gcc-4.4 has problems generating correct code for the following snippet:
2987   // {  JSEntryStub stub;
2988   //    js_entry_code_ = *stub.GetCode();
2989   // }
2990   // {  JSConstructEntryStub stub;
2991   //    js_construct_entry_code_ = *stub.GetCode();
2992   // }
2993   // To work around the problem, make separate functions without inlining.
2994   Heap::CreateJSEntryStub();
2995   Heap::CreateJSConstructEntryStub();
2996
2997   // Create stubs that should be there, so we don't unexpectedly have to
2998   // create them if we need them during the creation of another stub.
2999   // Stub creation mixes raw pointers and handles in an unsafe manner so
3000   // we cannot create stubs while we are creating stubs.
3001   CodeStub::GenerateStubsAheadOfTime(isolate());
3002 }
3003
3004
3005 bool Heap::CreateInitialObjects() {
3006   Object* obj;
3007
3008   // The -0 value must be set before NumberFromDouble works.
3009   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3010     if (!maybe_obj->ToObject(&obj)) return false;
3011   }
3012   set_minus_zero_value(HeapNumber::cast(obj));
3013   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3014
3015   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3016     if (!maybe_obj->ToObject(&obj)) return false;
3017   }
3018   set_nan_value(HeapNumber::cast(obj));
3019
3020   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3021     if (!maybe_obj->ToObject(&obj)) return false;
3022   }
3023   set_infinity_value(HeapNumber::cast(obj));
3024
3025   // The hole has not been created yet, but we want to put something
3026   // predictable in the gaps in the string table, so let's make that Smi zero.
3027   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3028
3029   // Allocate initial string table.
3030   { MaybeObject* maybe_obj =
3031         StringTable::Allocate(this, kInitialStringTableSize);
3032     if (!maybe_obj->ToObject(&obj)) return false;
3033   }
3034   // Don't use set_string_table() due to asserts.
3035   roots_[kStringTableRootIndex] = obj;
3036
3037   // Finish initializing oddballs after creating the string table.
3038   { MaybeObject* maybe_obj =
3039         undefined_value()->Initialize("undefined",
3040                                       nan_value(),
3041                                       Oddball::kUndefined);
3042     if (!maybe_obj->ToObject(&obj)) return false;
3043   }
3044
3045   // Initialize the null_value.
3046   { MaybeObject* maybe_obj =
3047         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3048     if (!maybe_obj->ToObject(&obj)) return false;
3049   }
3050
3051   { MaybeObject* maybe_obj = CreateOddball("true",
3052                                            Smi::FromInt(1),
3053                                            Oddball::kTrue);
3054     if (!maybe_obj->ToObject(&obj)) return false;
3055   }
3056   set_true_value(Oddball::cast(obj));
3057
3058   { MaybeObject* maybe_obj = CreateOddball("false",
3059                                            Smi::FromInt(0),
3060                                            Oddball::kFalse);
3061     if (!maybe_obj->ToObject(&obj)) return false;
3062   }
3063   set_false_value(Oddball::cast(obj));
3064
3065   { MaybeObject* maybe_obj = CreateOddball("hole",
3066                                            Smi::FromInt(-1),
3067                                            Oddball::kTheHole);
3068     if (!maybe_obj->ToObject(&obj)) return false;
3069   }
3070   set_the_hole_value(Oddball::cast(obj));
3071
3072   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3073                                            Smi::FromInt(-1),
3074                                            Oddball::kUninitialized);
3075     if (!maybe_obj->ToObject(&obj)) return false;
3076   }
3077   set_uninitialized_value(Oddball::cast(obj));
3078
3079   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3080                                            Smi::FromInt(-4),
3081                                            Oddball::kArgumentMarker);
3082     if (!maybe_obj->ToObject(&obj)) return false;
3083   }
3084   set_arguments_marker(Oddball::cast(obj));
3085
3086   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3087                                            Smi::FromInt(-2),
3088                                            Oddball::kOther);
3089     if (!maybe_obj->ToObject(&obj)) return false;
3090   }
3091   set_no_interceptor_result_sentinel(obj);
3092
3093   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3094                                            Smi::FromInt(-3),
3095                                            Oddball::kOther);
3096     if (!maybe_obj->ToObject(&obj)) return false;
3097   }
3098   set_termination_exception(obj);
3099
3100   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3101     { MaybeObject* maybe_obj =
3102           InternalizeUtf8String(constant_string_table[i].contents);
3103       if (!maybe_obj->ToObject(&obj)) return false;
3104     }
3105     roots_[constant_string_table[i].index] = String::cast(obj);
3106   }
3107
3108   // Allocate the hidden string which is used to identify the hidden properties
3109   // in JSObjects. The hash code has a special value so that it will not match
3110   // the empty string when searching for the property. It cannot be part of the
3111   // loop above because it needs to be allocated manually with the special
3112   // hash code in place. The hash code for the hidden_string is zero to ensure
3113   // that it will always be at the first entry in property descriptors.
3114   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3115       OneByteVector("", 0), String::kEmptyStringHash);
3116     if (!maybe_obj->ToObject(&obj)) return false;
3117   }
3118   hidden_string_ = String::cast(obj);
3119
3120   // Allocate the code_stubs dictionary. The initial size is set to avoid
3121   // expanding the dictionary during bootstrapping.
3122   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3123     if (!maybe_obj->ToObject(&obj)) return false;
3124   }
3125   set_code_stubs(UnseededNumberDictionary::cast(obj));
3126
3127
3128   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3129   // is set to avoid expanding the dictionary during bootstrapping.
3130   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3131     if (!maybe_obj->ToObject(&obj)) return false;
3132   }
3133   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3134
3135   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3136     if (!maybe_obj->ToObject(&obj)) return false;
3137   }
3138   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3139
3140   set_instanceof_cache_function(Smi::FromInt(0));
3141   set_instanceof_cache_map(Smi::FromInt(0));
3142   set_instanceof_cache_answer(Smi::FromInt(0));
3143
3144   CreateFixedStubs();
3145
3146   // Allocate the dictionary of intrinsic function names.
3147   { MaybeObject* maybe_obj =
3148         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3149     if (!maybe_obj->ToObject(&obj)) return false;
3150   }
3151   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3152                                                                        obj);
3153     if (!maybe_obj->ToObject(&obj)) return false;
3154   }
3155   set_intrinsic_function_names(NameDictionary::cast(obj));
3156
3157   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3158     if (!maybe_obj->ToObject(&obj)) return false;
3159   }
3160   set_number_string_cache(FixedArray::cast(obj));
3161
3162   // Allocate cache for single character one byte strings.
3163   { MaybeObject* maybe_obj =
3164         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3165     if (!maybe_obj->ToObject(&obj)) return false;
3166   }
3167   set_single_character_string_cache(FixedArray::cast(obj));
3168
3169   // Allocate cache for string split.
3170   { MaybeObject* maybe_obj = AllocateFixedArray(
3171       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3172     if (!maybe_obj->ToObject(&obj)) return false;
3173   }
3174   set_string_split_cache(FixedArray::cast(obj));
3175
3176   { MaybeObject* maybe_obj = AllocateFixedArray(
3177       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3178     if (!maybe_obj->ToObject(&obj)) return false;
3179   }
3180   set_regexp_multiple_cache(FixedArray::cast(obj));
3181
3182   // Allocate cache for external strings pointing to native source code.
3183   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3184     if (!maybe_obj->ToObject(&obj)) return false;
3185   }
3186   set_natives_source_cache(FixedArray::cast(obj));
3187
3188   // Allocate object to hold object observation state.
3189   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3190     if (!maybe_obj->ToObject(&obj)) return false;
3191   }
3192   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3193     if (!maybe_obj->ToObject(&obj)) return false;
3194   }
3195   set_observation_state(JSObject::cast(obj));
3196
3197   { MaybeObject* maybe_obj = AllocateSymbol();
3198     if (!maybe_obj->ToObject(&obj)) return false;
3199   }
3200   set_frozen_symbol(Symbol::cast(obj));
3201
3202   { MaybeObject* maybe_obj = AllocateSymbol();
3203     if (!maybe_obj->ToObject(&obj)) return false;
3204   }
3205   set_elements_transition_symbol(Symbol::cast(obj));
3206
3207   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3208     if (!maybe_obj->ToObject(&obj)) return false;
3209   }
3210   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3211   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3212
3213   { MaybeObject* maybe_obj = AllocateSymbol();
3214     if (!maybe_obj->ToObject(&obj)) return false;
3215   }
3216   set_observed_symbol(Symbol::cast(obj));
3217
3218   // Handling of script id generation is in Factory::NewScript.
3219   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3220
3221   // Initialize keyed lookup cache.
3222   isolate_->keyed_lookup_cache()->Clear();
3223
3224   // Initialize context slot cache.
3225   isolate_->context_slot_cache()->Clear();
3226
3227   // Initialize descriptor cache.
3228   isolate_->descriptor_lookup_cache()->Clear();
3229
3230   // Initialize compilation cache.
3231   isolate_->compilation_cache()->Clear();
3232
3233   return true;
3234 }
3235
3236
3237 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3238   RootListIndex writable_roots[] = {
3239     kStoreBufferTopRootIndex,
3240     kStackLimitRootIndex,
3241     kNumberStringCacheRootIndex,
3242     kInstanceofCacheFunctionRootIndex,
3243     kInstanceofCacheMapRootIndex,
3244     kInstanceofCacheAnswerRootIndex,
3245     kCodeStubsRootIndex,
3246     kNonMonomorphicCacheRootIndex,
3247     kPolymorphicCodeCacheRootIndex,
3248     kLastScriptIdRootIndex,
3249     kEmptyScriptRootIndex,
3250     kRealStackLimitRootIndex,
3251     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3252     kConstructStubDeoptPCOffsetRootIndex,
3253     kGetterStubDeoptPCOffsetRootIndex,
3254     kSetterStubDeoptPCOffsetRootIndex,
3255     kStringTableRootIndex,
3256   };
3257
3258   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3259     if (root_index == writable_roots[i])
3260       return true;
3261   }
3262   return false;
3263 }
3264
3265
3266 bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3267   return !RootCanBeWrittenAfterInitialization(root_index) &&
3268       !InNewSpace(roots_array_start()[root_index]);
3269 }
3270
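// Illustrative consequence of the predicates above (a sketch only; assumes a
// Heap* heap in scope):
//   heap->RootCanBeTreatedAsConstant(Heap::kStringTableRootIndex);
//   // Always false: the string table is in writable_roots above, so code must
//   // not embed it as a constant.
//   heap->RootCanBeTreatedAsConstant(Heap::kUndefinedValueRootIndex);
//   // True once undefined_value() lives outside new space.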
3271
3272 Object* RegExpResultsCache::Lookup(Heap* heap,
3273                                    String* key_string,
3274                                    Object* key_pattern,
3275                                    ResultsCacheType type) {
3276   FixedArray* cache;
3277   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3278   if (type == STRING_SPLIT_SUBSTRINGS) {
3279     ASSERT(key_pattern->IsString());
3280     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3281     cache = heap->string_split_cache();
3282   } else {
3283     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3284     ASSERT(key_pattern->IsFixedArray());
3285     cache = heap->regexp_multiple_cache();
3286   }
3287
3288   uint32_t hash = key_string->Hash();
3289   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3290       ~(kArrayEntriesPerCacheEntry - 1));
3291   if (cache->get(index + kStringOffset) == key_string &&
3292       cache->get(index + kPatternOffset) == key_pattern) {
3293     return cache->get(index + kArrayOffset);
3294   }
3295   index =
3296       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3297   if (cache->get(index + kStringOffset) == key_string &&
3298       cache->get(index + kPatternOffset) == key_pattern) {
3299     return cache->get(index + kArrayOffset);
3300   }
3301   return Smi::FromInt(0);
3302 }
3303
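// Illustrative use of RegExpResultsCache::Lookup (a sketch only; assumes a
// Heap* heap plus internalized String* subject and String* pattern in scope):
//   Object* cached = RegExpResultsCache::Lookup(
//       heap, subject, pattern, RegExpResultsCache::STRING_SPLIT_SUBSTRINGS);
//   if (cached->IsFixedArray()) {
//     // Hit: reuse the previously cached substring array.
//   }
//   // A miss (or a non-internalized key) returns Smi::FromInt(0).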
3304
3305 void RegExpResultsCache::Enter(Heap* heap,
3306                                String* key_string,
3307                                Object* key_pattern,
3308                                FixedArray* value_array,
3309                                ResultsCacheType type) {
3310   FixedArray* cache;
3311   if (!key_string->IsInternalizedString()) return;
3312   if (type == STRING_SPLIT_SUBSTRINGS) {
3313     ASSERT(key_pattern->IsString());
3314     if (!key_pattern->IsInternalizedString()) return;
3315     cache = heap->string_split_cache();
3316   } else {
3317     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3318     ASSERT(key_pattern->IsFixedArray());
3319     cache = heap->regexp_multiple_cache();
3320   }
3321
3322   uint32_t hash = key_string->Hash();
3323   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3324       ~(kArrayEntriesPerCacheEntry - 1));
3325   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3326     cache->set(index + kStringOffset, key_string);
3327     cache->set(index + kPatternOffset, key_pattern);
3328     cache->set(index + kArrayOffset, value_array);
3329   } else {
3330     uint32_t index2 =
3331         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3332     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3333       cache->set(index2 + kStringOffset, key_string);
3334       cache->set(index2 + kPatternOffset, key_pattern);
3335       cache->set(index2 + kArrayOffset, value_array);
3336     } else {
3337       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3338       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3339       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3340       cache->set(index + kStringOffset, key_string);
3341       cache->set(index + kPatternOffset, key_pattern);
3342       cache->set(index + kArrayOffset, value_array);
3343     }
3344   }
3345   // If the array is a reasonably short list of substrings, convert it into a
3346   // list of internalized strings.
3347   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3348     for (int i = 0; i < value_array->length(); i++) {
3349       String* str = String::cast(value_array->get(i));
3350       Object* internalized_str;
3351       MaybeObject* maybe_string = heap->InternalizeString(str);
3352       if (maybe_string->ToObject(&internalized_str)) {
3353         value_array->set(i, internalized_str);
3354       }
3355     }
3356   }
3357   // Convert backing store to a copy-on-write array.
3358   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3359 }
3360
3361
3362 void RegExpResultsCache::Clear(FixedArray* cache) {
3363   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3364     cache->set(i, Smi::FromInt(0));
3365   }
3366 }
3367
3368
3369 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3370   MaybeObject* maybe_obj =
3371       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3372   return maybe_obj;
3373 }
3374
3375
3376 int Heap::FullSizeNumberStringCacheLength() {
3377   // Compute the size of the number string cache based on the max new space
3378   // size.  The cache has a minimum size of twice the initial cache size to
3379   // ensure that it is bigger after being made 'full size'.
3380   int number_string_cache_size = max_semispace_size_ / 512;
3381   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3382                                  Min(0x4000, number_string_cache_size));
3383   // There is a string and a number per entry so the length is twice the number
3384   // of entries.
3385   return number_string_cache_size * 2;
3386 }
3387
3388
3389 void Heap::AllocateFullSizeNumberStringCache() {
3390   // The idea is to have a small number string cache in the snapshot to keep
3391   // boot-time memory usage down.  Expanding the number string cache while
3392   // creating the snapshot would defeat that purpose.
3393   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3394   MaybeObject* maybe_obj =
3395       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3396   Object* new_cache;
3397   if (maybe_obj->ToObject(&new_cache)) {
3398     // We don't bother to repopulate the cache with entries from the old cache.
3399     // It will be repopulated soon enough with new strings.
3400     set_number_string_cache(FixedArray::cast(new_cache));
3401   }
3402   // If allocation fails then we just return without doing anything.  It is only
3403   // a cache, so best effort is OK here.
3404 }
3405
3406
3407 void Heap::FlushNumberStringCache() {
3408   // Flush the number to string cache.
3409   int len = number_string_cache()->length();
3410   for (int i = 0; i < len; i++) {
3411     number_string_cache()->set_undefined(this, i);
3412   }
3413 }
3414
3415
3416 static inline int double_get_hash(double d) {
3417   DoubleRepresentation rep(d);
3418   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3419 }
3420
3421
3422 static inline int smi_get_hash(Smi* smi) {
3423   return smi->value();
3424 }
3425
3426
3427 Object* Heap::GetNumberStringCache(Object* number) {
3428   int hash;
3429   int mask = (number_string_cache()->length() >> 1) - 1;
3430   if (number->IsSmi()) {
3431     hash = smi_get_hash(Smi::cast(number)) & mask;
3432   } else {
3433     hash = double_get_hash(number->Number()) & mask;
3434   }
3435   Object* key = number_string_cache()->get(hash * 2);
3436   if (key == number) {
3437     return String::cast(number_string_cache()->get(hash * 2 + 1));
3438   } else if (key->IsHeapNumber() &&
3439              number->IsHeapNumber() &&
3440              key->Number() == number->Number()) {
3441     return String::cast(number_string_cache()->get(hash * 2 + 1));
3442   }
3443   return undefined_value();
3444 }
3445
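// Layout sketch of the number string cache used above: each entry occupies two
// consecutive slots of the FixedArray,
//   number_string_cache()->get(hash * 2)      // the number used as key
//   number_string_cache()->get(hash * 2 + 1)  // the cached String*
// where hash is masked with (length() / 2 - 1), so the cache length is always
// twice the number of entries.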
3446
3447 void Heap::SetNumberStringCache(Object* number, String* string) {
3448   int hash;
3449   int mask = (number_string_cache()->length() >> 1) - 1;
3450   if (number->IsSmi()) {
3451     hash = smi_get_hash(Smi::cast(number)) & mask;
3452   } else {
3453     hash = double_get_hash(number->Number()) & mask;
3454   }
3455   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3456       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3457     // The first time we have a hash collision, we move to the full sized
3458     // number string cache.
3459     AllocateFullSizeNumberStringCache();
3460     return;
3461   }
3462   number_string_cache()->set(hash * 2, number);
3463   number_string_cache()->set(hash * 2 + 1, string);
3464 }
3465
3466
3467 MaybeObject* Heap::NumberToString(Object* number,
3468                                   bool check_number_string_cache,
3469                                   PretenureFlag pretenure) {
3470   isolate_->counters()->number_to_string_runtime()->Increment();
3471   if (check_number_string_cache) {
3472     Object* cached = GetNumberStringCache(number);
3473     if (cached != undefined_value()) {
3474       return cached;
3475     }
3476   }
3477
3478   char arr[100];
3479   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3480   const char* str;
3481   if (number->IsSmi()) {
3482     int num = Smi::cast(number)->value();
3483     str = IntToCString(num, buffer);
3484   } else {
3485     double num = HeapNumber::cast(number)->value();
3486     str = DoubleToCString(num, buffer);
3487   }
3488
3489   Object* js_string;
3490   MaybeObject* maybe_js_string =
3491       AllocateStringFromOneByte(CStrVector(str), pretenure);
3492   if (maybe_js_string->ToObject(&js_string)) {
3493     SetNumberStringCache(number, String::cast(js_string));
3494   }
3495   return maybe_js_string;
3496 }
3497
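// Illustrative use of NumberToString (a sketch only; assumes a Heap* heap in
// scope and a MaybeObject*-returning caller):
//   Object* str;
//   { MaybeObject* maybe_str = heap->NumberToString(Smi::FromInt(42), true);
//     if (!maybe_str->ToObject(&str)) return maybe_str;
//   }
//   // A later call with the same number is typically served straight from the
//   // number string cache filled by SetNumberStringCache above.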
3498
3499 MaybeObject* Heap::Uint32ToString(uint32_t value,
3500                                   bool check_number_string_cache) {
3501   Object* number;
3502   MaybeObject* maybe = NumberFromUint32(value);
3503   if (!maybe->To<Object>(&number)) return maybe;
3504   return NumberToString(number, check_number_string_cache);
3505 }
3506
3507
3508 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3509   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3510 }
3511
3512
3513 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3514     ExternalArrayType array_type) {
3515   switch (array_type) {
3516     case kExternalByteArray:
3517       return kExternalByteArrayMapRootIndex;
3518     case kExternalUnsignedByteArray:
3519       return kExternalUnsignedByteArrayMapRootIndex;
3520     case kExternalShortArray:
3521       return kExternalShortArrayMapRootIndex;
3522     case kExternalUnsignedShortArray:
3523       return kExternalUnsignedShortArrayMapRootIndex;
3524     case kExternalIntArray:
3525       return kExternalIntArrayMapRootIndex;
3526     case kExternalUnsignedIntArray:
3527       return kExternalUnsignedIntArrayMapRootIndex;
3528     case kExternalFloatArray:
3529       return kExternalFloatArrayMapRootIndex;
3530     case kExternalDoubleArray:
3531       return kExternalDoubleArrayMapRootIndex;
3532     case kExternalPixelArray:
3533       return kExternalPixelArrayMapRootIndex;
3534     default:
3535       UNREACHABLE();
3536       return kUndefinedValueRootIndex;
3537   }
3538 }
3539
3540 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3541     ElementsKind elementsKind) {
3542   switch (elementsKind) {
3543     case EXTERNAL_BYTE_ELEMENTS:
3544       return kEmptyExternalByteArrayRootIndex;
3545     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3546       return kEmptyExternalUnsignedByteArrayRootIndex;
3547     case EXTERNAL_SHORT_ELEMENTS:
3548       return kEmptyExternalShortArrayRootIndex;
3549     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3550       return kEmptyExternalUnsignedShortArrayRootIndex;
3551     case EXTERNAL_INT_ELEMENTS:
3552       return kEmptyExternalIntArrayRootIndex;
3553     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3554       return kEmptyExternalUnsignedIntArrayRootIndex;
3555     case EXTERNAL_FLOAT_ELEMENTS:
3556       return kEmptyExternalFloatArrayRootIndex;
3557     case EXTERNAL_DOUBLE_ELEMENTS:
3558       return kEmptyExternalDoubleArrayRootIndex;
3559     case EXTERNAL_PIXEL_ELEMENTS:
3560       return kEmptyExternalPixelArrayRootIndex;
3561     default:
3562       UNREACHABLE();
3563       return kUndefinedValueRootIndex;
3564   }
3565 }
3566
3567
3568 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3569   return ExternalArray::cast(
3570       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3571 }
3572
3573
3574
3575
3576 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3577   // We need to distinguish the minus zero value and this cannot be
3578   // done after conversion to int. Doing this by comparing bit
3579   // patterns is faster than using fpclassify() et al.
3580   static const DoubleRepresentation minus_zero(-0.0);
3581
3582   DoubleRepresentation rep(value);
3583   if (rep.bits == minus_zero.bits) {
3584     return AllocateHeapNumber(-0.0, pretenure);
3585   }
3586
3587   int int_value = FastD2I(value);
3588   if (value == int_value && Smi::IsValid(int_value)) {
3589     return Smi::FromInt(int_value);
3590   }
3591
3592   // Materialize the value in the heap.
3593   return AllocateHeapNumber(value, pretenure);
3594 }
3595
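// Illustrative outcomes of NumberFromDouble (a sketch only; assumes a Heap*
// heap in scope):
//   heap->NumberFromDouble(7.0, NOT_TENURED);   // Smi::FromInt(7), no allocation
//   heap->NumberFromDouble(-0.0, NOT_TENURED);  // HeapNumber, preserves -0
//   heap->NumberFromDouble(0.5, NOT_TENURED);   // HeapNumber, value not integral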
3596
3597 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3598   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3599   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3600   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3601   Foreign* result;
3602   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3603   if (!maybe_result->To(&result)) return maybe_result;
3604   result->set_foreign_address(address);
3605   return result;
3606 }
3607
3608
3609 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3610   SharedFunctionInfo* share;
3611   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3612   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3613
3614   // Set pointer fields.
3615   share->set_name(name);
3616   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3617   share->set_code(illegal);
3618   share->set_optimized_code_map(Smi::FromInt(0));
3619   share->set_scope_info(ScopeInfo::Empty(isolate_));
3620   Code* construct_stub =
3621       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3622   share->set_construct_stub(construct_stub);
3623   share->set_instance_class_name(Object_string());
3624   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3625   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3626   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3627   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3628   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3629   share->set_ast_node_count(0);
3630   share->set_counters(0);
3631
3632   // Set integer fields (smi or int, depending on the architecture).
3633   share->set_length(0);
3634   share->set_formal_parameter_count(0);
3635   share->set_expected_nof_properties(0);
3636   share->set_num_literals(0);
3637   share->set_start_position_and_type(0);
3638   share->set_end_position(0);
3639   share->set_function_token_position(0);
3640   // All compiler hints default to false or 0.
3641   share->set_compiler_hints(0);
3642   share->set_opt_count(0);
3643
3644   return share;
3645 }
3646
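// Illustrative use of AllocateSharedFunctionInfo (a sketch only; assumes a
// Heap* heap and an internalized String* name in scope):
//   SharedFunctionInfo* shared;
//   { MaybeObject* maybe_shared = heap->AllocateSharedFunctionInfo(name);
//     if (!maybe_shared->To(&shared)) return maybe_shared;
//   }
//   // The fresh SharedFunctionInfo points at the kIllegal builtin and empty
//   // scope info (see the field setup above) until real code is attached.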
3647
3648 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3649                                            JSArray* arguments,
3650                                            int start_position,
3651                                            int end_position,
3652                                            Object* script,
3653                                            Object* stack_trace,
3654                                            Object* stack_frames) {
3655   Object* result;
3656   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3657     if (!maybe_result->ToObject(&result)) return maybe_result;
3658   }
3659   JSMessageObject* message = JSMessageObject::cast(result);
3660   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3661   message->initialize_elements();
3662   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3663   message->set_type(type);
3664   message->set_arguments(arguments);
3665   message->set_start_position(start_position);
3666   message->set_end_position(end_position);
3667   message->set_script(script);
3668   message->set_stack_trace(stack_trace);
3669   message->set_stack_frames(stack_frames);
3670   return result;
3671 }
3672
3673
3674
3675 // Returns true for a character in a range.  Both limits are inclusive.
3676 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3677   // This makes use of the unsigned wraparound.
3678   return character - from <= to - from;
3679 }
3680
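// Worked example of the wraparound trick above:
//   Between('5', '0', '9')  ->  0x05 <= 0x09                  -> true
//   Between('/', '0', '9')  ->  0xffffffff <= 0x09 (wrapped)  -> false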
3681
3682 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3683     Heap* heap,
3684     uint16_t c1,
3685     uint16_t c2) {
3686   String* result;
3687   // Numeric strings have a different hash algorithm not known by
3688   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3689   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3690       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3691     return result;
3692   // Now we know the length is 2, so we might as well make use of that fact
3693   // when building the new string.
3694   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3695     // We can do this.
3696     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3697     Object* result;
3698     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3699       if (!maybe_result->ToObject(&result)) return maybe_result;
3700     }
3701     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3702     dest[0] = static_cast<uint8_t>(c1);
3703     dest[1] = static_cast<uint8_t>(c2);
3704     return result;
3705   } else {
3706     Object* result;
3707     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3708       if (!maybe_result->ToObject(&result)) return maybe_result;
3709     }
3710     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3711     dest[0] = c1;
3712     dest[1] = c2;
3713     return result;
3714   }
3715 }
3716
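// Illustrative outcomes of MakeOrFindTwoCharacterString (a sketch only):
//   (c1 = 'a', c2 = 'b')    -> the internalized "ab" if the string table
//                              already holds it, else a fresh SeqOneByteString.
//   (c1 = '4', c2 = '2')    -> a fresh string; numeric pairs skip the table probe.
//   (c1 = 'a', c2 = 0x3b1)  -> a fresh SeqTwoByteString, since c2 exceeds
//                              String::kMaxOneByteCharCodeU.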
3717
3718 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3719   int first_length = first->length();
3720   if (first_length == 0) {
3721     return second;
3722   }
3723
3724   int second_length = second->length();
3725   if (second_length == 0) {
3726     return first;
3727   }
3728
3729   int length = first_length + second_length;
3730
3731   // Optimization for 2-byte strings often used as keys in a decompression
3732   // dictionary.  Check whether we already have the string in the string
3733   // table to prevent creation of many unnecessary strings.
3734   if (length == 2) {
3735     uint16_t c1 = first->Get(0);
3736     uint16_t c2 = second->Get(0);
3737     return MakeOrFindTwoCharacterString(this, c1, c2);
3738   }
3739
3740   bool first_is_one_byte = first->IsOneByteRepresentation();
3741   bool second_is_one_byte = second->IsOneByteRepresentation();
3742   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3743   // Make sure that an out of memory exception is thrown if the length
3744   // of the new cons string is too large.
3745   if (length > String::kMaxLength || length < 0) {
3746     isolate()->context()->mark_out_of_memory();
3747     return Failure::OutOfMemoryException(0x4);
3748   }
3749
3750   bool is_one_byte_data_in_two_byte_string = false;
3751   if (!is_one_byte) {
3752     // At least one of the strings uses two-byte representation so we
3753     // can't use the fast case code for short ASCII strings below, but
3754     // we can try to save memory if all chars actually fit in ASCII.
3755     is_one_byte_data_in_two_byte_string =
3756         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3757     if (is_one_byte_data_in_two_byte_string) {
3758       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3759     }
3760   }
3761
3762   // If the resulting string is small make a flat string.
3763   if (length < ConsString::kMinLength) {
3764     // Note that neither of the two inputs can be a slice because:
3765     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3766     ASSERT(first->IsFlat());
3767     ASSERT(second->IsFlat());
3768     if (is_one_byte) {
3769       Object* result;
3770       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3771         if (!maybe_result->ToObject(&result)) return maybe_result;
3772       }
3773       // Copy the characters into the new object.
3774       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3775       // Copy first part.
3776       const uint8_t* src;
3777       if (first->IsExternalString()) {
3778         src = ExternalAsciiString::cast(first)->GetChars();
3779       } else {
3780         src = SeqOneByteString::cast(first)->GetChars();
3781       }
3782       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3783       // Copy second part.
3784       if (second->IsExternalString()) {
3785         src = ExternalAsciiString::cast(second)->GetChars();
3786       } else {
3787         src = SeqOneByteString::cast(second)->GetChars();
3788       }
3789       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3790       return result;
3791     } else {
3792       if (is_one_byte_data_in_two_byte_string) {
3793         Object* result;
3794         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3795           if (!maybe_result->ToObject(&result)) return maybe_result;
3796         }
3797         // Copy the characters into the new object.
3798         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3799         String::WriteToFlat(first, dest, 0, first_length);
3800         String::WriteToFlat(second, dest + first_length, 0, second_length);
3801         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3802         return result;
3803       }
3804
3805       Object* result;
3806       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3807         if (!maybe_result->ToObject(&result)) return maybe_result;
3808       }
3809       // Copy the characters into the new object.
3810       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3811       String::WriteToFlat(first, dest, 0, first_length);
3812       String::WriteToFlat(second, dest + first_length, 0, second_length);
3813       return result;
3814     }
3815   }
3816
3817   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3818       cons_ascii_string_map() : cons_string_map();
3819
3820   Object* result;
3821   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3822     if (!maybe_result->ToObject(&result)) return maybe_result;
3823   }
3824
3825   DisallowHeapAllocation no_gc;
3826   ConsString* cons_string = ConsString::cast(result);
3827   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3828   cons_string->set_length(length);
3829   cons_string->set_hash_field(String::kEmptyHashField);
3830   cons_string->set_first(first, mode);
3831   cons_string->set_second(second, mode);
3832   return result;
3833 }
3834
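// Illustrative behaviour of AllocateConsString (a sketch only; assumes two
// flat one-byte strings a and b in scope):
//   MaybeObject* maybe_cons = heap->AllocateConsString(a, b);
//   // If a->length() + b->length() < ConsString::kMinLength the result is a
//   // flat one-byte string holding both parts; otherwise it is a ConsString
//   // whose first/second fields simply point at a and b, with no copying.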
3835
3836 MaybeObject* Heap::AllocateSubString(String* buffer,
3837                                      int start,
3838                                      int end,
3839                                      PretenureFlag pretenure) {
3840   int length = end - start;
3841   if (length <= 0) {
3842     return empty_string();
3843   } else if (length == 1) {
3844     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3845   } else if (length == 2) {
3846     // Optimization for 2-byte strings often used as keys in a decompression
3847     // dictionary.  Check whether we already have the string in the string
3848     // table to prevent creation of many unnecessary strings.
3849     uint16_t c1 = buffer->Get(start);
3850     uint16_t c2 = buffer->Get(start + 1);
3851     return MakeOrFindTwoCharacterString(this, c1, c2);
3852   }
3853
3854   // Make an attempt to flatten the buffer to reduce access time.
3855   buffer = buffer->TryFlattenGetString();
3856
3857   if (!FLAG_string_slices ||
3858       !buffer->IsFlat() ||
3859       length < SlicedString::kMinLength ||
3860       pretenure == TENURED) {
3861     Object* result;
3862     // WriteToFlat takes care of the case when an indirect string has a
3863     // different encoding from its underlying string.  These encodings may
3864     // differ because of externalization.
3865     bool is_one_byte = buffer->IsOneByteRepresentation();
3866     { MaybeObject* maybe_result = is_one_byte
3867                                   ? AllocateRawOneByteString(length, pretenure)
3868                                   : AllocateRawTwoByteString(length, pretenure);
3869       if (!maybe_result->ToObject(&result)) return maybe_result;
3870     }
3871     String* string_result = String::cast(result);
3872     // Copy the characters into the new object.
3873     if (is_one_byte) {
3874       ASSERT(string_result->IsOneByteRepresentation());
3875       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3876       String::WriteToFlat(buffer, dest, start, end);
3877     } else {
3878       ASSERT(string_result->IsTwoByteRepresentation());
3879       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3880       String::WriteToFlat(buffer, dest, start, end);
3881     }
3882     return result;
3883   }
3884
3885   ASSERT(buffer->IsFlat());
3886 #ifdef VERIFY_HEAP
3887   if (FLAG_verify_heap) {
3888     buffer->StringVerify();
3889   }
3890 #endif
3891
3892   Object* result;
3893   // When slicing an indirect string we use its encoding for a newly created
3894   // slice and don't check the encoding of the underlying string.  This is safe
3895   // even if the encodings are different because of externalization.  If an
3896   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3897   // codes of the underlying string must still fit into ASCII (because
3898   // externalization must not change char codes).
3899   { Map* map = buffer->IsOneByteRepresentation()
3900                  ? sliced_ascii_string_map()
3901                  : sliced_string_map();
3902     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3903     if (!maybe_result->ToObject(&result)) return maybe_result;
3904   }
3905
3906   DisallowHeapAllocation no_gc;
3907   SlicedString* sliced_string = SlicedString::cast(result);
3908   sliced_string->set_length(length);
3909   sliced_string->set_hash_field(String::kEmptyHashField);
3910   if (buffer->IsConsString()) {
3911     ConsString* cons = ConsString::cast(buffer);
3912     ASSERT(cons->second()->length() == 0);
3913     sliced_string->set_parent(cons->first());
3914     sliced_string->set_offset(start);
3915   } else if (buffer->IsSlicedString()) {
3916     // Prevent nesting sliced strings.
3917     SlicedString* parent_slice = SlicedString::cast(buffer);
3918     sliced_string->set_parent(parent_slice->parent());
3919     sliced_string->set_offset(start + parent_slice->offset());
3920   } else {
3921     sliced_string->set_parent(buffer);
3922     sliced_string->set_offset(start);
3923   }
3924   ASSERT(sliced_string->parent()->IsSeqString() ||
3925          sliced_string->parent()->IsExternalString());
3926   return result;
3927 }
3928
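// Illustrative behaviour of AllocateSubString (a sketch only; assumes a flat
// one-byte String* s with a few dozen characters in scope):
//   heap->AllocateSubString(s, 0, 1, NOT_TENURED);   // single-character lookup
//   heap->AllocateSubString(s, 0, 2, NOT_TENURED);   // two-character fast path
//   heap->AllocateSubString(s, 0, 20, NOT_TENURED);  // SlicedString when
//       // FLAG_string_slices is on and 20 >= SlicedString::kMinLength,
//       // otherwise a freshly copied sequential string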
3929
3930 MaybeObject* Heap::AllocateExternalStringFromAscii(
3931     const ExternalAsciiString::Resource* resource) {
3932   size_t length = resource->length();
3933   if (length > static_cast<size_t>(String::kMaxLength)) {
3934     isolate()->context()->mark_out_of_memory();
3935     return Failure::OutOfMemoryException(0x5);
3936   }
3937
3938   Map* map = external_ascii_string_map();
3939   Object* result;
3940   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3941     if (!maybe_result->ToObject(&result)) return maybe_result;
3942   }
3943
3944   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3945   external_string->set_length(static_cast<int>(length));
3946   external_string->set_hash_field(String::kEmptyHashField);
3947   external_string->set_resource(resource);
3948
3949   return result;
3950 }
3951
3952
3953 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3954     const ExternalTwoByteString::Resource* resource) {
3955   size_t length = resource->length();
3956   if (length > static_cast<size_t>(String::kMaxLength)) {
3957     isolate()->context()->mark_out_of_memory();
3958     return Failure::OutOfMemoryException(0x6);
3959   }
3960
3961   // For small strings we check whether the resource contains only
3962   // one byte characters.  If yes, we use a different string map.
3963   static const size_t kOneByteCheckLengthLimit = 32;
3964   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3965       String::IsOneByte(resource->data(), static_cast<int>(length));
3966   Map* map = is_one_byte ?
3967       external_string_with_one_byte_data_map() : external_string_map();
3968   Object* result;
3969   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3970     if (!maybe_result->ToObject(&result)) return maybe_result;
3971   }
3972
3973   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3974   external_string->set_length(static_cast<int>(length));
3975   external_string->set_hash_field(String::kEmptyHashField);
3976   external_string->set_resource(resource);
3977
3978   return result;
3979 }
3980
3981
3982 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3983   if (code <= String::kMaxOneByteCharCode) {
3984     Object* value = single_character_string_cache()->get(code);
3985     if (value != undefined_value()) return value;
3986
3987     uint8_t buffer[1];
3988     buffer[0] = static_cast<uint8_t>(code);
3989     Object* result;
3990     MaybeObject* maybe_result =
3991         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3992
3993     if (!maybe_result->ToObject(&result)) return maybe_result;
3994     single_character_string_cache()->set(code, result);
3995     return result;
3996   }
3997
3998   Object* result;
3999   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4000     if (!maybe_result->ToObject(&result)) return maybe_result;
4001   }
4002   String* answer = String::cast(result);
4003   answer->Set(0, code);
4004   return answer;
4005 }
4006
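// Illustrative use of LookupSingleCharacterStringFromCode (a sketch only):
//   heap->LookupSingleCharacterStringFromCode('x');     // cached internalized
//                                                       // one-byte string
//   heap->LookupSingleCharacterStringFromCode(0x2603);  // fresh two-byte
//                                                       // string, not cached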
4007
4008 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4009   if (length < 0 || length > ByteArray::kMaxLength) {
4010     return Failure::OutOfMemoryException(0x7);
4011   }
4012   if (pretenure == NOT_TENURED) {
4013     return AllocateByteArray(length);
4014   }
4015   int size = ByteArray::SizeFor(length);
4016   Object* result;
4017   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4018                    ? old_data_space_->AllocateRaw(size)
4019                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4020     if (!maybe_result->ToObject(&result)) return maybe_result;
4021   }
4022
4023   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4024       byte_array_map());
4025   reinterpret_cast<ByteArray*>(result)->set_length(length);
4026   return result;
4027 }
4028
4029
4030 MaybeObject* Heap::AllocateByteArray(int length) {
4031   if (length < 0 || length > ByteArray::kMaxLength) {
4032     return Failure::OutOfMemoryException(0x8);
4033   }
4034   int size = ByteArray::SizeFor(length);
4035   AllocationSpace space =
4036       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4037   Object* result;
4038   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4039     if (!maybe_result->ToObject(&result)) return maybe_result;
4040   }
4041
4042   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4043       byte_array_map());
4044   reinterpret_cast<ByteArray*>(result)->set_length(length);
4045   return result;
4046 }
4047
4048
4049 void Heap::CreateFillerObjectAt(Address addr, int size) {
4050   if (size == 0) return;
4051   HeapObject* filler = HeapObject::FromAddress(addr);
4052   if (size == kPointerSize) {
4053     filler->set_map_no_write_barrier(one_pointer_filler_map());
4054   } else if (size == 2 * kPointerSize) {
4055     filler->set_map_no_write_barrier(two_pointer_filler_map());
4056   } else {
4057     filler->set_map_no_write_barrier(free_space_map());
4058     FreeSpace::cast(filler)->set_size(size);
4059   }
4060 }
4061
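// Illustrative use of CreateFillerObjectAt (a sketch only; object and new_size
// are hypothetical locals): when an object is trimmed, the freed tail must be
// overwritten so the heap stays iterable:
//   heap->CreateFillerObjectAt(object->address() + new_size,
//                              object->Size() - new_size);
//   // One- and two-pointer gaps get the dedicated filler maps; anything
//   // larger becomes a FreeSpace object that records its own size.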
4062
4063 MaybeObject* Heap::AllocateExternalArray(int length,
4064                                          ExternalArrayType array_type,
4065                                          void* external_pointer,
4066                                          PretenureFlag pretenure) {
4067   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4068   Object* result;
4069   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4070                                             space,
4071                                             OLD_DATA_SPACE);
4072     if (!maybe_result->ToObject(&result)) return maybe_result;
4073   }
4074
4075   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4076       MapForExternalArrayType(array_type));
4077   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4078   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4079       external_pointer);
4080
4081   return result;
4082 }
4083
4084
4085 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4086                               Code::Flags flags,
4087                               Handle<Object> self_reference,
4088                               bool immovable,
4089                               bool crankshafted) {
4090   // Allocate ByteArray before the Code object, so that we do not risk
4091   // leaving an uninitialized Code object (and breaking the heap).
4092   ByteArray* reloc_info;
4093   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4094   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4095
4096   // Compute size.
4097   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4098   int obj_size = Code::SizeFor(body_size);
4099   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4100   MaybeObject* maybe_result;
4101   // Large code objects and code objects which should stay at a fixed address
4102   // are allocated in large object space.
4103   HeapObject* result;
4104   bool force_lo_space = obj_size > code_space()->AreaSize();
4105   if (force_lo_space) {
4106     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4107   } else {
4108     maybe_result = code_space_->AllocateRaw(obj_size);
4109   }
4110   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4111
4112   if (immovable && !force_lo_space &&
4113       // Objects on the first page of each space are never moved.
4114       !code_space_->FirstPage()->Contains(result->address())) {
4115     // Discard the first code allocation, which was on a page where it could be
4116     // moved.
4117     CreateFillerObjectAt(result->address(), obj_size);
4118     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4119     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4120   }
4121
4122   // Initialize the object
4123   result->set_map_no_write_barrier(code_map());
4124   Code* code = Code::cast(result);
4125   ASSERT(!isolate_->code_range()->exists() ||
4126       isolate_->code_range()->contains(code->address()));
4127   code->set_instruction_size(desc.instr_size);
4128   code->set_relocation_info(reloc_info);
4129   code->set_flags(flags);
4130   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4131     code->set_check_type(RECEIVER_MAP_CHECK);
4132   }
4133   code->set_is_crankshafted(crankshafted);
4134   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4135   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4136   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4137   code->set_gc_metadata(Smi::FromInt(0));
4138   code->set_ic_age(global_ic_age_);
4139   code->set_prologue_offset(kPrologueOffsetNotSet);
4140   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4141     code->set_marked_for_deoptimization(false);
4142   }
4143   // Allow self references to the created code object by patching the handle to
4144   // point to the newly allocated Code object.
4145   if (!self_reference.is_null()) {
4146     *(self_reference.location()) = code;
4147   }
4148   // Migrate generated code.
4149   // The generated code can contain Object** values (typically from handles)
4150   // that are dereferenced during the copy to point directly to the actual heap
4151   // objects. These pointers can include references to the code object itself,
4152   // through the self_reference parameter.
4153   code->CopyFrom(desc);
4154
4155 #ifdef VERIFY_HEAP
4156   if (FLAG_verify_heap) {
4157     code->Verify();
4158   }
4159 #endif
4160   return code;
4161 }
4162
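// Illustrative call into CreateCode (a sketch only; desc is a hypothetical
// CodeDesc filled in by an assembler):
//   Code* code;
//   { MaybeObject* maybe_code = heap->CreateCode(
//         desc, Code::ComputeFlags(Code::STUB), Handle<Object>::null(),
//         false, false);
//     if (!maybe_code->To(&code)) return maybe_code;
//   }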
4163
4164 MaybeObject* Heap::CopyCode(Code* code) {
4165   // Allocate an object the same size as the code object.
4166   int obj_size = code->Size();
4167   MaybeObject* maybe_result;
4168   if (obj_size > code_space()->AreaSize()) {
4169     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4170   } else {
4171     maybe_result = code_space_->AllocateRaw(obj_size);
4172   }
4173
4174   Object* result;
4175   if (!maybe_result->ToObject(&result)) return maybe_result;
4176
4177   // Copy code object.
4178   Address old_addr = code->address();
4179   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4180   CopyBlock(new_addr, old_addr, obj_size);
4181   // Relocate the copy.
4182   Code* new_code = Code::cast(result);
4183   ASSERT(!isolate_->code_range()->exists() ||
4184       isolate_->code_range()->contains(code->address()));
4185   new_code->Relocate(new_addr - old_addr);
4186   return new_code;
4187 }
4188
4189
4190 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4191   // Allocate ByteArray before the Code object, so that we do not risk
4192   // leaving an uninitialized Code object (and breaking the heap).
4193   Object* reloc_info_array;
4194   { MaybeObject* maybe_reloc_info_array =
4195         AllocateByteArray(reloc_info.length(), TENURED);
4196     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4197       return maybe_reloc_info_array;
4198     }
4199   }
4200
4201   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4202
4203   int new_obj_size = Code::SizeFor(new_body_size);
4204
4205   Address old_addr = code->address();
4206
4207   size_t relocation_offset =
4208       static_cast<size_t>(code->instruction_end() - old_addr);
4209
4210   MaybeObject* maybe_result;
4211   if (new_obj_size > code_space()->AreaSize()) {
4212     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4213   } else {
4214     maybe_result = code_space_->AllocateRaw(new_obj_size);
4215   }
4216
4217   Object* result;
4218   if (!maybe_result->ToObject(&result)) return maybe_result;
4219
4220   // Copy code object.
4221   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4222
4223   // Copy header and instructions.
4224   CopyBytes(new_addr, old_addr, relocation_offset);
4225
4226   Code* new_code = Code::cast(result);
4227   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4228
4229   // Copy patched rinfo.
4230   CopyBytes(new_code->relocation_start(),
4231             reloc_info.start(),
4232             static_cast<size_t>(reloc_info.length()));
4233
4234   // Relocate the copy.
4235   ASSERT(!isolate_->code_range()->exists() ||
4236       isolate_->code_range()->contains(code->address()));
4237   new_code->Relocate(new_addr - old_addr);
4238
4239 #ifdef VERIFY_HEAP
4240   if (FLAG_verify_heap) {
4241     code->Verify();
4242   }
4243 #endif
4244   return new_code;
4245 }
4246
4247
4248 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4249     Handle<AllocationSite> allocation_site) {
4250   ASSERT(gc_state_ == NOT_IN_GC);
4251   ASSERT(map->instance_type() != MAP_TYPE);
4252   // If allocation failures are disallowed, we may allocate in a different
4253   // space when new space is full and the object is not a large object.
4254   AllocationSpace retry_space =
4255       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4256   int size = map->instance_size() + AllocationMemento::kSize;
4257   Object* result;
4258   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4259   if (!maybe_result->ToObject(&result)) return maybe_result;
4260   // No need for write barrier since object is white and map is in old space.
4261   HeapObject::cast(result)->set_map_no_write_barrier(map);
4262   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4263       reinterpret_cast<Address>(result) + map->instance_size());
4264   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4265   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4266   return result;
4267 }
4268
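// Memory layout sketch for AllocateWithAllocationSite: the object and its
// memento are allocated as one chunk,
//   [ object of map->instance_size() bytes | AllocationMemento ]
// with the memento's allocation_site field pointing back at the site that was
// passed in.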
4269
4270 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4271   ASSERT(gc_state_ == NOT_IN_GC);
4272   ASSERT(map->instance_type() != MAP_TYPE);
4273   // If allocation failures are disallowed, we may allocate in a different
4274   // space when new space is full and the object is not a large object.
4275   AllocationSpace retry_space =
4276       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4277   int size = map->instance_size();
4278   Object* result;
4279   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4280   if (!maybe_result->ToObject(&result)) return maybe_result;
4281   // No need for write barrier since object is white and map is in old space.
4282   HeapObject::cast(result)->set_map_no_write_barrier(map);
4283   return result;
4284 }
4285
4286
4287 void Heap::InitializeFunction(JSFunction* function,
4288                               SharedFunctionInfo* shared,
4289                               Object* prototype) {
4290   ASSERT(!prototype->IsMap());
4291   function->initialize_properties();
4292   function->initialize_elements();
4293   function->set_shared(shared);
4294   function->set_code(shared->code());
4295   function->set_prototype_or_initial_map(prototype);
4296   function->set_context(undefined_value());
4297   function->set_literals_or_bindings(empty_fixed_array());
4298   function->set_next_function_link(undefined_value());
4299 }
4300
4301
4302 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4303   // Make sure to use globals from the function's context, since the function
4304   // can be from a different context.
4305   Context* native_context = function->context()->native_context();
4306   Map* new_map;
4307   if (function->shared()->is_generator()) {
4308     // Generator prototypes can share maps since they don't have "constructor"
4309     // properties.
4310     new_map = native_context->generator_object_prototype_map();
4311   } else {
4312     // Each function prototype gets a fresh map to avoid unwanted sharing of
4313     // maps between prototypes of different constructors.
4314     JSFunction* object_function = native_context->object_function();
4315     ASSERT(object_function->has_initial_map());
4316     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4317     if (!maybe_map->To(&new_map)) return maybe_map;
4318   }
4319
4320   Object* prototype;
4321   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4322   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4323
4324   if (!function->shared()->is_generator()) {
4325     MaybeObject* maybe_failure =
4326         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4327             constructor_string(), function, DONT_ENUM);
4328     if (maybe_failure->IsFailure()) return maybe_failure;
4329   }
4330
4331   return prototype;
4332 }
4333
4334
4335 MaybeObject* Heap::AllocateFunction(Map* function_map,
4336                                     SharedFunctionInfo* shared,
4337                                     Object* prototype,
4338                                     PretenureFlag pretenure) {
4339   AllocationSpace space =
4340       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4341   Object* result;
4342   { MaybeObject* maybe_result = Allocate(function_map, space);
4343     if (!maybe_result->ToObject(&result)) return maybe_result;
4344   }
4345   InitializeFunction(JSFunction::cast(result), shared, prototype);
4346   return result;
4347 }
4348
4349
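// Note: strict mode arguments objects have no "callee" property, so the strict
// boilerplate is smaller (kArgumentsObjectSizeStrict vs. kArgumentsObjectSize)
// and only the length slot is written for them below.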
4350 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4351   // To get fast allocation and map sharing for arguments objects we
4352   // allocate them based on an arguments boilerplate.
4353
4354   JSObject* boilerplate;
4355   int arguments_object_size;
4356   bool strict_mode_callee = callee->IsJSFunction() &&
4357       !JSFunction::cast(callee)->shared()->is_classic_mode();
4358   if (strict_mode_callee) {
4359     boilerplate =
4360         isolate()->context()->native_context()->
4361             strict_mode_arguments_boilerplate();
4362     arguments_object_size = kArgumentsObjectSizeStrict;
4363   } else {
4364     boilerplate =
4365         isolate()->context()->native_context()->arguments_boilerplate();
4366     arguments_object_size = kArgumentsObjectSize;
4367   }
4368
4369   // This calls Copy directly rather than using Heap::AllocateRaw so we
4370   // duplicate the check here.
4371   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4372
4373   // Check that the size of the boilerplate matches our
4374   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4375   // on the size being a known constant.
4376   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4377
4378   // Do the allocation.
4379   Object* result;
4380   { MaybeObject* maybe_result =
4381         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4382     if (!maybe_result->ToObject(&result)) return maybe_result;
4383   }
4384
4385   // Copy the content. The arguments boilerplate doesn't have any
4386   // fields that point to new space so it's safe to skip the write
4387   // barrier here.
4388   CopyBlock(HeapObject::cast(result)->address(),
4389             boilerplate->address(),
4390             JSObject::kHeaderSize);
4391
4392   // Set the length property.
4393   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4394                                                 Smi::FromInt(length),
4395                                                 SKIP_WRITE_BARRIER);
4396   // Set the callee property for non-strict mode arguments object only.
4397   if (!strict_mode_callee) {
4398     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4399                                                   callee);
4400   }
4401
4402   // Check the state of the object.
4403   ASSERT(JSObject::cast(result)->HasFastProperties());
4404   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4405
4406   return result;
4407 }
4408
4409
4410 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4411   ASSERT(!fun->has_initial_map());
4412
4413   // First create a new map with the size and number of in-object properties
4414   // suggested by the function.
4415   InstanceType instance_type;
4416   int instance_size;
4417   int in_object_properties;
4418   if (fun->shared()->is_generator()) {
4419     instance_type = JS_GENERATOR_OBJECT_TYPE;
4420     instance_size = JSGeneratorObject::kSize;
4421     in_object_properties = 0;
4422   } else {
4423     instance_type = JS_OBJECT_TYPE;
4424     instance_size = fun->shared()->CalculateInstanceSize();
4425     in_object_properties = fun->shared()->CalculateInObjectProperties();
4426   }
4427   Map* map;
4428   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4429   if (!maybe_map->To(&map)) return maybe_map;
4430
4431   // Fetch or allocate prototype.
4432   Object* prototype;
4433   if (fun->has_instance_prototype()) {
4434     prototype = fun->instance_prototype();
4435   } else {
4436     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4437     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4438   }
4439   map->set_inobject_properties(in_object_properties);
4440   map->set_unused_property_fields(in_object_properties);
4441   map->set_prototype(prototype);
4442   ASSERT(map->has_fast_object_elements());
4443
4444   if (!fun->shared()->is_generator()) {
4445     fun->shared()->StartInobjectSlackTracking(map);
4446   }
4447
4448   return map;
4449 }
4450
4451
4452 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4453                                      FixedArray* properties,
4454                                      Map* map) {
4455   obj->set_properties(properties);
4456   obj->initialize_elements();
4457   // TODO(1240798): Initialize the object's body using valid initial values
4458   // according to the object's initial map.  For example, if the map's
4459   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4460   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4461   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4462   // verification code has to cope with (temporarily) invalid objects.  See,
4463   // for example, JSArray::JSArrayVerify.
4464   Object* filler;
4465   // We cannot always fill with one_pointer_filler_map because objects
4466   // created from API functions expect their internal fields to be initialized
4467   // with undefined_value.
4468   // Pre-allocated fields need to be initialized with undefined_value as well
4469   // so that object accesses before the constructor completes (e.g. in the
4470   // debugger) will not cause a crash.
4471   if (map->constructor()->IsJSFunction() &&
4472       JSFunction::cast(map->constructor())->shared()->
4473           IsInobjectSlackTrackingInProgress()) {
4474     // We might want to shrink the object later.
4475     ASSERT(obj->GetInternalFieldCount() == 0);
4476     filler = Heap::one_pointer_filler_map();
4477   } else {
4478     filler = Heap::undefined_value();
4479   }
4480   obj->InitializeBody(map, Heap::undefined_value(), filler);
4481 }
4482
4483
4484 MaybeObject* Heap::AllocateJSObjectFromMap(
4485     Map* map, PretenureFlag pretenure, bool allocate_properties) {
4486   // JSFunctions should be allocated using AllocateFunction to be
4487   // properly initialized.
4488   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4489
4490   // Both types of global objects should be allocated using
4491   // AllocateGlobalObject to be properly initialized.
4492   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4493   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4494
4495   // Allocate the backing storage for the properties.
4496   FixedArray* properties;
4497   if (allocate_properties) {
4498     int prop_size = map->InitialPropertiesLength();
4499     ASSERT(prop_size >= 0);
4500     { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4501       if (!maybe_properties->To(&properties)) return maybe_properties;
4502     }
4503   } else {
4504     properties = empty_fixed_array();
4505   }
4506
4507   // Allocate the JSObject.
4508   AllocationSpace space =
4509       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4510   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4511   Object* obj;
4512   MaybeObject* maybe_obj = Allocate(map, space);
4513   if (!maybe_obj->To(&obj)) return maybe_obj;
4514
4515   // Initialize the JSObject.
4516   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4517   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4518          JSObject::cast(obj)->HasExternalArrayElements());
4519   return obj;
4520 }
4521
4522
4523 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4524     Map* map, Handle<AllocationSite> allocation_site) {
4525   // JSFunctions should be allocated using AllocateFunction to be
4526   // properly initialized.
4527   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4528
4529   // Both types of global objects should be allocated using
4530   // AllocateGlobalObject to be properly initialized.
4531   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4532   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4533
4534   // Allocate the backing storage for the properties.
4535   int prop_size = map->InitialPropertiesLength();
4536   ASSERT(prop_size >= 0);
4537   FixedArray* properties;
4538   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4539     if (!maybe_properties->To(&properties)) return maybe_properties;
4540   }
4541
4542   // Allocate the JSObject.
4543   AllocationSpace space = NEW_SPACE;
4544   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4545   Object* obj;
4546   MaybeObject* maybe_obj =
4547       AllocateWithAllocationSite(map, space, allocation_site);
4548   if (!maybe_obj->To(&obj)) return maybe_obj;
4549
4550   // Initialize the JSObject.
4551   InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4552   ASSERT(JSObject::cast(obj)->HasFastElements());
4553   return obj;
4554 }
4555
4556
4557 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4558                                     PretenureFlag pretenure) {
4559   // Allocate the initial map if absent.
4560   if (!constructor->has_initial_map()) {
4561     Object* initial_map;
4562     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4563       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4564     }
4565     constructor->set_initial_map(Map::cast(initial_map));
4566     Map::cast(initial_map)->set_constructor(constructor);
4567   }
4568   // Allocate the object based on the constructor's initial map.
4569   MaybeObject* result = AllocateJSObjectFromMap(
4570       constructor->initial_map(), pretenure);
4571 #ifdef DEBUG
4572   // Make sure result is NOT a global object if valid.
4573   Object* non_failure;
4574   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4575 #endif
4576   return result;
4577 }
4578
4579
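// The Smi in transition_info encodes the ElementsKind this site has produced
// before. If it differs from the initial map's kind, the map is transitioned
// up front and AllocationSite::GetMode() decides whether a memento is still
// worth attaching for the new kind (see the two allocation paths below).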
4580 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4581     Handle<AllocationSite> allocation_site) {
4582   // Allocate the initial map if absent.
4583   if (!constructor->has_initial_map()) {
4584     Object* initial_map;
4585     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4586       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4587     }
4588     constructor->set_initial_map(Map::cast(initial_map));
4589     Map::cast(initial_map)->set_constructor(constructor);
4590   }
4591   // Allocate the object based on the constructor's initial map, or on the
4592   // elements kind advice recorded in the allocation site.
4593   Map* initial_map = constructor->initial_map();
4594
4595   Smi* smi = Smi::cast(allocation_site->transition_info());
4596   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4597   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4598   if (to_kind != initial_map->elements_kind()) {
4599     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4600     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4601     // Possibly alter the mode, since we found an updated elements kind
4602     // in the allocation site.
4603     mode = AllocationSite::GetMode(to_kind);
4604   }
4605
4606   MaybeObject* result;
4607   if (mode == TRACK_ALLOCATION_SITE) {
4608     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4609         allocation_site);
4610   } else {
4611     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4612   }
4613 #ifdef DEBUG
4614   // Make sure result is NOT a global object if valid.
4615   Object* non_failure;
4616   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4617 #endif
4618   return result;
4619 }
4620
4621
4622 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4623   ASSERT(function->shared()->is_generator());
4624   Map *map;
4625   if (function->has_initial_map()) {
4626     map = function->initial_map();
4627   } else {
4628     // Allocate the initial map if absent.
4629     MaybeObject* maybe_map = AllocateInitialMap(function);
4630     if (!maybe_map->To(&map)) return maybe_map;
4631     function->set_initial_map(map);
4632     map->set_constructor(function);
4633   }
4634   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4635   return AllocateJSObjectFromMap(map);
4636 }
4637
4638
4639 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4640   // Allocate a fresh map. Modules do not have a prototype.
4641   Map* map;
4642   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4643   if (!maybe_map->To(&map)) return maybe_map;
4644   // Allocate the object based on the map.
4645   JSModule* module;
4646   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4647   if (!maybe_module->To(&module)) return maybe_module;
4648   module->set_context(context);
4649   module->set_scope_info(scope_info);
4650   return module;
4651 }
4652
4653
4654 MaybeObject* Heap::AllocateJSArrayAndStorage(
4655     ElementsKind elements_kind,
4656     int length,
4657     int capacity,
4658     ArrayStorageAllocationMode mode,
4659     PretenureFlag pretenure) {
4660   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4661   JSArray* array;
4662   if (!maybe_array->To(&array)) return maybe_array;
4663
4664   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4665   // for performance reasons.
4666   ASSERT(capacity >= length);
4667
4668   if (capacity == 0) {
4669     array->set_length(Smi::FromInt(0));
4670     array->set_elements(empty_fixed_array());
4671     return array;
4672   }
4673
4674   FixedArrayBase* elms;
4675   MaybeObject* maybe_elms = NULL;
4676   if (IsFastDoubleElementsKind(elements_kind)) {
4677     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4678       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4679     } else {
4680       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4681       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4682     }
4683   } else {
4684     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4685     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4686       maybe_elms = AllocateUninitializedFixedArray(capacity);
4687     } else {
4688       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4689       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4690     }
4691   }
4692   if (!maybe_elms->To(&elms)) return maybe_elms;
4693
4694   array->set_elements(elms);
4695   array->set_length(Smi::FromInt(length));
4696   return array;
4697 }
4698
4699
4700 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4701     ElementsKind elements_kind,
4702     int length,
4703     int capacity,
4704     Handle<AllocationSite> allocation_site,
4705     ArrayStorageAllocationMode mode) {
4706   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4707       allocation_site);
4708   JSArray* array;
4709   if (!maybe_array->To(&array)) return maybe_array;
4710   return AllocateJSArrayStorage(array, length, capacity, mode);
4711 }
4712
4713
4714 MaybeObject* Heap::AllocateJSArrayStorage(
4715     JSArray* array,
4716     int length,
4717     int capacity,
4718     ArrayStorageAllocationMode mode) {
4719   ASSERT(capacity >= length);
4720
4721   if (capacity == 0) {
4722     array->set_length(Smi::FromInt(0));
4723     array->set_elements(empty_fixed_array());
4724     return array;
4725   }
4726
4727   FixedArrayBase* elms;
4728   MaybeObject* maybe_elms = NULL;
4729   ElementsKind elements_kind = array->GetElementsKind();
4730   if (IsFastDoubleElementsKind(elements_kind)) {
4731     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4732       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4733     } else {
4734       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4735       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4736     }
4737   } else {
4738     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4739     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4740       maybe_elms = AllocateUninitializedFixedArray(capacity);
4741     } else {
4742       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4743       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4744     }
4745   }
4746   if (!maybe_elms->To(&elms)) return maybe_elms;
4747
4748   array->set_elements(elms);
4749   array->set_length(Smi::FromInt(length));
4750   return array;
4751 }
4752
4753
4754 MaybeObject* Heap::AllocateJSArrayWithElements(
4755     FixedArrayBase* elements,
4756     ElementsKind elements_kind,
4757     int length,
4758     PretenureFlag pretenure) {
4759   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4760   JSArray* array;
4761   if (!maybe_array->To(&array)) return maybe_array;
4762
4763   array->set_elements(elements);
4764   array->set_length(Smi::FromInt(length));
4765   array->ValidateElements();
4766   return array;
4767 }
4768
4769
4770 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4771   // Allocate map.
4772   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4773   // maps. Will probably depend on the identity of the handler object, too.
4774   Map* map;
4775   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4776   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4777   map->set_prototype(prototype);
4778
4779   // Allocate the proxy object.
4780   JSProxy* result;
4781   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4782   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4783   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4784   result->set_handler(handler);
4785   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4786   return result;
4787 }
4788
4789
4790 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4791                                            Object* call_trap,
4792                                            Object* construct_trap,
4793                                            Object* prototype) {
4794   // Allocate map.
4795   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4796   // maps. Will probably depend on the identity of the handler object, too.
4797   Map* map;
4798   MaybeObject* maybe_map_obj =
4799       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4800   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4801   map->set_prototype(prototype);
4802
4803   // Allocate the proxy object.
4804   JSFunctionProxy* result;
4805   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4806   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4807   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4808   result->set_handler(handler);
4809   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4810   result->set_call_trap(call_trap);
4811   result->set_construct_trap(construct_trap);
4812   return result;
4813 }
4814
4815
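// Rough sizing rationale: the dictionary starts at
// NumberOfOwnDescriptors() * 2 + initial_size entries so the accessors copied
// from the object template below should fit without an immediate resize;
// 64 vs. 512 is a bootstrap heuristic for the global object vs. the builtins
// object.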
4816 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4817   ASSERT(constructor->has_initial_map());
4818   Map* map = constructor->initial_map();
4819   ASSERT(map->is_dictionary_map());
4820
4821   // Make sure no field properties are described in the initial map.
4822   // This guarantees us that normalizing the properties does not
4823   // require us to change property values to PropertyCells.
4824   ASSERT(map->NextFreePropertyIndex() == 0);
4825
4826   // Make sure we don't have a ton of pre-allocated slots in the
4827   // global objects. They will be unused once we normalize the object.
4828   ASSERT(map->unused_property_fields() == 0);
4829   ASSERT(map->inobject_properties() == 0);
4830
4831   // Initial size of the backing store to avoid resize of the storage during
4832   // bootstrapping. The size differs between the JS global object and the
4833   // builtins object.
4834   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4835
4836   // Allocate a dictionary object for backing storage.
4837   NameDictionary* dictionary;
4838   MaybeObject* maybe_dictionary =
4839       NameDictionary::Allocate(
4840           this,
4841           map->NumberOfOwnDescriptors() * 2 + initial_size);
4842   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4843
4844   // The global object might be created from an object template with accessors.
4845   // Fill these accessors into the dictionary.
4846   DescriptorArray* descs = map->instance_descriptors();
4847   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4848     PropertyDetails details = descs->GetDetails(i);
4849     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4850     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4851     Object* value = descs->GetCallbacksObject(i);
4852     MaybeObject* maybe_value = AllocatePropertyCell(value);
4853     if (!maybe_value->ToObject(&value)) return maybe_value;
4854
4855     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4856     if (!maybe_added->To(&dictionary)) return maybe_added;
4857   }
4858
4859   // Allocate the global object and initialize it with the backing store.
4860   JSObject* global;
4861   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4862   if (!maybe_global->To(&global)) return maybe_global;
4863
4864   InitializeJSObjectFromMap(global, dictionary, map);
4865
4866   // Create a new map for the global object.
4867   Map* new_map;
4868   MaybeObject* maybe_map = map->CopyDropDescriptors();
4869   if (!maybe_map->To(&new_map)) return maybe_map;
4870   new_map->set_dictionary_map(true);
4871
4872   // Set up the global object as a normalized object.
4873   global->set_map(new_map);
4874   global->set_properties(dictionary);
4875
4876   // Make sure result is a global object with properties in dictionary.
4877   ASSERT(global->IsGlobalObject());
4878   ASSERT(!global->HasFastProperties());
4879   return global;
4880 }
4881
4882
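// Two copy paths: if the clone may land in old space (always_allocate()), the
// pointer fields beyond the header are recorded via RecordWrites and later
// stores use UPDATE_WRITE_BARRIER; a clone known to be in new space skips
// both.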
4883 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4884   // Never used to copy functions.  If functions need to be copied we
4885   // have to be careful to clear the literals array.
4886   SLOW_ASSERT(!source->IsJSFunction());
4887
4888   // Make the clone.
4889   Map* map = source->map();
4890   int object_size = map->instance_size();
4891   Object* clone;
4892
4893   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4894
4895   // If we're forced to always allocate, we use the general allocation
4896   // functions which may leave us with an object in old space.
4897   if (always_allocate()) {
4898     { MaybeObject* maybe_clone =
4899           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4900       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4901     }
4902     Address clone_address = HeapObject::cast(clone)->address();
4903     CopyBlock(clone_address,
4904               source->address(),
4905               object_size);
4906     // Update write barrier for all fields that lie beyond the header.
4907     RecordWrites(clone_address,
4908                  JSObject::kHeaderSize,
4909                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4910   } else {
4911     wb_mode = SKIP_WRITE_BARRIER;
4912
4913     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4914       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4915     }
4916     SLOW_ASSERT(InNewSpace(clone));
4917     // Since we know the clone is allocated in new space, we can copy
4918     // the contents without worrying about updating the write barrier.
4919     CopyBlock(HeapObject::cast(clone)->address(),
4920               source->address(),
4921               object_size);
4922   }
4923
4924   SLOW_ASSERT(
4925       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4926   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4927   FixedArray* properties = FixedArray::cast(source->properties());
4928   // Update elements if necessary.
4929   if (elements->length() > 0) {
4930     Object* elem;
4931     { MaybeObject* maybe_elem;
4932       if (elements->map() == fixed_cow_array_map()) {
4933         maybe_elem = FixedArray::cast(elements);
4934       } else if (source->HasFastDoubleElements()) {
4935         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4936       } else {
4937         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4938       }
4939       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4940     }
4941     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4942   }
4943   // Update properties if necessary.
4944   if (properties->length() > 0) {
4945     Object* prop;
4946     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4947       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4948     }
4949     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4950   }
4951   // Return the new clone.
4952   return clone;
4953 }
4954
4955
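// Layout sketch when the memento is allocated inline
// (adjusted_object_size > object_size):
//
//   clone -> | JSArray fields: object_size bytes | AllocationMemento |
//
// If the inline slot was not reserved but the clone still ends up in new
// space, a memento struct is allocated separately right afterwards instead.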
4956 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4957     JSObject* source,
4958     AllocationSite* site) {
4959   // Never used to copy functions.  If functions need to be copied we
4960   // have to be careful to clear the literals array.
4961   SLOW_ASSERT(!source->IsJSFunction());
4962
4963   // Make the clone.
4964   Map* map = source->map();
4965   int object_size = map->instance_size();
4966   Object* clone;
4967
4968   ASSERT(map->CanTrackAllocationSite());
4969   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4970   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4971
4972   // If we're forced to always allocate, we use the general allocation
4973   // functions which may leave us with an object in old space.
4974   int adjusted_object_size = object_size;
4975   if (always_allocate()) {
4976     // We'll only track the origin if we are certain to allocate in new space.
4977     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4978     if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4979       adjusted_object_size += AllocationMemento::kSize;
4980     }
4981
4982     { MaybeObject* maybe_clone =
4983           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4984       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4985     }
4986     Address clone_address = HeapObject::cast(clone)->address();
4987     CopyBlock(clone_address,
4988               source->address(),
4989               object_size);
4990     // Update write barrier for all fields that lie beyond the header.
4991     int write_barrier_offset = adjusted_object_size > object_size
4992         ? JSArray::kSize + AllocationMemento::kSize
4993         : JSObject::kHeaderSize;
4994     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4995       RecordWrites(clone_address,
4996                    write_barrier_offset,
4997                    (object_size - write_barrier_offset) / kPointerSize);
4998     }
4999
5000     // Track allocation site information, if we failed to allocate it inline.
5001     if (InNewSpace(clone) &&
5002         adjusted_object_size == object_size) {
5003       MaybeObject* maybe_alloc_memento =
5004           AllocateStruct(ALLOCATION_MEMENTO_TYPE);
5005       AllocationMemento* alloc_memento;
5006       if (maybe_alloc_memento->To(&alloc_memento)) {
5007         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5008         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5009       }
5010     }
5011   } else {
5012     wb_mode = SKIP_WRITE_BARRIER;
5013     adjusted_object_size += AllocationMemento::kSize;
5014
5015     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5016       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5017     }
5018     SLOW_ASSERT(InNewSpace(clone));
5019     // Since we know the clone is allocated in new space, we can copy
5020     // the contents without worrying about updating the write barrier.
5021     CopyBlock(HeapObject::cast(clone)->address(),
5022               source->address(),
5023               object_size);
5024   }
5025
5026   if (adjusted_object_size > object_size) {
5027     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5028         reinterpret_cast<Address>(clone) + object_size);
5029     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5030     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5031   }
5032
5033   SLOW_ASSERT(
5034       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5035   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5036   FixedArray* properties = FixedArray::cast(source->properties());
5037   // Update elements if necessary.
5038   if (elements->length() > 0) {
5039     Object* elem;
5040     { MaybeObject* maybe_elem;
5041       if (elements->map() == fixed_cow_array_map()) {
5042         maybe_elem = FixedArray::cast(elements);
5043       } else if (source->HasFastDoubleElements()) {
5044         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5045       } else {
5046         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5047       }
5048       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5049     }
5050     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5051   }
5052   // Update properties if necessary.
5053   if (properties->length() > 0) {
5054     Object* prop;
5055     { MaybeObject* maybe_prop = CopyFixedArray(properties);
5056       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5057     }
5058     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5059   }
5060   // Return the new clone.
5061   return clone;
5062 }
5063
5064
5065 MaybeObject* Heap::ReinitializeJSReceiver(
5066     JSReceiver* object, InstanceType type, int size) {
5067   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5068
5069   // Allocate fresh map.
5070   // TODO(rossberg): Once we optimize proxies, cache these maps.
5071   Map* map;
5072   MaybeObject* maybe = AllocateMap(type, size);
5073   if (!maybe->To<Map>(&map)) return maybe;
5074
5075   // Check that the receiver has at least the size of the fresh object.
5076   int size_difference = object->map()->instance_size() - map->instance_size();
5077   ASSERT(size_difference >= 0);
5078
5079   map->set_prototype(object->map()->prototype());
5080
5081   // Allocate the backing storage for the properties.
5082   int prop_size = map->unused_property_fields() - map->inobject_properties();
5083   Object* properties;
5084   maybe = AllocateFixedArray(prop_size, TENURED);
5085   if (!maybe->ToObject(&properties)) return maybe;
5086
5087   // Functions require some allocation, which might fail here.
5088   SharedFunctionInfo* shared = NULL;
5089   if (type == JS_FUNCTION_TYPE) {
5090     String* name;
5091     maybe =
5092         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5093     if (!maybe->To<String>(&name)) return maybe;
5094     maybe = AllocateSharedFunctionInfo(name);
5095     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5096   }
5097
5098   // Because of possible retries of this function after failure,
5099   // we must NOT fail after this point, where we have changed the type!
5100
5101   // Reset the map for the object.
5102   object->set_map(map);
5103   JSObject* jsobj = JSObject::cast(object);
5104
5105   // Reinitialize the object from the constructor map.
5106   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5107
5108   // Functions require some minimal initialization.
5109   if (type == JS_FUNCTION_TYPE) {
5110     map->set_function_with_prototype(true);
5111     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5112     JSFunction::cast(object)->set_context(
5113         isolate()->context()->native_context());
5114   }
5115
5116   // Put in filler if the new object is smaller than the old.
5117   if (size_difference > 0) {
5118     CreateFillerObjectAt(
5119         object->address() + map->instance_size(), size_difference);
5120   }
5121
5122   return object;
5123 }
5124
5125
5126 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5127                                              JSGlobalProxy* object) {
5128   ASSERT(constructor->has_initial_map());
5129   Map* map = constructor->initial_map();
5130
5131   // Check that the already allocated object has the same size and type as
5132   // objects allocated using the constructor.
5133   ASSERT(map->instance_size() == object->map()->instance_size());
5134   ASSERT(map->instance_type() == object->map()->instance_type());
5135
5136   // Allocate the backing storage for the properties.
5137   int prop_size = map->unused_property_fields() - map->inobject_properties();
5138   Object* properties;
5139   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5140     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5141   }
5142
5143   // Reset the map for the object.
5144   object->set_map(constructor->initial_map());
5145
5146   // Reinitialize the object from the constructor map.
5147   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5148   return object;
5149 }
5150
5151
5152 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5153                                            PretenureFlag pretenure) {
5154   int length = string.length();
5155   if (length == 1) {
5156     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5157   }
5158   Object* result;
5159   { MaybeObject* maybe_result =
5160         AllocateRawOneByteString(string.length(), pretenure);
5161     if (!maybe_result->ToObject(&result)) return maybe_result;
5162   }
5163
5164   // Copy the characters into the new object.
5165   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5166             string.start(),
5167             length);
5168   return result;
5169 }
5170
5171
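// Worked example (illustrative): for "abc\xE2\x82\xAC" the first three bytes
// are one-byte characters (non_ascii_start == 3) and the remaining three bytes
// decode to U+20AC, a single UTF-16 code unit, so chars == 3 + 1 == 4.
// Characters above U+FFFF decode to a surrogate pair and count as two.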
5172 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5173                                               int non_ascii_start,
5174                                               PretenureFlag pretenure) {
5175   // Continue counting the number of characters in the UTF-8 string, starting
5176   // from the first non-ascii character or word.
5177   Access<UnicodeCache::Utf8Decoder>
5178       decoder(isolate_->unicode_cache()->utf8_decoder());
5179   decoder->Reset(string.start() + non_ascii_start,
5180                  string.length() - non_ascii_start);
5181   int utf16_length = decoder->Utf16Length();
5182   ASSERT(utf16_length > 0);
5183   // Allocate string.
5184   Object* result;
5185   {
5186     int chars = non_ascii_start + utf16_length;
5187     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5188     if (!maybe_result->ToObject(&result)) return maybe_result;
5189   }
5190   // Convert and copy the characters into the new object.
5191   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5192   // Copy ascii portion.
5193   uint16_t* data = twobyte->GetChars();
5194   if (non_ascii_start != 0) {
5195     const char* ascii_data = string.start();
5196     for (int i = 0; i < non_ascii_start; i++) {
5197       *data++ = *ascii_data++;
5198     }
5199   }
5200   // Now write the remainder.
5201   decoder->WriteUtf16(data, utf16_length);
5202   return result;
5203 }
5204
5205
5206 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5207                                              PretenureFlag pretenure) {
5208   // Check if the string is an ASCII string.
5209   Object* result;
5210   int length = string.length();
5211   const uc16* start = string.start();
5212
5213   if (String::IsOneByte(start, length)) {
5214     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5215     if (!maybe_result->ToObject(&result)) return maybe_result;
5216     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5217   } else {  // It's not a one byte string.
5218     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5219     if (!maybe_result->ToObject(&result)) return maybe_result;
5220     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5221   }
5222   return result;
5223 }
5224
5225
5226 Map* Heap::InternalizedStringMapForString(String* string) {
5227   // If the string is in new space it cannot be used as internalized.
5228   if (InNewSpace(string)) return NULL;
5229
5230   // Find the corresponding internalized string map for strings.
5231   switch (string->map()->instance_type()) {
5232     case STRING_TYPE: return internalized_string_map();
5233     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5234     case CONS_STRING_TYPE: return cons_internalized_string_map();
5235     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5236     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5237     case EXTERNAL_ASCII_STRING_TYPE:
5238       return external_ascii_internalized_string_map();
5239     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5240       return external_internalized_string_with_one_byte_data_map();
5241     case SHORT_EXTERNAL_STRING_TYPE:
5242       return short_external_internalized_string_map();
5243     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5244       return short_external_ascii_internalized_string_map();
5245     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5246       return short_external_internalized_string_with_one_byte_data_map();
5247     default: return NULL;  // No match found.
5248   }
5249 }
5250
5251
5252 static inline void WriteOneByteData(Vector<const char> vector,
5253                                     uint8_t* chars,
5254                                     int len) {
5255   // Only works for ascii.
5256   ASSERT(vector.length() == len);
5257   OS::MemCopy(chars, vector.start(), len);
5258 }
5259
5260 static inline void WriteTwoByteData(Vector<const char> vector,
5261                                     uint16_t* chars,
5262                                     int len) {
5263   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5264   unsigned stream_length = vector.length();
5265   while (stream_length != 0) {
5266     unsigned consumed = 0;
5267     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5268     ASSERT(c != unibrow::Utf8::kBadChar);
5269     ASSERT(consumed <= stream_length);
5270     stream_length -= consumed;
5271     stream += consumed;
5272     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5273       len -= 2;
5274       if (len < 0) break;
5275       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5276       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5277     } else {
5278       len -= 1;
5279       if (len < 0) break;
5280       *chars++ = c;
5281     }
5282   }
5283   ASSERT(stream_length == 0);
5284   ASSERT(len == 0);
5285 }
5286
5287
5288 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5289   ASSERT(s->length() == len);
5290   String::WriteToFlat(s, chars, 0, len);
5291 }
5292
5293
5294 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5295   ASSERT(s->length() == len);
5296   String::WriteToFlat(s, chars, 0, len);
5297 }
5298
5299
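// The raw size is derived from the character count (roughly one byte per
// character for SeqOneByteString, two for SeqTwoByteString, plus header and
// alignment), and the string goes straight into old data space or, if it
// exceeds the page limit, into large object space, since internalized strings
// are expected to be long-lived.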
5300 template<bool is_one_byte, typename T>
5301 MaybeObject* Heap::AllocateInternalizedStringImpl(
5302     T t, int chars, uint32_t hash_field) {
5303   ASSERT(chars >= 0);
5304   // Compute map and object size.
5305   int size;
5306   Map* map;
5307
5308   if (is_one_byte) {
5309     if (chars > SeqOneByteString::kMaxLength) {
5310       return Failure::OutOfMemoryException(0x9);
5311     }
5312     map = ascii_internalized_string_map();
5313     size = SeqOneByteString::SizeFor(chars);
5314   } else {
5315     if (chars > SeqTwoByteString::kMaxLength) {
5316       return Failure::OutOfMemoryException(0xa);
5317     }
5318     map = internalized_string_map();
5319     size = SeqTwoByteString::SizeFor(chars);
5320   }
5321
5322   // Allocate string.
5323   Object* result;
5324   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5325                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5326                    : old_data_space_->AllocateRaw(size);
5327     if (!maybe_result->ToObject(&result)) return maybe_result;
5328   }
5329
5330   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5331   // Set length and hash fields of the allocated string.
5332   String* answer = String::cast(result);
5333   answer->set_length(chars);
5334   answer->set_hash_field(hash_field);
5335
5336   ASSERT_EQ(size, answer->Size());
5337
5338   if (is_one_byte) {
5339     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5340   } else {
5341     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5342   }
5343   return answer;
5344 }
5345
5346
5347 // Need explicit instantiations.
5348 template
5349 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5350 template
5351 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5352     String*, int, uint32_t);
5353 template
5354 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5355     Vector<const char>, int, uint32_t);
5356
5357
5358 MaybeObject* Heap::AllocateRawOneByteString(int length,
5359                                             PretenureFlag pretenure) {
5360   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5361     return Failure::OutOfMemoryException(0xb);
5362   }
5363   int size = SeqOneByteString::SizeFor(length);
5364   ASSERT(size <= SeqOneByteString::kMaxSize);
5365   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5366   AllocationSpace retry_space = OLD_DATA_SPACE;
5367
5368   if (size > Page::kMaxNonCodeHeapObjectSize) {
5369     // Allocate in large object space; the retry space will be ignored.
5370     space = LO_SPACE;
5371   }
5372
5373   Object* result;
5374   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5375     if (!maybe_result->ToObject(&result)) return maybe_result;
5376   }
5377
5378   // Partially initialize the object.
5379   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5380   String::cast(result)->set_length(length);
5381   String::cast(result)->set_hash_field(String::kEmptyHashField);
5382   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5383
5384   return result;
5385 }
5386
5387
5388 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5389                                             PretenureFlag pretenure) {
5390   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5391     return Failure::OutOfMemoryException(0xc);
5392   }
5393   int size = SeqTwoByteString::SizeFor(length);
5394   ASSERT(size <= SeqTwoByteString::kMaxSize);
5395   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5396   AllocationSpace retry_space = OLD_DATA_SPACE;
5397
5398   if (size > Page::kMaxNonCodeHeapObjectSize) {
5399     // Allocate in large object space; the retry space will be ignored.
5400     space = LO_SPACE;
5401   }
5402
5403   Object* result;
5404   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5405     if (!maybe_result->ToObject(&result)) return maybe_result;
5406   }
5407
5408   // Partially initialize the object.
5409   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5410   String::cast(result)->set_length(length);
5411   String::cast(result)->set_hash_field(String::kEmptyHashField);
5412   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5413   return result;
5414 }
5415
5416
5417 MaybeObject* Heap::AllocateJSArray(
5418     ElementsKind elements_kind,
5419     PretenureFlag pretenure) {
5420   Context* native_context = isolate()->context()->native_context();
5421   JSFunction* array_function = native_context->array_function();
5422   Map* map = array_function->initial_map();
5423   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5424   if (transition_map != NULL) map = transition_map;
5425   return AllocateJSObjectFromMap(map, pretenure);
5426 }
5427
5428
5429 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5430     ElementsKind elements_kind,
5431     Handle<AllocationSite> allocation_site) {
5432   Context* native_context = isolate()->context()->native_context();
5433   JSFunction* array_function = native_context->array_function();
5434   Map* map = array_function->initial_map();
5435   Object* maybe_map_array = native_context->js_array_maps();
5436   if (!maybe_map_array->IsUndefined()) {
5437     Object* maybe_transitioned_map =
5438         FixedArray::cast(maybe_map_array)->get(elements_kind);
5439     if (!maybe_transitioned_map->IsUndefined()) {
5440       map = Map::cast(maybe_transitioned_map);
5441     }
5442   }
5443   return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5444 }
5445
5446
5447 MaybeObject* Heap::AllocateEmptyFixedArray() {
5448   int size = FixedArray::SizeFor(0);
5449   Object* result;
5450   { MaybeObject* maybe_result =
5451         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5452     if (!maybe_result->ToObject(&result)) return maybe_result;
5453   }
5454   // Initialize the object.
5455   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5456       fixed_array_map());
5457   reinterpret_cast<FixedArray*>(result)->set_length(0);
5458   return result;
5459 }
5460
5461
5462 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5463   return AllocateExternalArray(0, array_type, NULL, TENURED);
5464 }
5465
5466
5467 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5468   if (length < 0 || length > FixedArray::kMaxLength) {
5469     return Failure::OutOfMemoryException(0xd);
5470   }
5471   ASSERT(length > 0);
5472   // Use the general function if we're forced to always allocate.
5473   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5474   // Allocate the raw data for a fixed array.
5475   int size = FixedArray::SizeFor(length);
5476   return size <= Page::kMaxNonCodeHeapObjectSize
5477       ? new_space_.AllocateRaw(size)
5478       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5479 }
5480
5481
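// Fast path: a copy living in new space can be block-copied with no write
// barrier. Otherwise each element is stored through set() with the mode from
// GetWriteBarrierMode(), so old-space copies get barriers where needed.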
5482 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5483   int len = src->length();
5484   Object* obj;
5485   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5486     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5487   }
5488   if (InNewSpace(obj)) {
5489     HeapObject* dst = HeapObject::cast(obj);
5490     dst->set_map_no_write_barrier(map);
5491     CopyBlock(dst->address() + kPointerSize,
5492               src->address() + kPointerSize,
5493               FixedArray::SizeFor(len) - kPointerSize);
5494     return obj;
5495   }
5496   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5497   FixedArray* result = FixedArray::cast(obj);
5498   result->set_length(len);
5499
5500   // Copy the content.
5501   DisallowHeapAllocation no_gc;
5502   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5503   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5504   return result;
5505 }
5506
5507
5508 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5509                                                Map* map) {
5510   int len = src->length();
5511   Object* obj;
5512   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5513     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5514   }
5515   HeapObject* dst = HeapObject::cast(obj);
5516   dst->set_map_no_write_barrier(map);
5517   CopyBlock(
5518       dst->address() + FixedDoubleArray::kLengthOffset,
5519       src->address() + FixedDoubleArray::kLengthOffset,
5520       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5521   return obj;
5522 }
5523
5524
5525 MaybeObject* Heap::AllocateFixedArray(int length) {
5526   ASSERT(length >= 0);
5527   if (length == 0) return empty_fixed_array();
5528   Object* result;
5529   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5530     if (!maybe_result->ToObject(&result)) return maybe_result;
5531   }
5532   // Initialize header.
5533   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5534   array->set_map_no_write_barrier(fixed_array_map());
5535   array->set_length(length);
5536   // Initialize body.
5537   ASSERT(!InNewSpace(undefined_value()));
5538   MemsetPointer(array->data_start(), undefined_value(), length);
5539   return result;
5540 }
5541
5542
5543 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5544   if (length < 0 || length > FixedArray::kMaxLength) {
5545     return Failure::OutOfMemoryException(0xe);
5546   }
5547   int size = FixedArray::SizeFor(length);
5548   AllocationSpace space =
5549       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5550   AllocationSpace retry_space = OLD_POINTER_SPACE;
5551
5552   if (size > Page::kMaxNonCodeHeapObjectSize) {
5553     // Allocate in large object space; the retry space will be ignored.
5554     space = LO_SPACE;
5555   }
5556
5557   return AllocateRaw(size, space, retry_space);
5558 }
5559
5560
5561 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5562     Heap* heap,
5563     int length,
5564     PretenureFlag pretenure,
5565     Object* filler) {
5566   ASSERT(length >= 0);
5567   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5568   if (length == 0) return heap->empty_fixed_array();
5569
5570   ASSERT(!heap->InNewSpace(filler));
5571   Object* result;
5572   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5573     if (!maybe_result->ToObject(&result)) return maybe_result;
5574   }
5575
5576   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5577   FixedArray* array = FixedArray::cast(result);
5578   array->set_length(length);
5579   MemsetPointer(array->data_start(), filler, length);
5580   return array;
5581 }
5582
5583
5584 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5585   return AllocateFixedArrayWithFiller(this,
5586                                       length,
5587                                       pretenure,
5588                                       undefined_value());
5589 }
5590
5591
5592 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5593                                                PretenureFlag pretenure) {
5594   return AllocateFixedArrayWithFiller(this,
5595                                       length,
5596                                       pretenure,
5597                                       the_hole_value());
5598 }
5599
5600
5601 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5602   if (length == 0) return empty_fixed_array();
5603
5604   Object* obj;
5605   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5606     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5607   }
5608
5609   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5610       fixed_array_map());
5611   FixedArray::cast(obj)->set_length(length);
5612   return obj;
5613 }
5614
5615
5616 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5617   int size = FixedDoubleArray::SizeFor(0);
5618   Object* result;
5619   { MaybeObject* maybe_result =
5620         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5621     if (!maybe_result->ToObject(&result)) return maybe_result;
5622   }
5623   // Initialize the object.
5624   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5625       fixed_double_array_map());
5626   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5627   return result;
5628 }
5629
5630
5631 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5632     int length,
5633     PretenureFlag pretenure) {
5634   if (length == 0) return empty_fixed_array();
5635
5636   Object* elements_object;
5637   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5638   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5639   FixedDoubleArray* elements =
5640       reinterpret_cast<FixedDoubleArray*>(elements_object);
5641
5642   elements->set_map_no_write_barrier(fixed_double_array_map());
5643   elements->set_length(length);
5644   return elements;
5645 }
5646
5647
5648 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5649     int length,
5650     PretenureFlag pretenure) {
5651   if (length == 0) return empty_fixed_array();
5652
5653   Object* elements_object;
5654   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5655   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5656   FixedDoubleArray* elements =
5657       reinterpret_cast<FixedDoubleArray*>(elements_object);
5658
5659   for (int i = 0; i < length; ++i) {
5660     elements->set_the_hole(i);
5661   }
5662
5663   elements->set_map_no_write_barrier(fixed_double_array_map());
5664   elements->set_length(length);
5665   return elements;
5666 }
5667
5668
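// On 32-bit targets one extra pointer-sized word is requested so that
// EnsureDoubleAligned() can slide the array onto an 8-byte boundary; on 64-bit
// targets allocations are already suitably aligned.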
5669 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5670                                                PretenureFlag pretenure) {
5671   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5672     return Failure::OutOfMemoryException(0xf);
5673   }
5674   int size = FixedDoubleArray::SizeFor(length);
5675   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5676   AllocationSpace retry_space = OLD_DATA_SPACE;
5677
5678 #ifndef V8_HOST_ARCH_64_BIT
5679   size += kPointerSize;
5680 #endif
5681
5682   if (size > Page::kMaxNonCodeHeapObjectSize) {
5683     // Allocate in large object space; the retry space will be ignored.
5684     space = LO_SPACE;
5685   }
5686
5687   HeapObject* object;
5688   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5689     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5690   }
5691
5692   return EnsureDoubleAligned(this, object, size);
5693 }
5694
5695
5696 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5697   Object* result;
5698   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5699     if (!maybe_result->ToObject(&result)) return maybe_result;
5700   }
5701   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5702       hash_table_map());
5703   ASSERT(result->IsHashTable());
5704   return result;
5705 }
5706
5707
5708 MaybeObject* Heap::AllocateSymbol() {
5709   // Statically ensure that it is safe to allocate symbols in paged spaces.
5710   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5711
5712   Object* result;
5713   MaybeObject* maybe =
5714       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5715   if (!maybe->ToObject(&result)) return maybe;
5716
5717   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5718
5719   // Generate a random hash value.
5720   int hash;
5721   int attempts = 0;
5722   do {
5723     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5724     attempts++;
5725   } while (hash == 0 && attempts < 30);
5726   if (hash == 0) hash = 1;  // never return 0
5727
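       // Store the hash in the symbol's hash field, flagged as not being a
       // cached array index so it is always interpreted as a plain hash value.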
5728   Symbol::cast(result)->set_hash_field(
5729       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5730   Symbol::cast(result)->set_name(undefined_value());
5731
5732   ASSERT(result->IsSymbol());
5733   return result;
5734 }
5735
5736
5737 MaybeObject* Heap::AllocateNativeContext() {
5738   Object* result;
5739   { MaybeObject* maybe_result =
5740         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5741     if (!maybe_result->ToObject(&result)) return maybe_result;
5742   }
5743   Context* context = reinterpret_cast<Context*>(result);
5744   context->set_map_no_write_barrier(native_context_map());
5745   context->set_js_array_maps(undefined_value());
5746   ASSERT(context->IsNativeContext());
5747   ASSERT(result->IsContext());
5748   return result;
5749 }
5750
5751
5752 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5753                                          ScopeInfo* scope_info) {
5754   Object* result;
5755   { MaybeObject* maybe_result =
5756         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5757     if (!maybe_result->ToObject(&result)) return maybe_result;
5758   }
5759   Context* context = reinterpret_cast<Context*>(result);
5760   context->set_map_no_write_barrier(global_context_map());
5761   context->set_closure(function);
5762   context->set_previous(function->context());
5763   context->set_extension(scope_info);
5764   context->set_global_object(function->context()->global_object());
5765   ASSERT(context->IsGlobalContext());
5766   ASSERT(result->IsContext());
5767   return context;
5768 }
5769
5770
5771 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5772   Object* result;
5773   { MaybeObject* maybe_result =
5774         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5775     if (!maybe_result->ToObject(&result)) return maybe_result;
5776   }
5777   Context* context = reinterpret_cast<Context*>(result);
5778   context->set_map_no_write_barrier(module_context_map());
5779   // Instance link will be set later.
5780   context->set_extension(Smi::FromInt(0));
5781   return context;
5782 }
5783
5784
5785 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5786   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5787   Object* result;
5788   { MaybeObject* maybe_result = AllocateFixedArray(length);
5789     if (!maybe_result->ToObject(&result)) return maybe_result;
5790   }
5791   Context* context = reinterpret_cast<Context*>(result);
5792   context->set_map_no_write_barrier(function_context_map());
5793   context->set_closure(function);
5794   context->set_previous(function->context());
5795   context->set_extension(Smi::FromInt(0));
5796   context->set_global_object(function->context()->global_object());
5797   return context;
5798 }
5799
5800
5801 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5802                                         Context* previous,
5803                                         String* name,
5804                                         Object* thrown_object) {
5805   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5806   Object* result;
5807   { MaybeObject* maybe_result =
5808         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5809     if (!maybe_result->ToObject(&result)) return maybe_result;
5810   }
5811   Context* context = reinterpret_cast<Context*>(result);
5812   context->set_map_no_write_barrier(catch_context_map());
5813   context->set_closure(function);
5814   context->set_previous(previous);
5815   context->set_extension(name);
5816   context->set_global_object(previous->global_object());
5817   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5818   return context;
5819 }
5820
5821
5822 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5823                                        Context* previous,
5824                                        JSReceiver* extension) {
5825   Object* result;
5826   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5827     if (!maybe_result->ToObject(&result)) return maybe_result;
5828   }
5829   Context* context = reinterpret_cast<Context*>(result);
5830   context->set_map_no_write_barrier(with_context_map());
5831   context->set_closure(function);
5832   context->set_previous(previous);
5833   context->set_extension(extension);
5834   context->set_global_object(previous->global_object());
5835   return context;
5836 }
5837
5838
5839 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5840                                         Context* previous,
5841                                         ScopeInfo* scope_info) {
5842   Object* result;
5843   { MaybeObject* maybe_result =
5844         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5845     if (!maybe_result->ToObject(&result)) return maybe_result;
5846   }
5847   Context* context = reinterpret_cast<Context*>(result);
5848   context->set_map_no_write_barrier(block_context_map());
5849   context->set_closure(function);
5850   context->set_previous(previous);
5851   context->set_extension(scope_info);
5852   context->set_global_object(previous->global_object());
5853   return context;
5854 }
5855
5856
5857 MaybeObject* Heap::AllocateScopeInfo(int length) {
5858   FixedArray* scope_info;
5859   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5860   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5861   scope_info->set_map_no_write_barrier(scope_info_map());
5862   return scope_info;
5863 }
5864
5865
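     // Wraps a raw pointer for use from JavaScript: the pointer is boxed in a
     // Foreign, which is then stored in internal field 0 of a JSObject created
     // from the external map.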
5866 MaybeObject* Heap::AllocateExternal(void* value) {
5867   Foreign* foreign;
5868   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5869     if (!maybe_result->To(&foreign)) return maybe_result;
5870   }
5871   JSObject* external;
5872   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5873     if (!maybe_result->To(&external)) return maybe_result;
5874   }
5875   external->SetInternalField(0, foreign);
5876   return external;
5877 }
5878
5879
5880 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5881   Map* map;
5882   switch (type) {
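         // STRUCT_LIST expands into one case per struct instance type, picking
         // the matching map from the roots (each NAME##_TYPE selects name##_map()).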
5883 #define MAKE_CASE(NAME, Name, name) \
5884     case NAME##_TYPE: map = name##_map(); break;
5885 STRUCT_LIST(MAKE_CASE)
5886 #undef MAKE_CASE
5887     default:
5888       UNREACHABLE();
5889       return Failure::InternalError();
5890   }
5891   int size = map->instance_size();
5892   AllocationSpace space =
5893       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5894   Object* result;
5895   { MaybeObject* maybe_result = Allocate(map, space);
5896     if (!maybe_result->ToObject(&result)) return maybe_result;
5897   }
5898   Struct::cast(result)->InitializeBody(size);
5899   return result;
5900 }
5901
5902
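     // The heap is only iterable when the old spaces have been swept precisely;
     // after a conservative sweep they may still contain unswept garbage that
     // an object iterator cannot skip over.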
5903 bool Heap::IsHeapIterable() {
5904   return (!old_pointer_space()->was_swept_conservatively() &&
5905           !old_data_space()->was_swept_conservatively());
5906 }
5907
5908
5909 void Heap::EnsureHeapIsIterable() {
5910   ASSERT(AllowHeapAllocation::IsAllowed());
5911   if (!IsHeapIterable()) {
5912     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5913   }
5914   ASSERT(IsHeapIterable());
5915 }
5916
5917
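     // Performs a single incremental marking step of the given size.  Once
     // marking is complete the cycle is finalized with a full collection; if no
     // GC has happened since the last idle GC, the mutator is assumed to be
     // idle, so the compilation cache is cleared and new space is shrunk and
     // uncommitted as well.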
5918 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5919   incremental_marking()->Step(step_size,
5920                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5921
5922   if (incremental_marking()->IsComplete()) {
5923     bool uncommit = false;
5924     if (gc_count_at_last_idle_gc_ == gc_count_) {
5925       // No GC since the last full GC, the mutator is probably not active.
5926       isolate_->compilation_cache()->Clear();
5927       uncommit = true;
5928     }
5929     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5930     mark_sweeps_since_idle_round_started_++;
5931     gc_count_at_last_idle_gc_ = gc_count_;
5932     if (uncommit) {
5933       new_space_.Shrink();
5934       UncommitFromSpace();
5935     }
5936   }
5937 }
5938
5939
5940 bool Heap::IdleNotification(int hint) {
5941   // Hints greater than this value indicate that
5942   // the embedder is requesting a lot of GC work.
5943   const int kMaxHint = 1000;
5944   const int kMinHintForIncrementalMarking = 10;
5945   // Minimal hint that allows a full GC to be performed.
5946   const int kMinHintForFullGC = 100;
5947   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5948   // The size factor is in range [5..250]. The numbers here are chosen from
5949   // experiments. If you change them, make sure to test with
5950   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5951   intptr_t step_size =
5952       size_factor * IncrementalMarking::kAllocatedThreshold;
5953
5954   if (contexts_disposed_ > 0) {
5955     if (hint >= kMaxHint) {
5956       // The embedder is requesting a lot of GC work after context disposal,
5957       // we age inline caches so that they don't keep objects from
5958       // the old context alive.
5959       AgeInlineCaches();
5960     }
5961     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5962     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5963         incremental_marking()->IsStopped()) {
5964       HistogramTimerScope scope(isolate_->counters()->gc_context());
5965       CollectAllGarbage(kReduceMemoryFootprintMask,
5966                         "idle notification: contexts disposed");
5967     } else {
5968       AdvanceIdleIncrementalMarking(step_size);
5969       contexts_disposed_ = 0;
5970     }
5971     // After context disposal there is likely a lot of garbage remaining, so
5972     // reset the idle notification counters in order to trigger more
5973     // incremental GCs on subsequent idle notifications.
5974     StartIdleRound();
5975     return false;
5976   }
5977
5978   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5979     return IdleGlobalGC();
5980   }
5981
5982   // By doing small chunks of GC work in each IdleNotification,
5983   // perform a round of incremental GCs and after that wait until
5984   // the mutator creates enough garbage to justify a new round.
5985   // An incremental GC progresses as follows:
5986   // 1. many incremental marking steps,
5987   // 2. one old space mark-sweep-compact,
5988   // 3. many lazy sweep steps.
5989   // Use mark-sweep-compact events to count incremental GCs in a round.
5990
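       // If lazy sweeping from the previous mark-sweep is still pending and no
       // sweeper threads are working on it, advance it here and report that
       // more idle work remains.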
5991   if (incremental_marking()->IsStopped()) {
5992     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5993         !IsSweepingComplete() &&
5994         !AdvanceSweepers(static_cast<int>(step_size))) {
5995       return false;
5996     }
5997   }
5998
5999   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6000     if (EnoughGarbageSinceLastIdleRound()) {
6001       StartIdleRound();
6002     } else {
6003       return true;
6004     }
6005   }
6006
6007   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6008                               mark_sweeps_since_idle_round_started_;
6009
6010   if (incremental_marking()->IsStopped()) {
6011     // If there are no more than two GCs left in this idle round and we are
6012     // allowed to do a full GC, then make those GCs full in order to compact
6013     // the code space.
6014     // TODO(ulan): Once we enable code compaction for incremental marking,
6015     // we can get rid of this special case and always start incremental marking.
6016     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6017       CollectAllGarbage(kReduceMemoryFootprintMask,
6018                         "idle notification: finalize idle round");
6019       mark_sweeps_since_idle_round_started_++;
6020     } else if (hint > kMinHintForIncrementalMarking) {
6021       incremental_marking()->Start();
6022     }
6023   }
6024   if (!incremental_marking()->IsStopped() &&
6025       hint > kMinHintForIncrementalMarking) {
6026     AdvanceIdleIncrementalMarking(step_size);
6027   }
6028
6029   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6030     FinishIdleRound();
6031     return true;
6032   }
6033
6034   return false;
6035 }
6036
6037
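     // Fallback idle-time GC used when incremental marking is unavailable: as
     // idle notifications accumulate, escalate from a new-space collection to
     // full collections, and stop doing work once further notifications are
     // unlikely to free much more memory.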
6038 bool Heap::IdleGlobalGC() {
6039   static const int kIdlesBeforeScavenge = 4;
6040   static const int kIdlesBeforeMarkSweep = 7;
6041   static const int kIdlesBeforeMarkCompact = 8;
6042   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6043   static const unsigned int kGCsBetweenCleanup = 4;
6044
6045   if (!last_idle_notification_gc_count_init_) {
6046     last_idle_notification_gc_count_ = gc_count_;
6047     last_idle_notification_gc_count_init_ = true;
6048   }
6049
6050   bool uncommit = true;
6051   bool finished = false;
6052
6053   // Reset the number of idle notifications received when a number of
6054   // GCs have taken place. This allows another round of cleanup based
6055   // on idle notifications if enough work has been carried out to
6056   // provoke a number of garbage collections.
6057   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6058     number_idle_notifications_ =
6059         Min(number_idle_notifications_ + 1, kMaxIdleCount);
6060   } else {
6061     number_idle_notifications_ = 0;
6062     last_idle_notification_gc_count_ = gc_count_;
6063   }
6064
6065   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6066     CollectGarbage(NEW_SPACE, "idle notification");
6067     new_space_.Shrink();
6068     last_idle_notification_gc_count_ = gc_count_;
6069   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6070     // Before doing the mark-sweep collections we clear the
6071     // compilation cache to avoid hanging on to source code and
6072     // generated code for cached functions.
6073     isolate_->compilation_cache()->Clear();
6074
6075     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6076     new_space_.Shrink();
6077     last_idle_notification_gc_count_ = gc_count_;
6078
6079   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6080     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6081     new_space_.Shrink();
6082     last_idle_notification_gc_count_ = gc_count_;
6083     number_idle_notifications_ = 0;
6084     finished = true;
6085   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6086     // If we have received more than kIdlesBeforeMarkCompact idle
6087     // notifications we do not perform any cleanup because we don't
6088     // expect to gain much by doing so.
6089     finished = true;
6090   }
6091
6092   if (uncommit) UncommitFromSpace();
6093
6094   return finished;
6095 }
6096
6097
6098 #ifdef DEBUG
6099
6100 void Heap::Print() {
6101   if (!HasBeenSetUp()) return;
6102   isolate()->PrintStack(stdout);
6103   AllSpaces spaces(this);
6104   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6105     space->Print();
6106   }
6107 }
6108
6109
6110 void Heap::ReportCodeStatistics(const char* title) {
6111   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6112   PagedSpace::ResetCodeStatistics();
6113   // We do not look for code in new space, map space, or old space.  If code
6114   // somehow ends up in those spaces, we would miss it here.
6115   code_space_->CollectCodeStatistics();
6116   lo_space_->CollectCodeStatistics();
6117   PagedSpace::ReportCodeStatistics();
6118 }
6119
6120
6121 // This function expects that NewSpace's allocated objects histogram is
6122 // populated (via a call to CollectStatistics or else as a side effect of a
6123 // just-completed scavenge collection).
6124 void Heap::ReportHeapStatistics(const char* title) {
6125   USE(title);
6126   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6127          title, gc_count_);
6128   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6129          old_generation_allocation_limit_);
6130
6131   PrintF("\n");
6132   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6133   isolate_->global_handles()->PrintStats();
6134   PrintF("\n");
6135
6136   PrintF("Heap statistics : ");
6137   isolate_->memory_allocator()->ReportStatistics();
6138   PrintF("To space : ");
6139   new_space_.ReportStatistics();
6140   PrintF("Old pointer space : ");
6141   old_pointer_space_->ReportStatistics();
6142   PrintF("Old data space : ");
6143   old_data_space_->ReportStatistics();
6144   PrintF("Code space : ");
6145   code_space_->ReportStatistics();
6146   PrintF("Map space : ");
6147   map_space_->ReportStatistics();
6148   PrintF("Cell space : ");
6149   cell_space_->ReportStatistics();
6150   PrintF("PropertyCell space : ");
6151   property_cell_space_->ReportStatistics();
6152   PrintF("Large object space : ");
6153   lo_space_->ReportStatistics();
6154   PrintF(">>>>>> ========================================= >>>>>>\n");
6155 }
6156
6157 #endif  // DEBUG
6158
6159 bool Heap::Contains(HeapObject* value) {
6160   return Contains(value->address());
6161 }
6162
6163
6164 bool Heap::Contains(Address addr) {
6165   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6166   return HasBeenSetUp() &&
6167     (new_space_.ToSpaceContains(addr) ||
6168      old_pointer_space_->Contains(addr) ||
6169      old_data_space_->Contains(addr) ||
6170      code_space_->Contains(addr) ||
6171      map_space_->Contains(addr) ||
6172      cell_space_->Contains(addr) ||
6173      property_cell_space_->Contains(addr) ||
6174      lo_space_->SlowContains(addr));
6175 }
6176
6177
6178 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6179   return InSpace(value->address(), space);
6180 }
6181
6182
6183 bool Heap::InSpace(Address addr, AllocationSpace space) {
6184   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6185   if (!HasBeenSetUp()) return false;
6186
6187   switch (space) {
6188     case NEW_SPACE:
6189       return new_space_.ToSpaceContains(addr);
6190     case OLD_POINTER_SPACE:
6191       return old_pointer_space_->Contains(addr);
6192     case OLD_DATA_SPACE:
6193       return old_data_space_->Contains(addr);
6194     case CODE_SPACE:
6195       return code_space_->Contains(addr);
6196     case MAP_SPACE:
6197       return map_space_->Contains(addr);
6198     case CELL_SPACE:
6199       return cell_space_->Contains(addr);
6200     case PROPERTY_CELL_SPACE:
6201       return property_cell_space_->Contains(addr);
6202     case LO_SPACE:
6203       return lo_space_->SlowContains(addr);
6204   }
6205
6206   return false;
6207 }
6208
6209
6210 #ifdef VERIFY_HEAP
6211 void Heap::Verify() {
6212   CHECK(HasBeenSetUp());
6213
6214   store_buffer()->Verify();
6215
6216   VerifyPointersVisitor visitor;
6217   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6218
6219   new_space_.Verify();
6220
6221   old_pointer_space_->Verify(&visitor);
6222   map_space_->Verify(&visitor);
6223
6224   VerifyPointersVisitor no_dirty_regions_visitor;
6225   old_data_space_->Verify(&no_dirty_regions_visitor);
6226   code_space_->Verify(&no_dirty_regions_visitor);
6227   cell_space_->Verify(&no_dirty_regions_visitor);
6228   property_cell_space_->Verify(&no_dirty_regions_visitor);
6229
6230   lo_space_->Verify();
6231 }
6232 #endif
6233
6234
6235 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6236   Object* result = NULL;
6237   Object* new_table;
6238   { MaybeObject* maybe_new_table =
6239         string_table()->LookupUtf8String(string, &result);
6240     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6241   }
6242   // Can't use set_string_table because StringTable::cast knows that
6243   // StringTable is a singleton and checks for identity.
6244   roots_[kStringTableRootIndex] = new_table;
6245   ASSERT(result != NULL);
6246   return result;
6247 }
6248
6249
6250 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6251   Object* result = NULL;
6252   Object* new_table;
6253   { MaybeObject* maybe_new_table =
6254         string_table()->LookupOneByteString(string, &result);
6255     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6256   }
6257   // Can't use set_string_table because StringTable::cast knows that
6258   // StringTable is a singleton and checks for identity.
6259   roots_[kStringTableRootIndex] = new_table;
6260   ASSERT(result != NULL);
6261   return result;
6262 }
6263
6264
6265 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6266                                             int from,
6267                                             int length) {
6268   Object* result = NULL;
6269   Object* new_table;
6270   { MaybeObject* maybe_new_table =
6271         string_table()->LookupSubStringOneByteString(string,
6272                                                      from,
6273                                                      length,
6274                                                      &result);
6275     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6276   }
6277   // Can't use set_string_table because StringTable::cast knows that
6278   // StringTable is a singleton and checks for identity.
6279   roots_[kStringTableRootIndex] = new_table;
6280   ASSERT(result != NULL);
6281   return result;
6282 }
6283
6284
6285 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6286   Object* result = NULL;
6287   Object* new_table;
6288   { MaybeObject* maybe_new_table =
6289         string_table()->LookupTwoByteString(string, &result);
6290     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6291   }
6292   // Can't use set_string_table because StringTable::cast knows that
6293   // StringTable is a singleton and checks for identity.
6294   roots_[kStringTableRootIndex] = new_table;
6295   ASSERT(result != NULL);
6296   return result;
6297 }
6298
6299
6300 MaybeObject* Heap::InternalizeString(String* string) {
6301   if (string->IsInternalizedString()) return string;
6302   Object* result = NULL;
6303   Object* new_table;
6304   { MaybeObject* maybe_new_table =
6305         string_table()->LookupString(string, &result);
6306     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6307   }
6308   // Can't use set_string_table because StringTable::cast knows that
6309   // StringTable is a singleton and checks for identity.
6310   roots_[kStringTableRootIndex] = new_table;
6311   ASSERT(result != NULL);
6312   return result;
6313 }
6314
6315
6316 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6317   if (string->IsInternalizedString()) {
6318     *result = string;
6319     return true;
6320   }
6321   return string_table()->LookupStringIfExists(string, result);
6322 }
6323
6324
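     // Overwrite every word of from-space with kFromSpaceZapValue so that stale
     // pointers into the evacuated semispace are easy to spot, e.g. when
     // debugging heap corruption.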
6325 void Heap::ZapFromSpace() {
6326   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6327                           new_space_.FromSpaceEnd());
6328   while (it.has_next()) {
6329     NewSpacePage* page = it.next();
6330     for (Address cursor = page->area_start(), limit = page->area_end();
6331          cursor < limit;
6332          cursor += kPointerSize) {
6333       Memory::Address_at(cursor) = kFromSpaceZapValue;
6334     }
6335   }
6336 }
6337
6338
6339 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6340                                              Address end,
6341                                              ObjectSlotCallback callback) {
6342   Address slot_address = start;
6343
6344   // We are not collecting slots on new space objects during mutation,
6345   // thus we have to scan for pointers to evacuation candidates when we
6346   // promote objects. But we should not record any slots in non-black
6347   // objects. Grey objects' slots would be rescanned anyway.
6348   // White objects might not survive until the end of the collection, so
6349   // recording their slots would violate the invariant.
6350   bool record_slots = false;
6351   if (incremental_marking()->IsCompacting()) {
6352     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6353     record_slots = Marking::IsBlack(mark_bit);
6354   }
6355
6356   while (slot_address < end) {
6357     Object** slot = reinterpret_cast<Object**>(slot_address);
6358     Object* object = *slot;
6359     // If the store buffer becomes overfull we mark pages as being exempt from
6360     // the store buffer.  These pages are scanned to find pointers that point
6361     // to the new space.  In that case we may hit newly promoted objects and
6362     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6363     if (object->IsHeapObject()) {
6364       if (Heap::InFromSpace(object)) {
6365         callback(reinterpret_cast<HeapObject**>(slot),
6366                  HeapObject::cast(object));
6367         Object* new_object = *slot;
6368         if (InNewSpace(new_object)) {
6369           SLOW_ASSERT(Heap::InToSpace(new_object));
6370           SLOW_ASSERT(new_object->IsHeapObject());
6371           store_buffer_.EnterDirectlyIntoStoreBuffer(
6372               reinterpret_cast<Address>(slot));
6373         }
6374         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6375       } else if (record_slots &&
6376                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6377         mark_compact_collector()->RecordSlot(slot, slot, object);
6378       }
6379     }
6380     slot_address += kPointerSize;
6381   }
6382 }
6383
6384
6385 #ifdef DEBUG
6386 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6387
6388
6389 bool IsAMapPointerAddress(Object** addr) {
6390   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6391   int mod = a % Map::kSize;
6392   return mod >= Map::kPointerFieldsBeginOffset &&
6393          mod < Map::kPointerFieldsEndOffset;
6394 }
6395
6396
6397 bool EverythingsAPointer(Object** addr) {
6398   return true;
6399 }
6400
6401
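     // Verifies that every slot in [current, limit) that passes the filter and
     // points into new space has a matching entry in the (sorted) store buffer.
     // Free-space blocks and the current linear allocation area are skipped; a
     // missing store buffer entry triggers UNREACHABLE().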
6402 static void CheckStoreBuffer(Heap* heap,
6403                              Object** current,
6404                              Object** limit,
6405                              Object**** store_buffer_position,
6406                              Object*** store_buffer_top,
6407                              CheckStoreBufferFilter filter,
6408                              Address special_garbage_start,
6409                              Address special_garbage_end) {
6410   Map* free_space_map = heap->free_space_map();
6411   for ( ; current < limit; current++) {
6412     Object* o = *current;
6413     Address current_address = reinterpret_cast<Address>(current);
6414     // Skip free space.
6415     if (o == free_space_map) {
6417       FreeSpace* free_space =
6418           FreeSpace::cast(HeapObject::FromAddress(current_address));
6419       int skip = free_space->Size();
6420       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6421       ASSERT(skip > 0);
6422       current_address += skip - kPointerSize;
6423       current = reinterpret_cast<Object**>(current_address);
6424       continue;
6425     }
6426     // Skip the current linear allocation space between top and limit, which
6427     // is not marked with the free space map but can contain junk.
6428     if (current_address == special_garbage_start &&
6429         special_garbage_end != special_garbage_start) {
6430       current_address = special_garbage_end - kPointerSize;
6431       current = reinterpret_cast<Object**>(current_address);
6432       continue;
6433     }
6434     if (!(*filter)(current)) continue;
6435     ASSERT(current_address < special_garbage_start ||
6436            current_address >= special_garbage_end);
6437     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6438     // We have to check that the pointer does not point into new space
6439     // without trying to cast it to a heap object since the hash field of
6440     // a string can contain values like 1 and 3 which are tagged null
6441     // pointers.
6442     if (!heap->InNewSpace(o)) continue;
6443     while (**store_buffer_position < current &&
6444            *store_buffer_position < store_buffer_top) {
6445       (*store_buffer_position)++;
6446     }
6447     if (**store_buffer_position != current ||
6448         *store_buffer_position == store_buffer_top) {
6449       Object** obj_start = current;
6450       while (!(*obj_start)->IsMap()) obj_start--;
6451       UNREACHABLE();
6452     }
6453   }
6454 }
6455
6456
6457 // Check that the store buffer contains all intergenerational pointers by
6458 // scanning a page and ensuring that all pointers to young space are in the
6459 // store buffer.
6460 void Heap::OldPointerSpaceCheckStoreBuffer() {
6461   OldSpace* space = old_pointer_space();
6462   PageIterator pages(space);
6463
6464   store_buffer()->SortUniq();
6465
6466   while (pages.has_next()) {
6467     Page* page = pages.next();
6468     Object** current = reinterpret_cast<Object**>(page->area_start());
6469
6470     Address end = page->area_end();
6471
6472     Object*** store_buffer_position = store_buffer()->Start();
6473     Object*** store_buffer_top = store_buffer()->Top();
6474
6475     Object** limit = reinterpret_cast<Object**>(end);
6476     CheckStoreBuffer(this,
6477                      current,
6478                      limit,
6479                      &store_buffer_position,
6480                      store_buffer_top,
6481                      &EverythingsAPointer,
6482                      space->top(),
6483                      space->limit());
6484   }
6485 }
6486
6487
6488 void Heap::MapSpaceCheckStoreBuffer() {
6489   MapSpace* space = map_space();
6490   PageIterator pages(space);
6491
6492   store_buffer()->SortUniq();
6493
6494   while (pages.has_next()) {
6495     Page* page = pages.next();
6496     Object** current = reinterpret_cast<Object**>(page->area_start());
6497
6498     Address end = page->area_end();
6499
6500     Object*** store_buffer_position = store_buffer()->Start();
6501     Object*** store_buffer_top = store_buffer()->Top();
6502
6503     Object** limit = reinterpret_cast<Object**>(end);
6504     CheckStoreBuffer(this,
6505                      current,
6506                      limit,
6507                      &store_buffer_position,
6508                      store_buffer_top,
6509                      &IsAMapPointerAddress,
6510                      space->top(),
6511                      space->limit());
6512   }
6513 }
6514
6515
6516 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6517   LargeObjectIterator it(lo_space());
6518   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6519     // We only have code, sequential strings, or fixed arrays in large
6520     // object space, and only fixed arrays can possibly contain pointers to
6521     // the young generation.
6522     if (object->IsFixedArray()) {
6523       Object*** store_buffer_position = store_buffer()->Start();
6524       Object*** store_buffer_top = store_buffer()->Top();
6525       Object** current = reinterpret_cast<Object**>(object->address());
6526       Object** limit =
6527           reinterpret_cast<Object**>(object->address() + object->Size());
6528       CheckStoreBuffer(this,
6529                        current,
6530                        limit,
6531                        &store_buffer_position,
6532                        store_buffer_top,
6533                        &EverythingsAPointer,
6534                        NULL,
6535                        NULL);
6536     }
6537   }
6538 }
6539 #endif
6540
6541
6542 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6543   IterateStrongRoots(v, mode);
6544   IterateWeakRoots(v, mode);
6545 }
6546
6547
6548 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6549   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6550   v->Synchronize(VisitorSynchronization::kStringTable);
6551   if (mode != VISIT_ALL_IN_SCAVENGE &&
6552       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6553     // Scavenge collections have special processing for this.
6554     external_string_table_.Iterate(v);
6555   }
6556   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6557 }
6558
6559
6560 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6561   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6562   v->Synchronize(VisitorSynchronization::kStrongRootList);
6563
6564   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6565   v->Synchronize(VisitorSynchronization::kInternalizedString);
6566
6567   isolate_->bootstrapper()->Iterate(v);
6568   v->Synchronize(VisitorSynchronization::kBootstrapper);
6569   isolate_->Iterate(v);
6570   v->Synchronize(VisitorSynchronization::kTop);
6571   Relocatable::Iterate(v);
6572   v->Synchronize(VisitorSynchronization::kRelocatable);
6573
6574 #ifdef ENABLE_DEBUGGER_SUPPORT
6575   isolate_->debug()->Iterate(v);
6576   if (isolate_->deoptimizer_data() != NULL) {
6577     isolate_->deoptimizer_data()->Iterate(v);
6578   }
6579 #endif
6580   v->Synchronize(VisitorSynchronization::kDebug);
6581   isolate_->compilation_cache()->Iterate(v);
6582   v->Synchronize(VisitorSynchronization::kCompilationCache);
6583
6584   // Iterate over local handles in handle scopes.
6585   isolate_->handle_scope_implementer()->Iterate(v);
6586   isolate_->IterateDeferredHandles(v);
6587   v->Synchronize(VisitorSynchronization::kHandleScope);
6588
6589   // Iterate over the builtin code objects and code stubs in the
6590   // heap. Note that it is not necessary to iterate over code objects
6591   // on scavenge collections.
6592   if (mode != VISIT_ALL_IN_SCAVENGE) {
6593     isolate_->builtins()->IterateBuiltins(v);
6594   }
6595   v->Synchronize(VisitorSynchronization::kBuiltins);
6596
6597   // Iterate over global handles.
6598   switch (mode) {
6599     case VISIT_ONLY_STRONG:
6600       isolate_->global_handles()->IterateStrongRoots(v);
6601       break;
6602     case VISIT_ALL_IN_SCAVENGE:
6603       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6604       break;
6605     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6606     case VISIT_ALL:
6607       isolate_->global_handles()->IterateAllRoots(v);
6608       break;
6609   }
6610   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6611
6612   // Iterate over eternal handles.
6613   if (mode == VISIT_ALL_IN_SCAVENGE) {
6614     isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6615   } else {
6616     isolate_->eternal_handles()->IterateAllRoots(v);
6617   }
6618   v->Synchronize(VisitorSynchronization::kEternalHandles);
6619
6620   // Iterate over pointers being held by inactive threads.
6621   isolate_->thread_manager()->Iterate(v);
6622   v->Synchronize(VisitorSynchronization::kThreadManager);
6623
6624   // Iterate over the pointers the Serialization/Deserialization code is
6625   // holding.
6626   // During garbage collection this keeps the partial snapshot cache alive.
6627   // During deserialization of the startup snapshot this creates the partial
6628   // snapshot cache and deserializes the objects it refers to.  During
6629   // serialization this does nothing, since the partial snapshot cache is
6630   // empty.  However, the next thing we do is create the partial snapshot,
6631   // filling up the partial snapshot cache with objects it needs as we go.
6632   SerializerDeserializer::Iterate(v);
6633   // We don't do a v->Synchronize call here, because in debug mode that will
6634   // output a flag to the snapshot.  However at this point the serializer and
6635   // deserializer are deliberately a little unsynchronized (see above) so the
6636   // checking of the sync flag in the snapshot would fail.
6637 }
6638
6639
6640 // TODO(1236194): Since the heap size is configurable on the command line
6641 // and through the API, we should gracefully handle the case that the heap
6642 // size is not big enough to fit all the initial objects.
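     // All sizes are given in bytes and a zero (or negative) argument keeps the
     // corresponding default.  For example, with purely illustrative values,
     // ConfigureHeap(8 * MB, 700 * MB, 256 * MB) would request 8 MB semispaces,
     // a 700 MB old generation and 256 MB of executable code, subject to the
     // rounding and clamping performed below.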
6643 bool Heap::ConfigureHeap(int max_semispace_size,
6644                          intptr_t max_old_gen_size,
6645                          intptr_t max_executable_size) {
6646   if (HasBeenSetUp()) return false;
6647
6648   if (FLAG_stress_compaction) {
6649     // This will cause more frequent GCs when stressing.
6650     max_semispace_size_ = Page::kPageSize;
6651   }
6652
6653   if (max_semispace_size > 0) {
6654     if (max_semispace_size < Page::kPageSize) {
6655       max_semispace_size = Page::kPageSize;
6656       if (FLAG_trace_gc) {
6657         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6658                  Page::kPageSize >> 10);
6659       }
6660     }
6661     max_semispace_size_ = max_semispace_size;
6662   }
6663
6664   if (Snapshot::IsEnabled()) {
6665     // If we are using a snapshot we always reserve the default amount
6666     // of memory for each semispace because code in the snapshot has
6667     // write-barrier code that relies on the size and alignment of new
6668     // space.  We therefore cannot use a larger max semispace size
6669     // than the default reserved semispace size.
6670     if (max_semispace_size_ > reserved_semispace_size_) {
6671       max_semispace_size_ = reserved_semispace_size_;
6672       if (FLAG_trace_gc) {
6673         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6674                  reserved_semispace_size_ >> 10);
6675       }
6676     }
6677   } else {
6678     // If we are not using snapshots we reserve space for the actual
6679     // max semispace size.
6680     reserved_semispace_size_ = max_semispace_size_;
6681   }
6682
6683   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6684   if (max_executable_size > 0) {
6685     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6686   }
6687
6688   // The max executable size must be less than or equal to the max old
6689   // generation size.
6690   if (max_executable_size_ > max_old_generation_size_) {
6691     max_executable_size_ = max_old_generation_size_;
6692   }
6693
6694   // The new space size must be a power of two to support single-bit testing
6695   // for containment.
6696   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6697   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6698   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6699
6700   // The external allocation limit should be below 256 MB on all architectures
6701   // to avoid unnecessary low memory notifications, as that is the threshold
6702   // for some embedders.
6703   external_allocation_limit_ = 12 * max_semispace_size_;
6704   ASSERT(external_allocation_limit_ <= 256 * MB);
6705
6706   // The old generation is paged and needs at least one page for each space.
6707   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6708   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6709                                                        Page::kPageSize),
6710                                  RoundUp(max_old_generation_size_,
6711                                          Page::kPageSize));
6712
6713   configured_ = true;
6714   return true;
6715 }
6716
6717
6718 bool Heap::ConfigureHeapDefault() {
6719   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6720                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6721                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6722 }
6723
6724
6725 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6726   *stats->start_marker = HeapStats::kStartMarker;
6727   *stats->end_marker = HeapStats::kEndMarker;
6728   *stats->new_space_size = new_space_.SizeAsInt();
6729   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6730   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6731   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6732   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6733   *stats->old_data_space_capacity = old_data_space_->Capacity();
6734   *stats->code_space_size = code_space_->SizeOfObjects();
6735   *stats->code_space_capacity = code_space_->Capacity();
6736   *stats->map_space_size = map_space_->SizeOfObjects();
6737   *stats->map_space_capacity = map_space_->Capacity();
6738   *stats->cell_space_size = cell_space_->SizeOfObjects();
6739   *stats->cell_space_capacity = cell_space_->Capacity();
6740   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6741   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6742   *stats->lo_space_size = lo_space_->Size();
6743   isolate_->global_handles()->RecordStats(stats);
6744   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6745   *stats->memory_allocator_capacity =
6746       isolate()->memory_allocator()->Size() +
6747       isolate()->memory_allocator()->Available();
6748   *stats->os_error = OS::GetLastError();
6750   if (take_snapshot) {
6751     HeapIterator iterator(this);
6752     for (HeapObject* obj = iterator.next();
6753          obj != NULL;
6754          obj = iterator.next()) {
6755       InstanceType type = obj->map()->instance_type();
6756       ASSERT(0 <= type && type <= LAST_TYPE);
6757       stats->objects_per_type[type]++;
6758       stats->size_per_type[type] += obj->Size();
6759     }
6760   }
6761 }
6762
6763
6764 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6765   return old_pointer_space_->SizeOfObjects()
6766       + old_data_space_->SizeOfObjects()
6767       + code_space_->SizeOfObjects()
6768       + map_space_->SizeOfObjects()
6769       + cell_space_->SizeOfObjects()
6770       + property_cell_space_->SizeOfObjects()
6771       + lo_space_->SizeOfObjects();
6772 }
6773
6774
6775 intptr_t Heap::PromotedExternalMemorySize() {
6776   if (amount_of_external_allocated_memory_
6777       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6778   return amount_of_external_allocated_memory_
6779       - amount_of_external_allocated_memory_at_last_global_gc_;
6780 }
6781
6782
6783 V8_DECLARE_ONCE(initialize_gc_once);
6784
6785 static void InitializeGCOnce() {
6786   InitializeScavengingVisitorsTables();
6787   NewSpaceScavenger::Initialize();
6788   MarkCompactCollector::Initialize();
6789 }
6790
6791
6792 bool Heap::SetUp() {
6793 #ifdef DEBUG
6794   allocation_timeout_ = FLAG_gc_interval;
6795 #endif
6796
6797   // Initialize heap spaces and initial maps and objects. Whenever something
6798   // goes wrong, just return false. The caller should check the results and
6799   // call Heap::TearDown() to release allocated memory.
6800   //
6801   // If the heap is not yet configured (e.g. through the API), configure it.
6802   // Configuration is based on the flags new-space-size (really the semispace
6803   // size) and old-space-size if set or the initial values of semispace_size_
6804   // and old_generation_size_ otherwise.
6805   if (!configured_) {
6806     if (!ConfigureHeapDefault()) return false;
6807   }
6808
6809   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6810
6811   MarkMapPointersAsEncoded(false);
6812
6813   // Set up memory allocator.
6814   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6815       return false;
6816
6817   // Set up new space.
6818   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6819     return false;
6820   }
6821
6822   // Initialize old pointer space.
6823   old_pointer_space_ =
6824       new OldSpace(this,
6825                    max_old_generation_size_,
6826                    OLD_POINTER_SPACE,
6827                    NOT_EXECUTABLE);
6828   if (old_pointer_space_ == NULL) return false;
6829   if (!old_pointer_space_->SetUp()) return false;
6830
6831   // Initialize old data space.
6832   old_data_space_ =
6833       new OldSpace(this,
6834                    max_old_generation_size_,
6835                    OLD_DATA_SPACE,
6836                    NOT_EXECUTABLE);
6837   if (old_data_space_ == NULL) return false;
6838   if (!old_data_space_->SetUp()) return false;
6839
6840   // Initialize the code space, set its maximum capacity to the old
6841   // generation size. It needs executable memory.
6842   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6843   // virtual address space, so that they can call each other with near calls.
6844   if (code_range_size_ > 0) {
6845     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6846       return false;
6847     }
6848   }
6849
6850   code_space_ =
6851       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6852   if (code_space_ == NULL) return false;
6853   if (!code_space_->SetUp()) return false;
6854
6855   // Initialize map space.
6856   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6857   if (map_space_ == NULL) return false;
6858   if (!map_space_->SetUp()) return false;
6859
6860   // Initialize simple cell space.
6861   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6862   if (cell_space_ == NULL) return false;
6863   if (!cell_space_->SetUp()) return false;
6864
6865   // Initialize global property cell space.
6866   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6867                                                PROPERTY_CELL_SPACE);
6868   if (property_cell_space_ == NULL) return false;
6869   if (!property_cell_space_->SetUp()) return false;
6870
6871   // The large object space may contain code or data.  We set the memory
6872   // to be non-executable here for safety, but this means we need to enable it
6873   // explicitly when allocating large code objects.
6874   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6875   if (lo_space_ == NULL) return false;
6876   if (!lo_space_->SetUp()) return false;
6877
6878   // Set up the seed that is used to randomize the string hash function.
6879   ASSERT(hash_seed() == 0);
6880   if (FLAG_randomize_hashes) {
6881     if (FLAG_hash_seed == 0) {
6882       set_hash_seed(
6883           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6884     } else {
6885       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6886     }
6887   }
6888
6889   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6890   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6891
6892   store_buffer()->SetUp();
6893
6894   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6895 #ifdef DEBUG
6896   relocation_mutex_locked_by_optimizer_thread_ = false;
6897 #endif  // DEBUG
6898
6899   return true;
6900 }
6901
6902
6903 bool Heap::CreateHeapObjects() {
6904   // Create initial maps.
6905   if (!CreateInitialMaps()) return false;
6906   if (!CreateApiObjects()) return false;
6907
6908   // Create initial objects
6909   if (!CreateInitialObjects()) return false;
6910
6911   native_contexts_list_ = undefined_value();
6912   array_buffers_list_ = undefined_value();
6913   allocation_sites_list_ = undefined_value();
6914   return true;
6915 }
6916
6917
6918 void Heap::SetStackLimits() {
6919   ASSERT(isolate_ != NULL);
6920   ASSERT(isolate_ == isolate());
6921   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6922   // something that looks like an out of range Smi to the GC.
6923
6924   // Set up the special root array entries containing the stack limits.
6925   // These are actually addresses, but the tag makes the GC ignore it.
6926   roots_[kStackLimitRootIndex] =
6927       reinterpret_cast<Object*>(
6928           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6929   roots_[kRealStackLimitRootIndex] =
6930       reinterpret_cast<Object*>(
6931           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6932 }
6933
6934
6935 void Heap::TearDown() {
6936 #ifdef VERIFY_HEAP
6937   if (FLAG_verify_heap) {
6938     Verify();
6939   }
6940 #endif
6941
6942   if (FLAG_print_cumulative_gc_stat) {
6943     PrintF("\n");
6944     PrintF("gc_count=%d ", gc_count_);
6945     PrintF("mark_sweep_count=%d ", ms_count_);
6946     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6947     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6948     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6949     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6950            get_max_alive_after_gc());
6951     PrintF("total_marking_time=%.1f ", marking_time());
6952     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6953     PrintF("\n\n");
6954   }
6955
6956   TearDownArrayBuffers();
6957
6958   isolate_->global_handles()->TearDown();
6959
6960   external_string_table_.TearDown();
6961
6962   mark_compact_collector()->TearDown();
6963
6964   new_space_.TearDown();
6965
6966   if (old_pointer_space_ != NULL) {
6967     old_pointer_space_->TearDown();
6968     delete old_pointer_space_;
6969     old_pointer_space_ = NULL;
6970   }
6971
6972   if (old_data_space_ != NULL) {
6973     old_data_space_->TearDown();
6974     delete old_data_space_;
6975     old_data_space_ = NULL;
6976   }
6977
6978   if (code_space_ != NULL) {
6979     code_space_->TearDown();
6980     delete code_space_;
6981     code_space_ = NULL;
6982   }
6983
6984   if (map_space_ != NULL) {
6985     map_space_->TearDown();
6986     delete map_space_;
6987     map_space_ = NULL;
6988   }
6989
6990   if (cell_space_ != NULL) {
6991     cell_space_->TearDown();
6992     delete cell_space_;
6993     cell_space_ = NULL;
6994   }
6995
6996   if (property_cell_space_ != NULL) {
6997     property_cell_space_->TearDown();
6998     delete property_cell_space_;
6999     property_cell_space_ = NULL;
7000   }
7001
7002   if (lo_space_ != NULL) {
7003     lo_space_->TearDown();
7004     delete lo_space_;
7005     lo_space_ = NULL;
7006   }
7007
7008   store_buffer()->TearDown();
7009   incremental_marking()->TearDown();
7010
7011   isolate_->memory_allocator()->TearDown();
7012
7013   delete relocation_mutex_;
7014 }
7015
7016
7017 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7018   ASSERT(callback != NULL);
7019   GCPrologueCallbackPair pair(callback, gc_type);
7020   ASSERT(!gc_prologue_callbacks_.Contains(pair));
7021   return gc_prologue_callbacks_.Add(pair);
7022 }
7023
7024
7025 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7026   ASSERT(callback != NULL);
7027   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7028     if (gc_prologue_callbacks_[i].callback == callback) {
7029       gc_prologue_callbacks_.Remove(i);
7030       return;
7031     }
7032   }
7033   UNREACHABLE();
7034 }
7035
7036
7037 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7038   ASSERT(callback != NULL);
7039   GCEpilogueCallbackPair pair(callback, gc_type);
7040   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7041   return gc_epilogue_callbacks_.Add(pair);
7042 }
7043
7044
7045 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7046   ASSERT(callback != NULL);
7047   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7048     if (gc_epilogue_callbacks_[i].callback == callback) {
7049       gc_epilogue_callbacks_.Remove(i);
7050       return;
7051     }
7052   }
7053   UNREACHABLE();
7054 }
7055
7056
7057 #ifdef DEBUG
7058
7059 class PrintHandleVisitor: public ObjectVisitor {
7060  public:
7061   void VisitPointers(Object** start, Object** end) {
7062     for (Object** p = start; p < end; p++)
7063       PrintF("  handle %p to %p\n",
7064              reinterpret_cast<void*>(p),
7065              reinterpret_cast<void*>(*p));
7066   }
7067 };
7068
7069
7070 void Heap::PrintHandles() {
7071   PrintF("Handles:\n");
7072   PrintHandleVisitor v;
7073   isolate_->handle_scope_implementer()->Iterate(&v);
7074 }
7075
7076 #endif
7077
7078
7079 Space* AllSpaces::next() {
7080   switch (counter_++) {
7081     case NEW_SPACE:
7082       return heap_->new_space();
7083     case OLD_POINTER_SPACE:
7084       return heap_->old_pointer_space();
7085     case OLD_DATA_SPACE:
7086       return heap_->old_data_space();
7087     case CODE_SPACE:
7088       return heap_->code_space();
7089     case MAP_SPACE:
7090       return heap_->map_space();
7091     case CELL_SPACE:
7092       return heap_->cell_space();
7093     case PROPERTY_CELL_SPACE:
7094       return heap_->property_cell_space();
7095     case LO_SPACE:
7096       return heap_->lo_space();
7097     default:
7098       return NULL;
7099   }
7100 }
7101
7102
7103 PagedSpace* PagedSpaces::next() {
7104   switch (counter_++) {
7105     case OLD_POINTER_SPACE:
7106       return heap_->old_pointer_space();
7107     case OLD_DATA_SPACE:
7108       return heap_->old_data_space();
7109     case CODE_SPACE:
7110       return heap_->code_space();
7111     case MAP_SPACE:
7112       return heap_->map_space();
7113     case CELL_SPACE:
7114       return heap_->cell_space();
7115     case PROPERTY_CELL_SPACE:
7116       return heap_->property_cell_space();
7117     default:
7118       return NULL;
7119   }
7120 }
7121
7122
7123
7124 OldSpace* OldSpaces::next() {
7125   switch (counter_++) {
7126     case OLD_POINTER_SPACE:
7127       return heap_->old_pointer_space();
7128     case OLD_DATA_SPACE:
7129       return heap_->old_data_space();
7130     case CODE_SPACE:
7131       return heap_->code_space();
7132     default:
7133       return NULL;
7134   }
7135 }
7136
7137
7138 SpaceIterator::SpaceIterator(Heap* heap)
7139     : heap_(heap),
7140       current_space_(FIRST_SPACE),
7141       iterator_(NULL),
7142       size_func_(NULL) {
7143 }
7144
7145
7146 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7147     : heap_(heap),
7148       current_space_(FIRST_SPACE),
7149       iterator_(NULL),
7150       size_func_(size_func) {
7151 }
7152
7153
7154 SpaceIterator::~SpaceIterator() {
7155   // Delete active iterator if any.
7156   delete iterator_;
7157 }
7158
7159
7160 bool SpaceIterator::has_next() {
7161   // Iterate until no more spaces.
7162   return current_space_ != LAST_SPACE;
7163 }
7164
7165
7166 ObjectIterator* SpaceIterator::next() {
7167   if (iterator_ != NULL) {
7168     delete iterator_;
7169     iterator_ = NULL;
7170     // Move to the next space
7171     current_space_++;
7172     if (current_space_ > LAST_SPACE) {
7173       return NULL;
7174     }
7175   }
7176
7177   // Return iterator for the new current space.
7178   return CreateIterator();
7179 }
7180
7181
7182 // Create an iterator for the space to iterate.
7183 ObjectIterator* SpaceIterator::CreateIterator() {
7184   ASSERT(iterator_ == NULL);
7185
7186   switch (current_space_) {
7187     case NEW_SPACE:
7188       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7189       break;
7190     case OLD_POINTER_SPACE:
7191       iterator_ =
7192           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7193       break;
7194     case OLD_DATA_SPACE:
7195       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7196       break;
7197     case CODE_SPACE:
7198       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7199       break;
7200     case MAP_SPACE:
7201       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7202       break;
7203     case CELL_SPACE:
7204       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7205       break;
7206     case PROPERTY_CELL_SPACE:
7207       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7208                                          size_func_);
7209       break;
7210     case LO_SPACE:
7211       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7212       break;
7213   }
7214
7215   // Return the newly allocated iterator.
7216   ASSERT(iterator_ != NULL);
7217   return iterator_;
7218 }
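// A minimal sketch of driving SpaceIterator by hand (hypothetical Heap*
// "heap"); HeapIterator further down wraps exactly this pattern:
//
//   SpaceIterator it(heap);
//   while (it.has_next()) {
//     ObjectIterator* objects = it.next();
//     for (HeapObject* obj = objects->next_object();
//          obj != NULL;
//          obj = objects->next_object()) {
//       // ... inspect obj; the object iterator is owned and freed by "it" ...
//     }
//   }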
7219
7220
7221 class HeapObjectsFilter {
7222  public:
7223   virtual ~HeapObjectsFilter() {}
7224   virtual bool SkipObject(HeapObject* object) = 0;
7225 };
7226
7227
7228 class UnreachableObjectsFilter : public HeapObjectsFilter {
7229  public:
7230   UnreachableObjectsFilter() {
7231     MarkReachableObjects();
7232   }
7233
7234   ~UnreachableObjectsFilter() {
7235     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7236   }
7237
7238   bool SkipObject(HeapObject* object) {
7239     MarkBit mark_bit = Marking::MarkBitFrom(object);
7240     return !mark_bit.Get();
7241   }
7242
7243  private:
7244   class MarkingVisitor : public ObjectVisitor {
7245    public:
7246     MarkingVisitor() : marking_stack_(10) {}
7247
7248     void VisitPointers(Object** start, Object** end) {
7249       for (Object** p = start; p < end; p++) {
7250         if (!(*p)->IsHeapObject()) continue;
7251         HeapObject* obj = HeapObject::cast(*p);
7252         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7253         if (!mark_bit.Get()) {
7254           mark_bit.Set();
7255           marking_stack_.Add(obj);
7256         }
7257       }
7258     }
7259
7260     void TransitiveClosure() {
7261       while (!marking_stack_.is_empty()) {
7262         HeapObject* obj = marking_stack_.RemoveLast();
7263         obj->Iterate(this);
7264       }
7265     }
7266
7267    private:
7268     List<HeapObject*> marking_stack_;
7269   };
7270
7271   void MarkReachableObjects() {
7272     Heap* heap = Isolate::Current()->heap();
7273     MarkingVisitor visitor;
7274     heap->IterateRoots(&visitor, VISIT_ALL);
7275     visitor.TransitiveClosure();
7276   }
7277
7278   DisallowHeapAllocation no_allocation_;
7279 };
7280
7281
7282 HeapIterator::HeapIterator(Heap* heap)
7283     : heap_(heap),
7284       filtering_(HeapIterator::kNoFiltering),
7285       filter_(NULL) {
7286   Init();
7287 }
7288
7289
7290 HeapIterator::HeapIterator(Heap* heap,
7291                            HeapIterator::HeapObjectsFiltering filtering)
7292     : heap_(heap),
7293       filtering_(filtering),
7294       filter_(NULL) {
7295   Init();
7296 }
7297
7298
7299 HeapIterator::~HeapIterator() {
7300   Shutdown();
7301 }
7302
7303
7304 void HeapIterator::Init() {
7305   // Start the iteration.
7306   space_iterator_ = new SpaceIterator(heap_);
7307   switch (filtering_) {
7308     case kFilterUnreachable:
7309       filter_ = new UnreachableObjectsFilter;
7310       break;
7311     default:
7312       break;
7313   }
7314   object_iterator_ = space_iterator_->next();
7315 }
7316
7317
7318 void HeapIterator::Shutdown() {
7319 #ifdef DEBUG
7320   // Assert that in filtering mode we have iterated through all
7321   // objects. Otherwise, the heap will be left in an inconsistent state.
7322   if (filtering_ != kNoFiltering) {
7323     ASSERT(object_iterator_ == NULL);
7324   }
7325 #endif
7326   // Make sure the last iterator is deallocated.
7327   delete space_iterator_;
7328   space_iterator_ = NULL;
7329   object_iterator_ = NULL;
7330   delete filter_;
7331   filter_ = NULL;
7332 }
7333
7334
7335 HeapObject* HeapIterator::next() {
7336   if (filter_ == NULL) return NextObject();
7337
7338   HeapObject* obj = NextObject();
7339   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7340   return obj;
7341 }
7342
7343
7344 HeapObject* HeapIterator::NextObject() {
7345   // No iterator means we are done.
7346   if (object_iterator_ == NULL) return NULL;
7347
7348   if (HeapObject* obj = object_iterator_->next_object()) {
7349     // If the current iterator has more objects we are fine.
7350     return obj;
7351   } else {
7352     // Go through the spaces looking for one that has objects.
7353     while (space_iterator_->has_next()) {
7354       object_iterator_ = space_iterator_->next();
7355       if (HeapObject* obj = object_iterator_->next_object()) {
7356         return obj;
7357       }
7358     }
7359   }
7360   // Done with the last space.
7361   object_iterator_ = NULL;
7362   return NULL;
7363 }
7364
7365
7366 void HeapIterator::reset() {
7367   // Restart the iterator.
7368   Shutdown();
7369   Init();
7370 }
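// A minimal usage sketch (hypothetical Heap* "heap"): walk every live heap
// object, skipping anything the UnreachableObjectsFilter above could not
// reach from the roots. Note that in filtering mode the iteration must run
// to completion, as asserted in Shutdown().
//
//   HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     obj->ShortPrint();
//   }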
7371
7372
7373 #ifdef DEBUG
7374
7375 Object* const PathTracer::kAnyGlobalObject = NULL;
7376
7377 class PathTracer::MarkVisitor: public ObjectVisitor {
7378  public:
7379   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7380   void VisitPointers(Object** start, Object** end) {
7381     // Scan all HeapObject pointers in [start, end)
7382     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7383       if ((*p)->IsHeapObject())
7384         tracer_->MarkRecursively(p, this);
7385     }
7386   }
7387
7388  private:
7389   PathTracer* tracer_;
7390 };
7391
7392
7393 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7394  public:
7395   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7396   void VisitPointers(Object** start, Object** end) {
7397     // Scan all HeapObject pointers in [start, end)
7398     for (Object** p = start; p < end; p++) {
7399       if ((*p)->IsHeapObject())
7400         tracer_->UnmarkRecursively(p, this);
7401     }
7402   }
7403
7404  private:
7405   PathTracer* tracer_;
7406 };
7407
7408
7409 void PathTracer::VisitPointers(Object** start, Object** end) {
7410   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7411   // Visit all HeapObject pointers in [start, end)
7412   for (Object** p = start; !done && (p < end); p++) {
7413     if ((*p)->IsHeapObject()) {
7414       TracePathFrom(p);
7415       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7416     }
7417   }
7418 }
7419
7420
7421 void PathTracer::Reset() {
7422   found_target_ = false;
7423   object_stack_.Clear();
7424 }
7425
7426
7427 void PathTracer::TracePathFrom(Object** root) {
7428   ASSERT((search_target_ == kAnyGlobalObject) ||
7429          search_target_->IsHeapObject());
7430   found_target_in_trace_ = false;
7431   Reset();
7432
7433   MarkVisitor mark_visitor(this);
7434   MarkRecursively(root, &mark_visitor);
7435
7436   UnmarkVisitor unmark_visitor(this);
7437   UnmarkRecursively(root, &unmark_visitor);
7438
7439   ProcessResults();
7440 }
7441
7442
7443 static bool SafeIsNativeContext(HeapObject* obj) {
7444   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7445 }
7446
7447
7448 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7449   if (!(*p)->IsHeapObject()) return;
7450
7451   HeapObject* obj = HeapObject::cast(*p);
7452
7453   Object* map = obj->map();
7454
7455   if (!map->IsHeapObject()) return;  // visited before
7456
7457   if (found_target_in_trace_) return;  // stop if target found
7458   object_stack_.Add(obj);
7459   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7460       (obj == search_target_)) {
7461     found_target_in_trace_ = true;
7462     found_target_ = true;
7463     return;
7464   }
7465
7466   bool is_native_context = SafeIsNativeContext(obj);
7467
7468   // not visited yet
7469   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7470
7471   Address map_addr = map_p->address();
7472
7473   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7474
7475   // Scan the object body.
7476   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7477     // This is specialized to scan Context objects properly.
7478     Object** start = reinterpret_cast<Object**>(obj->address() +
7479                                                 Context::kHeaderSize);
7480     Object** end = reinterpret_cast<Object**>(obj->address() +
7481         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7482     mark_visitor->VisitPointers(start, end);
7483   } else {
7484     obj->IterateBody(map_p->instance_type(),
7485                      obj->SizeFromMap(map_p),
7486                      mark_visitor);
7487   }
7488
7489   // Scan the map after the body because the body is a lot more interesting
7490   // when doing leak detection.
7491   MarkRecursively(&map, mark_visitor);
7492
7493   if (!found_target_in_trace_)  // don't pop if found the target
7494     object_stack_.RemoveLast();
7495 }
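// The tracer marks objects without a mark bitmap: MarkRecursively() above
// offsets the object's map word by kMarkTag, so obj->map() no longer looks
// like a HeapObject and the "visited before" check fires on the next visit.
// UnmarkRecursively() below subtracts the same tag again. Schematically:
//
//   unmarked:  map word == map                        (passes IsHeapObject())
//   marked:    map word == map->address() + kMarkTag  (fails IsHeapObject())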
7496
7497
7498 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7499   if (!(*p)->IsHeapObject()) return;
7500
7501   HeapObject* obj = HeapObject::cast(*p);
7502
7503   Object* map = obj->map();
7504
7505   if (map->IsHeapObject()) return;  // unmarked already
7506
7507   Address map_addr = reinterpret_cast<Address>(map);
7508
7509   map_addr -= kMarkTag;
7510
7511   ASSERT_TAG_ALIGNED(map_addr);
7512
7513   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7514
7515   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7516
7517   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7518
7519   obj->IterateBody(Map::cast(map_p)->instance_type(),
7520                    obj->SizeFromMap(Map::cast(map_p)),
7521                    unmark_visitor);
7522 }
7523
7524
7525 void PathTracer::ProcessResults() {
7526   if (found_target_) {
7527     PrintF("=====================================\n");
7528     PrintF("====        Path to object       ====\n");
7529     PrintF("=====================================\n\n");
7530
7531     ASSERT(!object_stack_.is_empty());
7532     for (int i = 0; i < object_stack_.length(); i++) {
7533       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7534       Object* obj = object_stack_[i];
7535       obj->Print();
7536     }
7537     PrintF("=====================================\n");
7538   }
7539 }
7540
7541
7542 // Triggers a depth-first traversal of reachable objects from one
7543 // given root object and finds a path to a specific heap object and
7544 // prints it.
7545 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7546   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7547   tracer.VisitPointer(&root);
7548 }
7549
7550
7551 // Triggers a depth-first traversal of reachable objects from roots
7552 // and finds a path to a specific heap object and prints it.
7553 void Heap::TracePathToObject(Object* target) {
7554   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7555   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7556 }
7557
7558
7559 // Triggers a depth-first traversal of reachable objects from roots
7560 // and finds a path to any global object and prints it. Useful for
7561 // determining the source for leaks of global objects.
7562 void Heap::TracePathToGlobal() {
7563   PathTracer tracer(PathTracer::kAnyGlobalObject,
7564                     PathTracer::FIND_ALL,
7565                     VISIT_ALL);
7566   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7567 }
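// A minimal debugging sketch (DEBUG builds only; "isolate", "suspect" and
// "some_root" are hypothetical): print a retention path when an object is
// unexpectedly kept alive.
//
//   Heap* heap = isolate->heap();
//   heap->TracePathToObject(suspect);                 // from the strong roots
//   heap->TracePathToObjectFrom(suspect, some_root);  // from a chosen root
//   heap->TracePathToGlobal();                        // to any global object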
7568 #endif
7569
7570
7571 static intptr_t CountTotalHolesSize(Heap* heap) {
7572   intptr_t holes_size = 0;
7573   OldSpaces spaces(heap);
7574   for (OldSpace* space = spaces.next();
7575        space != NULL;
7576        space = spaces.next()) {
7577     holes_size += space->Waste() + space->Available();
7578   }
7579   return holes_size;
7580 }
7581
7582
7583 GCTracer::GCTracer(Heap* heap,
7584                    const char* gc_reason,
7585                    const char* collector_reason)
7586     : start_time_(0.0),
7587       start_object_size_(0),
7588       start_memory_size_(0),
7589       gc_count_(0),
7590       full_gc_count_(0),
7591       allocated_since_last_gc_(0),
7592       spent_in_mutator_(0),
7593       promoted_objects_size_(0),
7594       nodes_died_in_new_space_(0),
7595       nodes_copied_in_new_space_(0),
7596       nodes_promoted_(0),
7597       heap_(heap),
7598       gc_reason_(gc_reason),
7599       collector_reason_(collector_reason) {
7600   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7601   start_time_ = OS::TimeCurrentMillis();
7602   start_object_size_ = heap_->SizeOfObjects();
7603   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7604
7605   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7606     scopes_[i] = 0;
7607   }
7608
7609   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7610
7611   allocated_since_last_gc_ =
7612       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7613
7614   if (heap_->last_gc_end_timestamp_ > 0) {
7615     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7616   }
7617
7618   steps_count_ = heap_->incremental_marking()->steps_count();
7619   steps_took_ = heap_->incremental_marking()->steps_took();
7620   longest_step_ = heap_->incremental_marking()->longest_step();
7621   steps_count_since_last_gc_ =
7622       heap_->incremental_marking()->steps_count_since_last_gc();
7623   steps_took_since_last_gc_ =
7624       heap_->incremental_marking()->steps_took_since_last_gc();
7625 }
7626
7627
7628 GCTracer::~GCTracer() {
7629   // Print ONE line iff a tracing flag is set.
7630   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7631
7632   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7633
7634   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7635   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7636
7637   double time = heap_->last_gc_end_timestamp_ - start_time_;
7638
7639   // Update cumulative GC statistics if required.
7640   if (FLAG_print_cumulative_gc_stat) {
7641     heap_->total_gc_time_ms_ += time;
7642     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7643     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7644                                      heap_->alive_after_last_gc_);
7645     if (!first_gc) {
7646       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7647                                    spent_in_mutator_);
7648     }
7649   } else if (FLAG_trace_gc_verbose) {
7650     heap_->total_gc_time_ms_ += time;
7651   }
7652
7653   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7654
7655   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7656
7657   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7658   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7659
7660   if (!FLAG_trace_gc_nvp) {
7661     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7662
7663     double end_memory_size_mb =
7664         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7665
7666     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7667            CollectorString(),
7668            static_cast<double>(start_object_size_) / MB,
7669            static_cast<double>(start_memory_size_) / MB,
7670            SizeOfHeapObjects(),
7671            end_memory_size_mb);
7672
7673     if (external_time > 0) PrintF("%d / ", external_time);
7674     PrintF("%.1f ms", time);
7675     if (steps_count_ > 0) {
7676       if (collector_ == SCAVENGER) {
7677         PrintF(" (+ %.1f ms in %d steps since last GC)",
7678                steps_took_since_last_gc_,
7679                steps_count_since_last_gc_);
7680       } else {
7681         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7682                    "biggest step %.1f ms)",
7683                steps_took_,
7684                steps_count_,
7685                longest_step_);
7686       }
7687     }
7688
7689     if (gc_reason_ != NULL) {
7690       PrintF(" [%s]", gc_reason_);
7691     }
7692
7693     if (collector_reason_ != NULL) {
7694       PrintF(" [%s]", collector_reason_);
7695     }
7696
7697     PrintF(".\n");
7698   } else {
7699     PrintF("pause=%.1f ", time);
7700     PrintF("mutator=%.1f ", spent_in_mutator_);
7701     PrintF("gc=");
7702     switch (collector_) {
7703       case SCAVENGER:
7704         PrintF("s");
7705         break;
7706       case MARK_COMPACTOR:
7707         PrintF("ms");
7708         break;
7709       default:
7710         UNREACHABLE();
7711     }
7712     PrintF(" ");
7713
7714     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7715     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7716     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7717     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7718     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7719     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7720     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7721     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7722     PrintF("compaction_ptrs=%.1f ",
7723         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7724     PrintF("intracompaction_ptrs=%.1f ",
7725         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7726     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7727     PrintF("weakcollection_process=%.1f ",
7728         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7729     PrintF("weakcollection_clear=%.1f ",
7730         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7731
7732     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7733     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7734     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7735            in_free_list_or_wasted_before_gc_);
7736     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7737
7738     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7739     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7740     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7741     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7742     PrintF("nodes_promoted=%d ", nodes_promoted_);
7743
7744     if (collector_ == SCAVENGER) {
7745       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7746       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7747     } else {
7748       PrintF("stepscount=%d ", steps_count_);
7749       PrintF("stepstook=%.1f ", steps_took_);
7750       PrintF("longeststep=%.1f ", longest_step_);
7751     }
7752
7753     PrintF("\n");
7754   }
7755
7756   heap_->PrintShortHeapStatistics();
7757 }
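// For reference, the non-nvp branch above emits one line per GC of roughly
// this shape (the values here are illustrative, not real measurements):
//
//   [1234]      42 ms: Scavenge 3.2 (38.1) -> 2.4 (38.1) MB, 1.3 ms [allocation failure].
//
// i.e. pid, time since isolate init, collector name, object size and
// allocated memory before and after, pause time, and the GC reason.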
7758
7759
7760 const char* GCTracer::CollectorString() {
7761   switch (collector_) {
7762     case SCAVENGER:
7763       return "Scavenge";
7764     case MARK_COMPACTOR:
7765       return "Mark-sweep";
7766   }
7767   return "Unknown GC";
7768 }
7769
7770
7771 int KeyedLookupCache::Hash(Map* map, Name* name) {
7772   // Uses only lower 32 bits if pointers are larger.
7773   uintptr_t addr_hash =
7774       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7775   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7776 }
7777
7778
7779 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7780   int index = (Hash(map, name) & kHashMask);
7781   for (int i = 0; i < kEntriesPerBucket; i++) {
7782     Key& key = keys_[index + i];
7783     if ((key.map == map) && key.name->Equals(name)) {
7784       return field_offsets_[index + i];
7785     }
7786   }
7787   return kNotFound;
7788 }
7789
7790
7791 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7792   if (!name->IsUniqueName()) {
7793     String* internalized_string;
7794     if (!HEAP->InternalizeStringIfExists(
7795             String::cast(name), &internalized_string)) {
7796       return;
7797     }
7798     name = internalized_string;
7799   }
7800   // This cache is cleared only between mark compact passes, so we expect the
7801   // cache to only contain old space names.
7802   ASSERT(!HEAP->InNewSpace(name));
7803
7804   int index = (Hash(map, name) & kHashMask);
7805   // After a GC there will be free slots, so we use them in order (this may
7806   // help to get the most frequently used one in position 0).
7807   for (int i = 0; i < kEntriesPerBucket; i++) {
7808     Key& key = keys_[index + i];
7809     Object* free_entry_indicator = NULL;
7810     if (key.map == free_entry_indicator) {
7811       key.map = map;
7812       key.name = name;
7813       field_offsets_[index + i] = field_offset;
7814       return;
7815     }
7816   }
7817   // No free entry found in this bucket, so we move them all down one and
7818   // put the new entry at position zero.
7819   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7820     Key& key = keys_[index + i];
7821     Key& key2 = keys_[index + i - 1];
7822     key = key2;
7823     field_offsets_[index + i] = field_offsets_[index + i - 1];
7824   }
7825
7826   // Write the new first entry.
7827   Key& key = keys_[index];
7828   key.map = map;
7829   key.name = name;
7830   field_offsets_[index] = field_offset;
7831 }
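// A minimal usage sketch (hypothetical "isolate", "map" and "name"): the IC
// and runtime code consult Lookup() on keyed loads and record hits via
// Update().
//
//   KeyedLookupCache* cache = isolate->keyed_lookup_cache();
//   int offset = cache->Lookup(map, name);
//   if (offset == KeyedLookupCache::kNotFound) {
//     // ... compute the field offset on the slow path, then ...
//     cache->Update(map, name, offset);
//   }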
7832
7833
7834 void KeyedLookupCache::Clear() {
7835   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7836 }
7837
7838
7839 void DescriptorLookupCache::Clear() {
7840   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7841 }
7842
7843
7844 #ifdef DEBUG
7845 void Heap::GarbageCollectionGreedyCheck() {
7846   ASSERT(FLAG_gc_greedy);
7847   if (isolate_->bootstrapper()->IsActive()) return;
7848   if (disallow_allocation_failure()) return;
7849   CollectGarbage(NEW_SPACE);
7850 }
7851 #endif
7852
7853
7854 TranscendentalCache::SubCache::SubCache(Type t)
7855   : type_(t),
7856     isolate_(Isolate::Current()) {
7857   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7858   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7859   for (int i = 0; i < kCacheSize; i++) {
7860     elements_[i].in[0] = in0;
7861     elements_[i].in[1] = in1;
7862     elements_[i].output = NULL;
7863   }
7864 }
7865
7866
7867 void TranscendentalCache::Clear() {
7868   for (int i = 0; i < kNumberOfCaches; i++) {
7869     if (caches_[i] != NULL) {
7870       delete caches_[i];
7871       caches_[i] = NULL;
7872     }
7873   }
7874 }
7875
7876
7877 void ExternalStringTable::CleanUp() {
7878   int last = 0;
7879   for (int i = 0; i < new_space_strings_.length(); ++i) {
7880     if (new_space_strings_[i] == heap_->the_hole_value()) {
7881       continue;
7882     }
7883     if (heap_->InNewSpace(new_space_strings_[i])) {
7884       new_space_strings_[last++] = new_space_strings_[i];
7885     } else {
7886       old_space_strings_.Add(new_space_strings_[i]);
7887     }
7888   }
7889   new_space_strings_.Rewind(last);
7890   new_space_strings_.Trim();
7891
7892   last = 0;
7893   for (int i = 0; i < old_space_strings_.length(); ++i) {
7894     if (old_space_strings_[i] == heap_->the_hole_value()) {
7895       continue;
7896     }
7897     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7898     old_space_strings_[last++] = old_space_strings_[i];
7899   }
7900   old_space_strings_.Rewind(last);
7901   old_space_strings_.Trim();
7902 #ifdef VERIFY_HEAP
7903   if (FLAG_verify_heap) {
7904     Verify();
7905   }
7906 #endif
7907 }
7908
7909
7910 void ExternalStringTable::TearDown() {
7911   new_space_strings_.Free();
7912   old_space_strings_.Free();
7913 }
7914
7915
7916 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7917   chunk->set_next_chunk(chunks_queued_for_free_);
7918   chunks_queued_for_free_ = chunk;
7919 }
7920
7921
7922 void Heap::FreeQueuedChunks() {
7923   if (chunks_queued_for_free_ == NULL) return;
7924   MemoryChunk* next;
7925   MemoryChunk* chunk;
7926   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7927     next = chunk->next_chunk();
7928     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7929
7930     if (chunk->owner()->identity() == LO_SPACE) {
7931       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7932       // If FromAnyPointerAddress encounters a slot that belongs to a large
7933       // chunk queued for deletion it will fail to find the chunk because
7934       // it tries to search the list of pages owned by the large object
7935       // space, but queued chunks were detached from that list.
7936       // To work around this we split each large chunk into kPageSize-aligned
7937       // pieces and initialize the size, owner and flags fields of every piece.
7938       // If FromAnyPointerAddress encounters a slot that belongs to one of
7939       // these smaller pieces it will treat it as a slot on a normal Page.
7940       Address chunk_end = chunk->address() + chunk->size();
7941       MemoryChunk* inner = MemoryChunk::FromAddress(
7942           chunk->address() + Page::kPageSize);
7943       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7944       while (inner <= inner_last) {
7945         // Size of a large chunk is always a multiple of
7946         // OS::AllocateAlignment() so there is always
7947         // enough space for a fake MemoryChunk header.
7948         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7949         // Guard against overflow.
7950         if (area_end < inner->address()) area_end = chunk_end;
7951         inner->SetArea(inner->address(), area_end);
7952         inner->set_size(Page::kPageSize);
7953         inner->set_owner(lo_space());
7954         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7955         inner = MemoryChunk::FromAddress(
7956             inner->address() + Page::kPageSize);
7957       }
7958     }
7959   }
7960   isolate_->heap()->store_buffer()->Compact();
7961   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7962   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7963     next = chunk->next_chunk();
7964     isolate_->memory_allocator()->Free(chunk);
7965   }
7966   chunks_queued_for_free_ = NULL;
7967 }
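// A minimal sketch of the intended protocol ("heap" and "chunk" are
// hypothetical; the actual callers live in the sweeping and large-object
// code): chunks are queued while the store buffer may still contain slots
// pointing into them, and are only unmapped here once Compact()/Filter()
// above has dropped those slots.
//
//   heap->QueueMemoryChunkForFree(chunk);  // while tearing down the page
//   ...
//   heap->FreeQueuedChunks();              // later, at a safe point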
7968
7969
7970 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7971   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7972   // Tag the page pointer to make it findable in the dump file.
7973   if (compacted) {
7974     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7975   } else {
7976     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7977   }
7978   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7979       reinterpret_cast<Address>(p);
7980   remembered_unmapped_pages_index_++;
7981   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7982 }
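// Worked example (assuming 1 MB pages, i.e. the low 20 bits of a page address
// are zero): a compacted page at 0x3d200000 is remembered as 0x3d2c1ead and a
// merely unmapped one as 0x3d21d1ed, which makes both easy to spot in a crash
// dump while still identifying the original page.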
7983
7984
7985 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7986   memset(object_counts_, 0, sizeof(object_counts_));
7987   memset(object_sizes_, 0, sizeof(object_sizes_));
7988   if (clear_last_time_stats) {
7989     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7990     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7991   }
7992 }
7993
7994
7995 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7996
7997
7998 void Heap::CheckpointObjectStats() {
7999   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
8000   Counters* counters = isolate()->counters();
8001 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
8002   counters->count_of_##name()->Increment(                                      \
8003       static_cast<int>(object_counts_[name]));                                 \
8004   counters->count_of_##name()->Decrement(                                      \
8005       static_cast<int>(object_counts_last_time_[name]));                       \
8006   counters->size_of_##name()->Increment(                                       \
8007       static_cast<int>(object_sizes_[name]));                                  \
8008   counters->size_of_##name()->Decrement(                                       \
8009       static_cast<int>(object_sizes_last_time_[name]));
8010   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8011 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8012   int index;
8013 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8014   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
8015   counters->count_of_CODE_TYPE_##name()->Increment(       \
8016       static_cast<int>(object_counts_[index]));           \
8017   counters->count_of_CODE_TYPE_##name()->Decrement(       \
8018       static_cast<int>(object_counts_last_time_[index])); \
8019   counters->size_of_CODE_TYPE_##name()->Increment(        \
8020       static_cast<int>(object_sizes_[index]));            \
8021   counters->size_of_CODE_TYPE_##name()->Decrement(        \
8022       static_cast<int>(object_sizes_last_time_[index]));
8023   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8024 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8025 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8026   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8027   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8028       static_cast<int>(object_counts_[index]));           \
8029   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8030       static_cast<int>(object_counts_last_time_[index])); \
8031   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8032       static_cast<int>(object_sizes_[index]));            \
8033   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8034       static_cast<int>(object_sizes_last_time_[index]));
8035   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8036 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8037
8038   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8039   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8040   ClearObjectStats();
8041 }
8042
8043
8044 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8045   if (FLAG_parallel_recompilation) {
8046     heap_->relocation_mutex_->Lock();
8047 #ifdef DEBUG
8048     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8049         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8050 #endif  // DEBUG
8051   }
8052 }
8053
8054 } }  // namespace v8::internal