v8: upgrade to v8 3.20.9
[platform/upstream/nodejs.git] / deps / v8 / src / heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID) || V8_TARGET_ARCH_MIPS
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
93 // Will be 4 * reserved_semispace_size_ to ensure that young
94 // generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       property_cell_space_(NULL),
110       lo_space_(NULL),
111       gc_state_(NOT_IN_GC),
112       gc_post_processing_depth_(0),
113       ms_count_(0),
114       gc_count_(0),
115       remembered_unmapped_pages_index_(0),
116       unflattened_strings_length_(0),
117 #ifdef DEBUG
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       gc_count_at_last_idle_gc_(0),
157       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158       gcs_since_last_deopt_(0),
159 #ifdef VERIFY_HEAP
160       no_weak_embedded_maps_verification_scope_depth_(0),
161 #endif
162       promotion_queue_(this),
163       configured_(false),
164       chunks_queued_for_free_(NULL),
165       relocation_mutex_(NULL) {
166   // Allow build-time customization of the max semispace size. Building
167   // V8 with snapshots and a non-default max semispace size is much
168   // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171 #endif
172
173   intptr_t max_virtual = OS::MaxVirtualMemory();
174
175   if (max_virtual > 0) {
176     if (code_range_size_ > 0) {
177       // Reserve no more than 1/8 of the memory for the code range.
178       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179     }
180   }
181
182   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183   native_contexts_list_ = NULL;
184   array_buffers_list_ = Smi::FromInt(0);
185   allocation_sites_list_ = Smi::FromInt(0);
186   mark_compact_collector_.heap_ = this;
187   external_string_table_.heap_ = this;
188   // Put a dummy entry in the remembered pages so we can find the list the
189   // minidump even if there are no real unmapped pages.
190   RememberUnmappedPage(NULL, false);
191
192   ClearObjectStats(true);
193 }
194
195
196 intptr_t Heap::Capacity() {
197   if (!HasBeenSetUp()) return 0;
198
199   return new_space_.Capacity() +
200       old_pointer_space_->Capacity() +
201       old_data_space_->Capacity() +
202       code_space_->Capacity() +
203       map_space_->Capacity() +
204       cell_space_->Capacity() +
205       property_cell_space_->Capacity();
206 }
207
208
209 intptr_t Heap::CommittedMemory() {
210   if (!HasBeenSetUp()) return 0;
211
212   return new_space_.CommittedMemory() +
213       old_pointer_space_->CommittedMemory() +
214       old_data_space_->CommittedMemory() +
215       code_space_->CommittedMemory() +
216       map_space_->CommittedMemory() +
217       cell_space_->CommittedMemory() +
218       property_cell_space_->CommittedMemory() +
219       lo_space_->Size();
220 }
221
222
223 size_t Heap::CommittedPhysicalMemory() {
224   if (!HasBeenSetUp()) return 0;
225
226   return new_space_.CommittedPhysicalMemory() +
227       old_pointer_space_->CommittedPhysicalMemory() +
228       old_data_space_->CommittedPhysicalMemory() +
229       code_space_->CommittedPhysicalMemory() +
230       map_space_->CommittedPhysicalMemory() +
231       cell_space_->CommittedPhysicalMemory() +
232       property_cell_space_->CommittedPhysicalMemory() +
233       lo_space_->CommittedPhysicalMemory();
234 }
235
236
237 intptr_t Heap::CommittedMemoryExecutable() {
238   if (!HasBeenSetUp()) return 0;
239
240   return isolate()->memory_allocator()->SizeExecutable();
241 }
242
243
244 intptr_t Heap::Available() {
245   if (!HasBeenSetUp()) return 0;
246
247   return new_space_.Available() +
248       old_pointer_space_->Available() +
249       old_data_space_->Available() +
250       code_space_->Available() +
251       map_space_->Available() +
252       cell_space_->Available() +
253       property_cell_space_->Available();
254 }
255
256
257 bool Heap::HasBeenSetUp() {
258   return old_pointer_space_ != NULL &&
259          old_data_space_ != NULL &&
260          code_space_ != NULL &&
261          map_space_ != NULL &&
262          cell_space_ != NULL &&
263          property_cell_space_ != NULL &&
264          lo_space_ != NULL;
265 }
266
267
268 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
269   if (IntrusiveMarking::IsMarked(object)) {
270     return IntrusiveMarking::SizeOfMarkedObject(object);
271   }
272   return object->SizeFromMap(object->map());
273 }
274
275
276 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
277                                               const char** reason) {
278   // Is global GC requested?
279   if (space != NEW_SPACE) {
280     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
281     *reason = "GC in old space requested";
282     return MARK_COMPACTOR;
283   }
284
285   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
286     *reason = "GC in old space forced by flags";
287     return MARK_COMPACTOR;
288   }
289
290   // Is enough data promoted to justify a global GC?
291   if (OldGenerationAllocationLimitReached()) {
292     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
293     *reason = "promotion limit reached";
294     return MARK_COMPACTOR;
295   }
296
297   // Have allocation in OLD and LO failed?
298   if (old_gen_exhausted_) {
299     isolate_->counters()->
300         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
301     *reason = "old generations exhausted";
302     return MARK_COMPACTOR;
303   }
304
305   // Is there enough space left in OLD to guarantee that a scavenge can
306   // succeed?
307   //
308   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
309   // for object promotion. It counts only the bytes that the memory
310   // allocator has not yet allocated from the OS and assigned to any space,
311   // and does not count available bytes already in the old space or code
312   // space.  Undercounting is safe---we may get an unrequested full GC when
313   // a scavenge would have succeeded.
314   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
315     isolate_->counters()->
316         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
317     *reason = "scavenge might not succeed";
318     return MARK_COMPACTOR;
319   }
320
321   // Default
322   *reason = NULL;
323   return SCAVENGER;
324 }
325
326
327 // TODO(1238405): Combine the infrastructure for --heap-stats and
328 // --log-gc to avoid the complicated preprocessor and flag testing.
329 void Heap::ReportStatisticsBeforeGC() {
330   // Heap::ReportHeapStatistics will also log NewSpace statistics when
331   // compiled --log-gc is set.  The following logic is used to avoid
332   // double logging.
333 #ifdef DEBUG
334   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
335   if (FLAG_heap_stats) {
336     ReportHeapStatistics("Before GC");
337   } else if (FLAG_log_gc) {
338     new_space_.ReportStatistics();
339   }
340   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
341 #else
342   if (FLAG_log_gc) {
343     new_space_.CollectStatistics();
344     new_space_.ReportStatistics();
345     new_space_.ClearHistograms();
346   }
347 #endif  // DEBUG
348 }
349
350
351 void Heap::PrintShortHeapStatistics() {
352   if (!FLAG_trace_gc_verbose) return;
353   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
354                ", available: %6" V8_PTR_PREFIX "d KB\n",
355            isolate_->memory_allocator()->Size() / KB,
356            isolate_->memory_allocator()->Available() / KB);
357   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
358                ", available: %6" V8_PTR_PREFIX "d KB"
359                ", committed: %6" V8_PTR_PREFIX "d KB\n",
360            new_space_.Size() / KB,
361            new_space_.Available() / KB,
362            new_space_.CommittedMemory() / KB);
363   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
364                ", available: %6" V8_PTR_PREFIX "d KB"
365                ", committed: %6" V8_PTR_PREFIX "d KB\n",
366            old_pointer_space_->SizeOfObjects() / KB,
367            old_pointer_space_->Available() / KB,
368            old_pointer_space_->CommittedMemory() / KB);
369   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
370                ", available: %6" V8_PTR_PREFIX "d KB"
371                ", committed: %6" V8_PTR_PREFIX "d KB\n",
372            old_data_space_->SizeOfObjects() / KB,
373            old_data_space_->Available() / KB,
374            old_data_space_->CommittedMemory() / KB);
375   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
376                ", available: %6" V8_PTR_PREFIX "d KB"
377                ", committed: %6" V8_PTR_PREFIX "d KB\n",
378            code_space_->SizeOfObjects() / KB,
379            code_space_->Available() / KB,
380            code_space_->CommittedMemory() / KB);
381   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
382                ", available: %6" V8_PTR_PREFIX "d KB"
383                ", committed: %6" V8_PTR_PREFIX "d KB\n",
384            map_space_->SizeOfObjects() / KB,
385            map_space_->Available() / KB,
386            map_space_->CommittedMemory() / KB);
387   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
388                ", available: %6" V8_PTR_PREFIX "d KB"
389                ", committed: %6" V8_PTR_PREFIX "d KB\n",
390            cell_space_->SizeOfObjects() / KB,
391            cell_space_->Available() / KB,
392            cell_space_->CommittedMemory() / KB);
393   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
394                ", available: %6" V8_PTR_PREFIX "d KB"
395                ", committed: %6" V8_PTR_PREFIX "d KB\n",
396            property_cell_space_->SizeOfObjects() / KB,
397            property_cell_space_->Available() / KB,
398            property_cell_space_->CommittedMemory() / KB);
399   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
400                ", available: %6" V8_PTR_PREFIX "d KB"
401                ", committed: %6" V8_PTR_PREFIX "d KB\n",
402            lo_space_->SizeOfObjects() / KB,
403            lo_space_->Available() / KB,
404            lo_space_->CommittedMemory() / KB);
405   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
406                ", available: %6" V8_PTR_PREFIX "d KB"
407                ", committed: %6" V8_PTR_PREFIX "d KB\n",
408            this->SizeOfObjects() / KB,
409            this->Available() / KB,
410            this->CommittedMemory() / KB);
411   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
412            amount_of_external_allocated_memory_ / KB);
413   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
414 }
415
416
417 // TODO(1238405): Combine the infrastructure for --heap-stats and
418 // --log-gc to avoid the complicated preprocessor and flag testing.
419 void Heap::ReportStatisticsAfterGC() {
420   // Similar to the before GC, we use some complicated logic to ensure that
421   // NewSpace statistics are logged exactly once when --log-gc is turned on.
422 #if defined(DEBUG)
423   if (FLAG_heap_stats) {
424     new_space_.CollectStatistics();
425     ReportHeapStatistics("After GC");
426   } else if (FLAG_log_gc) {
427     new_space_.ReportStatistics();
428   }
429 #else
430   if (FLAG_log_gc) new_space_.ReportStatistics();
431 #endif  // DEBUG
432 }
433
434
435 void Heap::GarbageCollectionPrologue() {
436   {  AllowHeapAllocation for_the_first_part_of_prologue;
437     isolate_->transcendental_cache()->Clear();
438     ClearJSFunctionResultCaches();
439     gc_count_++;
440     unflattened_strings_length_ = 0;
441
442     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
443       mark_compact_collector()->EnableCodeFlushing(true);
444     }
445
446 #ifdef VERIFY_HEAP
447     if (FLAG_verify_heap) {
448       Verify();
449     }
450 #endif
451   }
452
453 #ifdef DEBUG
454   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
455
456   if (FLAG_gc_verbose) Print();
457
458   ReportStatisticsBeforeGC();
459 #endif  // DEBUG
460
461   store_buffer()->GCPrologue();
462 }
463
464
465 intptr_t Heap::SizeOfObjects() {
466   intptr_t total = 0;
467   AllSpaces spaces(this);
468   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
469     total += space->SizeOfObjects();
470   }
471   return total;
472 }
473
474
475 void Heap::RepairFreeListsAfterBoot() {
476   PagedSpaces spaces(this);
477   for (PagedSpace* space = spaces.next();
478        space != NULL;
479        space = spaces.next()) {
480     space->RepairFreeListsAfterBoot();
481   }
482 }
483
484
485 void Heap::GarbageCollectionEpilogue() {
486   store_buffer()->GCEpilogue();
487
488   // In release mode, we only zap the from space under heap verification.
489   if (Heap::ShouldZapGarbage()) {
490     ZapFromSpace();
491   }
492
493 #ifdef VERIFY_HEAP
494   if (FLAG_verify_heap) {
495     Verify();
496   }
497 #endif
498
499   AllowHeapAllocation for_the_rest_of_the_epilogue;
500
501 #ifdef DEBUG
502   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
503   if (FLAG_print_handles) PrintHandles();
504   if (FLAG_gc_verbose) Print();
505   if (FLAG_code_stats) ReportCodeStatistics("After GC");
506 #endif
507   if (FLAG_deopt_every_n_garbage_collections > 0) {
508     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
509       Deoptimizer::DeoptimizeAll(isolate());
510       gcs_since_last_deopt_ = 0;
511     }
512   }
513
514   isolate_->counters()->alive_after_last_gc()->Set(
515       static_cast<int>(SizeOfObjects()));
516
517   isolate_->counters()->string_table_capacity()->Set(
518       string_table()->Capacity());
519   isolate_->counters()->number_of_symbols()->Set(
520       string_table()->NumberOfElements());
521
522   if (CommittedMemory() > 0) {
523     isolate_->counters()->external_fragmentation_total()->AddSample(
524         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
525
526     isolate_->counters()->heap_fraction_map_space()->AddSample(
527         static_cast<int>(
528             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
529     isolate_->counters()->heap_fraction_cell_space()->AddSample(
530         static_cast<int>(
531             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
532     isolate_->counters()->heap_fraction_property_cell_space()->
533         AddSample(static_cast<int>(
534             (property_cell_space()->CommittedMemory() * 100.0) /
535             CommittedMemory()));
536
537     isolate_->counters()->heap_sample_total_committed()->AddSample(
538         static_cast<int>(CommittedMemory() / KB));
539     isolate_->counters()->heap_sample_total_used()->AddSample(
540         static_cast<int>(SizeOfObjects() / KB));
541     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
542         static_cast<int>(map_space()->CommittedMemory() / KB));
543     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
544         static_cast<int>(cell_space()->CommittedMemory() / KB));
545     isolate_->counters()->
546         heap_sample_property_cell_space_committed()->
547             AddSample(static_cast<int>(
548                 property_cell_space()->CommittedMemory() / KB));
549   }
550
551 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
552   isolate_->counters()->space##_bytes_available()->Set(                        \
553       static_cast<int>(space()->Available()));                                 \
554   isolate_->counters()->space##_bytes_committed()->Set(                        \
555       static_cast<int>(space()->CommittedMemory()));                           \
556   isolate_->counters()->space##_bytes_used()->Set(                             \
557       static_cast<int>(space()->SizeOfObjects()));
558 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
559   if (space()->CommittedMemory() > 0) {                                        \
560     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
561         static_cast<int>(100 -                                                 \
562             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
563   }
564 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
565   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
566   UPDATE_FRAGMENTATION_FOR_SPACE(space)
567
568   UPDATE_COUNTERS_FOR_SPACE(new_space)
569   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
570   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
571   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
572   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
573   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
574   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
575   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
576 #undef UPDATE_COUNTERS_FOR_SPACE
577 #undef UPDATE_FRAGMENTATION_FOR_SPACE
578 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
579
580 #if defined(DEBUG)
581   ReportStatisticsAfterGC();
582 #endif  // DEBUG
583 #ifdef ENABLE_DEBUGGER_SUPPORT
584   isolate_->debug()->AfterGarbageCollection();
585 #endif  // ENABLE_DEBUGGER_SUPPORT
586 }
587
588
589 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
590   // Since we are ignoring the return value, the exact choice of space does
591   // not matter, so long as we do not specify NEW_SPACE, which would not
592   // cause a full GC.
593   mark_compact_collector_.SetFlags(flags);
594   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
595   mark_compact_collector_.SetFlags(kNoGCFlags);
596 }
597
598
599 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
600   // Since we are ignoring the return value, the exact choice of space does
601   // not matter, so long as we do not specify NEW_SPACE, which would not
602   // cause a full GC.
603   // Major GC would invoke weak handle callbacks on weakly reachable
604   // handles, but won't collect weakly reachable objects until next
605   // major GC.  Therefore if we collect aggressively and weak handle callback
606   // has been invoked, we rerun major GC to release objects which become
607   // garbage.
608   // Note: as weak callbacks can execute arbitrary code, we cannot
609   // hope that eventually there will be no weak callbacks invocations.
610   // Therefore stop recollecting after several attempts.
611   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
612                                      kReduceMemoryFootprintMask);
613   isolate_->compilation_cache()->Clear();
614   const int kMaxNumberOfAttempts = 7;
615   const int kMinNumberOfAttempts = 2;
616   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
618         attempt + 1 >= kMinNumberOfAttempts) {
619       break;
620     }
621   }
622   mark_compact_collector()->SetFlags(kNoGCFlags);
623   new_space_.Shrink();
624   UncommitFromSpace();
625   incremental_marking()->UncommitMarkingDeque();
626 }
627
628
629 bool Heap::CollectGarbage(AllocationSpace space,
630                           GarbageCollector collector,
631                           const char* gc_reason,
632                           const char* collector_reason) {
633   // The VM is in the GC state until exiting this function.
634   VMState<GC> state(isolate_);
635
636 #ifdef DEBUG
637   // Reset the allocation timeout to the GC interval, but make sure to
638   // allow at least a few allocations after a collection. The reason
639   // for this is that we have a lot of allocation sequences and we
640   // assume that a garbage collection will allow the subsequent
641   // allocation attempts to go through.
642   allocation_timeout_ = Max(6, FLAG_gc_interval);
643 #endif
644
645   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
646     if (FLAG_trace_incremental_marking) {
647       PrintF("[IncrementalMarking] Scavenge during marking.\n");
648     }
649   }
650
651   if (collector == MARK_COMPACTOR &&
652       !mark_compact_collector()->abort_incremental_marking() &&
653       !incremental_marking()->IsStopped() &&
654       !incremental_marking()->should_hurry() &&
655       FLAG_incremental_marking_steps) {
656     // Make progress in incremental marking.
657     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
658     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
659                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
660     if (!incremental_marking()->IsComplete()) {
661       if (FLAG_trace_incremental_marking) {
662         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
663       }
664       collector = SCAVENGER;
665       collector_reason = "incremental marking delaying mark-sweep";
666     }
667   }
668
669   bool next_gc_likely_to_collect_more = false;
670
671   { GCTracer tracer(this, gc_reason, collector_reason);
672     ASSERT(AllowHeapAllocation::IsAllowed());
673     DisallowHeapAllocation no_allocation_during_gc;
674     GarbageCollectionPrologue();
675     // The GC count was incremented in the prologue.  Tell the tracer about
676     // it.
677     tracer.set_gc_count(gc_count_);
678
679     // Tell the tracer which collector we've selected.
680     tracer.set_collector(collector);
681
682     {
683       HistogramTimerScope histogram_timer_scope(
684           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
685                                    : isolate_->counters()->gc_compactor());
686       next_gc_likely_to_collect_more =
687           PerformGarbageCollection(collector, &tracer);
688     }
689
690     GarbageCollectionEpilogue();
691   }
692
693   // Start incremental marking for the next cycle. The heap snapshot
694   // generator needs incremental marking to stay off after it aborted.
695   if (!mark_compact_collector()->abort_incremental_marking() &&
696       incremental_marking()->IsStopped() &&
697       incremental_marking()->WorthActivating() &&
698       NextGCIsLikelyToBeFull()) {
699     incremental_marking()->Start();
700   }
701
702   return next_gc_likely_to_collect_more;
703 }
704
705
706 int Heap::NotifyContextDisposed() {
707   if (FLAG_parallel_recompilation) {
708     // Flush the queued recompilation tasks.
709     isolate()->optimizing_compiler_thread()->Flush();
710   }
711   flush_monomorphic_ics_ = true;
712   return ++contexts_disposed_;
713 }
714
715
716 void Heap::PerformScavenge() {
717   GCTracer tracer(this, NULL, NULL);
718   if (incremental_marking()->IsStopped()) {
719     PerformGarbageCollection(SCAVENGER, &tracer);
720   } else {
721     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
722   }
723 }
724
725
726 void Heap::MoveElements(FixedArray* array,
727                         int dst_index,
728                         int src_index,
729                         int len) {
730   if (len == 0) return;
731
732   ASSERT(array->map() != HEAP->fixed_cow_array_map());
733   Object** dst_objects = array->data_start() + dst_index;
734   OS::MemMove(dst_objects,
735               array->data_start() + src_index,
736               len * kPointerSize);
737   if (!InNewSpace(array)) {
738     for (int i = 0; i < len; i++) {
739       // TODO(hpayer): check store buffer for entries
740       if (InNewSpace(dst_objects[i])) {
741         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
742       }
743     }
744   }
745   incremental_marking()->RecordWrites(array);
746 }
747
748
749 #ifdef VERIFY_HEAP
750 // Helper class for verifying the string table.
751 class StringTableVerifier : public ObjectVisitor {
752  public:
753   void VisitPointers(Object** start, Object** end) {
754     // Visit all HeapObject pointers in [start, end).
755     for (Object** p = start; p < end; p++) {
756       if ((*p)->IsHeapObject()) {
757         // Check that the string is actually internalized.
758         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
759               (*p)->IsInternalizedString());
760       }
761     }
762   }
763 };
764
765
766 static void VerifyStringTable() {
767   StringTableVerifier verifier;
768   HEAP->string_table()->IterateElements(&verifier);
769 }
770 #endif  // VERIFY_HEAP
771
772
773 static bool AbortIncrementalMarkingAndCollectGarbage(
774     Heap* heap,
775     AllocationSpace space,
776     const char* gc_reason = NULL) {
777   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
778   bool result = heap->CollectGarbage(space, gc_reason);
779   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
780   return result;
781 }
782
783
784 void Heap::ReserveSpace(
785     int *sizes,
786     Address *locations_out) {
787   bool gc_performed = true;
788   int counter = 0;
789   static const int kThreshold = 20;
790   while (gc_performed && counter++ < kThreshold) {
791     gc_performed = false;
792     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
793     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
794       if (sizes[space] != 0) {
795         MaybeObject* allocation;
796         if (space == NEW_SPACE) {
797           allocation = new_space()->AllocateRaw(sizes[space]);
798         } else {
799           allocation = paged_space(space)->AllocateRaw(sizes[space]);
800         }
801         FreeListNode* node;
802         if (!allocation->To<FreeListNode>(&node)) {
803           if (space == NEW_SPACE) {
804             Heap::CollectGarbage(NEW_SPACE,
805                                  "failed to reserve space in the new space");
806           } else {
807             AbortIncrementalMarkingAndCollectGarbage(
808                 this,
809                 static_cast<AllocationSpace>(space),
810                 "failed to reserve space in paged space");
811           }
812           gc_performed = true;
813           break;
814         } else {
815           // Mark with a free list node, in case we have a GC before
816           // deserializing.
817           node->set_size(this, sizes[space]);
818           locations_out[space] = node->address();
819         }
820       }
821     }
822   }
823
824   if (gc_performed) {
825     // Failed to reserve the space after several attempts.
826     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
827   }
828 }
829
830
831 void Heap::EnsureFromSpaceIsCommitted() {
832   if (new_space_.CommitFromSpaceIfNeeded()) return;
833
834   // Committing memory to from space failed.
835   // Memory is exhausted and we will die.
836   V8::FatalProcessOutOfMemory("Committing semi space failed.");
837 }
838
839
840 void Heap::ClearJSFunctionResultCaches() {
841   if (isolate_->bootstrapper()->IsActive()) return;
842
843   Object* context = native_contexts_list_;
844   while (!context->IsUndefined()) {
845     // Get the caches for this context. GC can happen when the context
846     // is not fully initialized, so the caches can be undefined.
847     Object* caches_or_undefined =
848         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
849     if (!caches_or_undefined->IsUndefined()) {
850       FixedArray* caches = FixedArray::cast(caches_or_undefined);
851       // Clear the caches:
852       int length = caches->length();
853       for (int i = 0; i < length; i++) {
854         JSFunctionResultCache::cast(caches->get(i))->Clear();
855       }
856     }
857     // Get the next context:
858     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
859   }
860 }
861
862
863 void Heap::ClearNormalizedMapCaches() {
864   if (isolate_->bootstrapper()->IsActive() &&
865       !incremental_marking()->IsMarking()) {
866     return;
867   }
868
869   Object* context = native_contexts_list_;
870   while (!context->IsUndefined()) {
871     // GC can happen when the context is not fully initialized,
872     // so the cache can be undefined.
873     Object* cache =
874         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
875     if (!cache->IsUndefined()) {
876       NormalizedMapCache::cast(cache)->Clear();
877     }
878     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
879   }
880 }
881
882
883 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
884   double survival_rate =
885       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
886       start_new_space_size;
887
888   if (survival_rate > kYoungSurvivalRateHighThreshold) {
889     high_survival_rate_period_length_++;
890   } else {
891     high_survival_rate_period_length_ = 0;
892   }
893
894   if (survival_rate < kYoungSurvivalRateLowThreshold) {
895     low_survival_rate_period_length_++;
896   } else {
897     low_survival_rate_period_length_ = 0;
898   }
899
900   double survival_rate_diff = survival_rate_ - survival_rate;
901
902   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
903     set_survival_rate_trend(DECREASING);
904   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
905     set_survival_rate_trend(INCREASING);
906   } else {
907     set_survival_rate_trend(STABLE);
908   }
909
910   survival_rate_ = survival_rate;
911 }
912
913 bool Heap::PerformGarbageCollection(GarbageCollector collector,
914                                     GCTracer* tracer) {
915   bool next_gc_likely_to_collect_more = false;
916
917   if (collector != SCAVENGER) {
918     PROFILE(isolate_, CodeMovingGCEvent());
919   }
920
921 #ifdef VERIFY_HEAP
922   if (FLAG_verify_heap) {
923     VerifyStringTable();
924   }
925 #endif
926
927   GCType gc_type =
928       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
929
930   {
931     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
932     VMState<EXTERNAL> state(isolate_);
933     HandleScope handle_scope(isolate_);
934     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
935   }
936
937   EnsureFromSpaceIsCommitted();
938
939   int start_new_space_size = Heap::new_space()->SizeAsInt();
940
941   if (IsHighSurvivalRate()) {
942     // We speed up the incremental marker if it is running so that it
943     // does not fall behind the rate of promotion, which would cause a
944     // constantly growing old space.
945     incremental_marking()->NotifyOfHighPromotionRate();
946   }
947
948   if (collector == MARK_COMPACTOR) {
949     // Perform mark-sweep with optional compaction.
950     MarkCompact(tracer);
951     sweep_generation_++;
952
953     UpdateSurvivalRateTrend(start_new_space_size);
954
955     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
956
957     old_generation_allocation_limit_ =
958         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
959
960     old_gen_exhausted_ = false;
961   } else {
962     tracer_ = tracer;
963     Scavenge();
964     tracer_ = NULL;
965
966     UpdateSurvivalRateTrend(start_new_space_size);
967   }
968
969   if (!new_space_high_promotion_mode_active_ &&
970       new_space_.Capacity() == new_space_.MaximumCapacity() &&
971       IsStableOrIncreasingSurvivalTrend() &&
972       IsHighSurvivalRate()) {
973     // Stable high survival rates even though young generation is at
974     // maximum capacity indicates that most objects will be promoted.
975     // To decrease scavenger pauses and final mark-sweep pauses, we
976     // have to limit maximal capacity of the young generation.
977     SetNewSpaceHighPromotionModeActive(true);
978     if (FLAG_trace_gc) {
979       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
980                new_space_.InitialCapacity() / MB);
981     }
982     // Support for global pre-tenuring uses the high promotion mode as a
983     // heuristic indicator of whether to pretenure or not, we trigger
984     // deoptimization here to take advantage of pre-tenuring as soon as
985     // possible.
986     if (FLAG_pretenuring) {
987       isolate_->stack_guard()->FullDeopt();
988     }
989   } else if (new_space_high_promotion_mode_active_ &&
990       IsStableOrDecreasingSurvivalTrend() &&
991       IsLowSurvivalRate()) {
992     // Decreasing low survival rates might indicate that the above high
993     // promotion mode is over and we should allow the young generation
994     // to grow again.
995     SetNewSpaceHighPromotionModeActive(false);
996     if (FLAG_trace_gc) {
997       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
998                new_space_.MaximumCapacity() / MB);
999     }
1000     // Trigger deoptimization here to turn off pre-tenuring as soon as
1001     // possible.
1002     if (FLAG_pretenuring) {
1003       isolate_->stack_guard()->FullDeopt();
1004     }
1005   }
1006
1007   if (new_space_high_promotion_mode_active_ &&
1008       new_space_.Capacity() > new_space_.InitialCapacity()) {
1009     new_space_.Shrink();
1010   }
1011
1012   isolate_->counters()->objs_since_last_young()->Set(0);
1013
1014   // Callbacks that fire after this point might trigger nested GCs and
1015   // restart incremental marking, the assertion can't be moved down.
1016   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1017
1018   gc_post_processing_depth_++;
1019   { AllowHeapAllocation allow_allocation;
1020     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1021     next_gc_likely_to_collect_more =
1022         isolate_->global_handles()->PostGarbageCollectionProcessing(
1023             collector, tracer);
1024   }
1025   gc_post_processing_depth_--;
1026
1027   // Update relocatables.
1028   Relocatable::PostGarbageCollectionProcessing();
1029
1030   if (collector == MARK_COMPACTOR) {
1031     // Register the amount of external allocated memory.
1032     amount_of_external_allocated_memory_at_last_global_gc_ =
1033         amount_of_external_allocated_memory_;
1034   }
1035
1036   {
1037     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1038     VMState<EXTERNAL> state(isolate_);
1039     HandleScope handle_scope(isolate_);
1040     CallGCEpilogueCallbacks(gc_type);
1041   }
1042
1043 #ifdef VERIFY_HEAP
1044   if (FLAG_verify_heap) {
1045     VerifyStringTable();
1046   }
1047 #endif
1048
1049   return next_gc_likely_to_collect_more;
1050 }
1051
1052
1053 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1054   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1055     global_gc_prologue_callback_();
1056   }
1057   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1058     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1059       gc_prologue_callbacks_[i].callback(gc_type, flags);
1060     }
1061   }
1062 }
1063
1064
1065 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1066   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1067     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1068       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1069     }
1070   }
1071   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1072     global_gc_epilogue_callback_();
1073   }
1074 }
1075
1076
1077 void Heap::MarkCompact(GCTracer* tracer) {
1078   gc_state_ = MARK_COMPACT;
1079   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1080
1081   mark_compact_collector_.Prepare(tracer);
1082
1083   ms_count_++;
1084   tracer->set_full_gc_count(ms_count_);
1085
1086   MarkCompactPrologue();
1087
1088   mark_compact_collector_.CollectGarbage();
1089
1090   LOG(isolate_, ResourceEvent("markcompact", "end"));
1091
1092   gc_state_ = NOT_IN_GC;
1093
1094   isolate_->counters()->objs_since_last_full()->Set(0);
1095
1096   contexts_disposed_ = 0;
1097
1098   flush_monomorphic_ics_ = false;
1099 }
1100
1101
1102 void Heap::MarkCompactPrologue() {
1103   // At any old GC clear the keyed lookup cache to enable collection of unused
1104   // maps.
1105   isolate_->keyed_lookup_cache()->Clear();
1106   isolate_->context_slot_cache()->Clear();
1107   isolate_->descriptor_lookup_cache()->Clear();
1108   RegExpResultsCache::Clear(string_split_cache());
1109   RegExpResultsCache::Clear(regexp_multiple_cache());
1110
1111   isolate_->compilation_cache()->MarkCompactPrologue();
1112
1113   CompletelyClearInstanceofCache();
1114
1115   FlushNumberStringCache();
1116   if (FLAG_cleanup_code_caches_at_gc) {
1117     polymorphic_code_cache()->set_cache(undefined_value());
1118   }
1119
1120   ClearNormalizedMapCaches();
1121 }
1122
1123
1124 // Helper class for copying HeapObjects
1125 class ScavengeVisitor: public ObjectVisitor {
1126  public:
1127   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1128
1129   void VisitPointer(Object** p) { ScavengePointer(p); }
1130
1131   void VisitPointers(Object** start, Object** end) {
1132     // Copy all HeapObject pointers in [start, end)
1133     for (Object** p = start; p < end; p++) ScavengePointer(p);
1134   }
1135
1136  private:
1137   void ScavengePointer(Object** p) {
1138     Object* object = *p;
1139     if (!heap_->InNewSpace(object)) return;
1140     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1141                          reinterpret_cast<HeapObject*>(object));
1142   }
1143
1144   Heap* heap_;
1145 };
1146
1147
1148 #ifdef VERIFY_HEAP
1149 // Visitor class to verify pointers in code or data space do not point into
1150 // new space.
1151 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1152  public:
1153   void VisitPointers(Object** start, Object**end) {
1154     for (Object** current = start; current < end; current++) {
1155       if ((*current)->IsHeapObject()) {
1156         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1157       }
1158     }
1159   }
1160 };
1161
1162
1163 static void VerifyNonPointerSpacePointers() {
1164   // Verify that there are no pointers to new space in spaces where we
1165   // do not expect them.
1166   VerifyNonPointerSpacePointersVisitor v;
1167   HeapObjectIterator code_it(HEAP->code_space());
1168   for (HeapObject* object = code_it.Next();
1169        object != NULL; object = code_it.Next())
1170     object->Iterate(&v);
1171
1172   // The old data space was normally swept conservatively so that the iterator
1173   // doesn't work, so we normally skip the next bit.
1174   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1175     HeapObjectIterator data_it(HEAP->old_data_space());
1176     for (HeapObject* object = data_it.Next();
1177          object != NULL; object = data_it.Next())
1178       object->Iterate(&v);
1179   }
1180 }
1181 #endif  // VERIFY_HEAP
1182
1183
1184 void Heap::CheckNewSpaceExpansionCriteria() {
1185   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1186       survived_since_last_expansion_ > new_space_.Capacity() &&
1187       !new_space_high_promotion_mode_active_) {
1188     // Grow the size of new space if there is room to grow, enough data
1189     // has survived scavenge since the last expansion and we are not in
1190     // high promotion mode.
1191     new_space_.Grow();
1192     survived_since_last_expansion_ = 0;
1193   }
1194 }
1195
1196
1197 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1198   return heap->InNewSpace(*p) &&
1199       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1200 }
1201
1202
1203 void Heap::ScavengeStoreBufferCallback(
1204     Heap* heap,
1205     MemoryChunk* page,
1206     StoreBufferEvent event) {
1207   heap->store_buffer_rebuilder_.Callback(page, event);
1208 }
1209
1210
1211 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1212   if (event == kStoreBufferStartScanningPagesEvent) {
1213     start_of_current_page_ = NULL;
1214     current_page_ = NULL;
1215   } else if (event == kStoreBufferScanningPageEvent) {
1216     if (current_page_ != NULL) {
1217       // If this page already overflowed the store buffer during this iteration.
1218       if (current_page_->scan_on_scavenge()) {
1219         // Then we should wipe out the entries that have been added for it.
1220         store_buffer_->SetTop(start_of_current_page_);
1221       } else if (store_buffer_->Top() - start_of_current_page_ >=
1222                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1223         // Did we find too many pointers in the previous page?  The heuristic is
1224         // that no page can take more then 1/5 the remaining slots in the store
1225         // buffer.
1226         current_page_->set_scan_on_scavenge(true);
1227         store_buffer_->SetTop(start_of_current_page_);
1228       } else {
1229         // In this case the page we scanned took a reasonable number of slots in
1230         // the store buffer.  It has now been rehabilitated and is no longer
1231         // marked scan_on_scavenge.
1232         ASSERT(!current_page_->scan_on_scavenge());
1233       }
1234     }
1235     start_of_current_page_ = store_buffer_->Top();
1236     current_page_ = page;
1237   } else if (event == kStoreBufferFullEvent) {
1238     // The current page overflowed the store buffer again.  Wipe out its entries
1239     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1240     // several times while scanning.
1241     if (current_page_ == NULL) {
1242       // Store Buffer overflowed while scanning promoted objects.  These are not
1243       // in any particular page, though they are likely to be clustered by the
1244       // allocation routines.
1245       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1246     } else {
1247       // Store Buffer overflowed while scanning a particular old space page for
1248       // pointers to new space.
1249       ASSERT(current_page_ == page);
1250       ASSERT(page != NULL);
1251       current_page_->set_scan_on_scavenge(true);
1252       ASSERT(start_of_current_page_ != store_buffer_->Top());
1253       store_buffer_->SetTop(start_of_current_page_);
1254     }
1255   } else {
1256     UNREACHABLE();
1257   }
1258 }
1259
1260
1261 void PromotionQueue::Initialize() {
1262   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1263   // entries (where each is a pair of intptr_t). This allows us to simplify
1264   // the test fpr when to switch pages.
1265   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1266          == 0);
1267   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1268   front_ = rear_ =
1269       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1270   emergency_stack_ = NULL;
1271   guard_ = false;
1272 }
1273
1274
1275 void PromotionQueue::RelocateQueueHead() {
1276   ASSERT(emergency_stack_ == NULL);
1277
1278   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1279   intptr_t* head_start = rear_;
1280   intptr_t* head_end =
1281       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1282
1283   int entries_count =
1284       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1285
1286   emergency_stack_ = new List<Entry>(2 * entries_count);
1287
1288   while (head_start != head_end) {
1289     int size = static_cast<int>(*(head_start++));
1290     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1291     emergency_stack_->Add(Entry(obj, size));
1292   }
1293   rear_ = head_end;
1294 }
1295
1296
1297 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1298  public:
1299   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1300
1301   virtual Object* RetainAs(Object* object) {
1302     if (!heap_->InFromSpace(object)) {
1303       return object;
1304     }
1305
1306     MapWord map_word = HeapObject::cast(object)->map_word();
1307     if (map_word.IsForwardingAddress()) {
1308       return map_word.ToForwardingAddress();
1309     }
1310     return NULL;
1311   }
1312
1313  private:
1314   Heap* heap_;
1315 };
1316
1317
1318 void Heap::Scavenge() {
1319   RelocationLock relocation_lock(this);
1320
1321 #ifdef VERIFY_HEAP
1322   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1323 #endif
1324
1325   gc_state_ = SCAVENGE;
1326
1327   // Implements Cheney's copying algorithm
1328   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1329
1330   // Clear descriptor cache.
1331   isolate_->descriptor_lookup_cache()->Clear();
1332
1333   // Used for updating survived_since_last_expansion_ at function end.
1334   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1335
1336   CheckNewSpaceExpansionCriteria();
1337
1338   SelectScavengingVisitorsTable();
1339
1340   incremental_marking()->PrepareForScavenge();
1341
1342   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1343   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1344
1345   // Flip the semispaces.  After flipping, to space is empty, from space has
1346   // live objects.
1347   new_space_.Flip();
1348   new_space_.ResetAllocationInfo();
1349
1350   // We need to sweep newly copied objects which can be either in the
1351   // to space or promoted to the old generation.  For to-space
1352   // objects, we treat the bottom of the to space as a queue.  Newly
1353   // copied and unswept objects lie between a 'front' mark and the
1354   // allocation pointer.
1355   //
1356   // Promoted objects can go into various old-generation spaces, and
1357   // can be allocated internally in the spaces (from the free list).
1358   // We treat the top of the to space as a queue of addresses of
1359   // promoted objects.  The addresses of newly promoted and unswept
1360   // objects lie between a 'front' mark and a 'rear' mark that is
1361   // updated as a side effect of promoting an object.
1362   //
1363   // There is guaranteed to be enough room at the top of the to space
1364   // for the addresses of promoted objects: every object promoted
1365   // frees up its size in bytes from the top of the new space, and
1366   // objects are at least one pointer in size.
1367   Address new_space_front = new_space_.ToSpaceStart();
1368   promotion_queue_.Initialize();
1369
1370 #ifdef DEBUG
1371   store_buffer()->Clean();
1372 #endif
1373
1374   ScavengeVisitor scavenge_visitor(this);
1375   // Copy roots.
1376   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1377
1378   // Copy objects reachable from the old generation.
1379   {
1380     StoreBufferRebuildScope scope(this,
1381                                   store_buffer(),
1382                                   &ScavengeStoreBufferCallback);
1383     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1384   }
1385
1386   // Copy objects reachable from simple cells by scavenging cell values
1387   // directly.
1388   HeapObjectIterator cell_iterator(cell_space_);
1389   for (HeapObject* heap_object = cell_iterator.Next();
1390        heap_object != NULL;
1391        heap_object = cell_iterator.Next()) {
1392     if (heap_object->IsCell()) {
1393       Cell* cell = Cell::cast(heap_object);
1394       Address value_address = cell->ValueAddress();
1395       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1396     }
1397   }
1398
1399   // Copy objects reachable from global property cells by scavenging global
1400   // property cell values directly.
1401   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1402   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1403        heap_object != NULL;
1404        heap_object = js_global_property_cell_iterator.Next()) {
1405     if (heap_object->IsPropertyCell()) {
1406       PropertyCell* cell = PropertyCell::cast(heap_object);
1407       Address value_address = cell->ValueAddress();
1408       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1409       Address type_address = cell->TypeAddress();
1410       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1411     }
1412   }
1413
1414   // Copy objects reachable from the code flushing candidates list.
1415   MarkCompactCollector* collector = mark_compact_collector();
1416   if (collector->is_code_flushing_enabled()) {
1417     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1418   }
1419
1420   // Scavenge object reachable from the native contexts list directly.
1421   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1422
1423   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1424
1425   while (isolate()->global_handles()->IterateObjectGroups(
1426       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1427     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1428   }
1429   isolate()->global_handles()->RemoveObjectGroups();
1430   isolate()->global_handles()->RemoveImplicitRefGroups();
1431
1432   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1433       &IsUnscavengedHeapObject);
1434   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1435       &scavenge_visitor);
1436   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1437
1438   UpdateNewSpaceReferencesInExternalStringTable(
1439       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1440
1441   promotion_queue_.Destroy();
1442
1443   if (!FLAG_watch_ic_patching) {
1444     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1445   }
1446   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1447
1448   ScavengeWeakObjectRetainer weak_object_retainer(this);
1449   ProcessWeakReferences(&weak_object_retainer);
1450
1451   ASSERT(new_space_front == new_space_.top());
1452
1453   // Set age mark.
1454   new_space_.set_age_mark(new_space_.top());
1455
1456   new_space_.LowerInlineAllocationLimit(
1457       new_space_.inline_allocation_limit_step());
1458
1459   // Update how much has survived scavenge.
1460   IncrementYoungSurvivorsCounter(static_cast<int>(
1461       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1462
1463   LOG(isolate_, ResourceEvent("scavenge", "end"));
1464
1465   gc_state_ = NOT_IN_GC;
1466
1467   scavenges_since_last_idle_round_++;
1468 }
1469
1470
1471 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1472                                                                 Object** p) {
1473   MapWord first_word = HeapObject::cast(*p)->map_word();
1474
1475   if (!first_word.IsForwardingAddress()) {
1476     // Unreachable external string can be finalized.
1477     heap->FinalizeExternalString(String::cast(*p));
1478     return NULL;
1479   }
1480
1481   // String is still reachable.
1482   return String::cast(first_word.ToForwardingAddress());
1483 }
1484
1485
1486 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1487     ExternalStringTableUpdaterCallback updater_func) {
1488 #ifdef VERIFY_HEAP
1489   if (FLAG_verify_heap) {
1490     external_string_table_.Verify();
1491   }
1492 #endif
1493
1494   if (external_string_table_.new_space_strings_.is_empty()) return;
1495
1496   Object** start = &external_string_table_.new_space_strings_[0];
1497   Object** end = start + external_string_table_.new_space_strings_.length();
1498   Object** last = start;
1499
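  // The loop below compacts new_space_strings_ in place: updater_func returns
  // NULL for strings that died (the entry is dropped), the new location for
  // strings still in new space (written back through *last), or a promoted
  // string, which is moved to the old string list.  ShrinkNewStrings() then
  // trims everything past the last surviving new-space entry.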
1500   for (Object** p = start; p < end; ++p) {
1501     ASSERT(InFromSpace(*p));
1502     String* target = updater_func(this, p);
1503
1504     if (target == NULL) continue;
1505
1506     ASSERT(target->IsExternalString());
1507
1508     if (InNewSpace(target)) {
1509       // String is still in new space.  Update the table entry.
1510       *last = target;
1511       ++last;
1512     } else {
1513       // String got promoted.  Move it to the old string list.
1514       external_string_table_.AddOldString(target);
1515     }
1516   }
1517
1518   ASSERT(last <= end);
1519   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1520 }
1521
1522
1523 void Heap::UpdateReferencesInExternalStringTable(
1524     ExternalStringTableUpdaterCallback updater_func) {
1525
1526   // Update old space string references.
1527   if (external_string_table_.old_space_strings_.length() > 0) {
1528     Object** start = &external_string_table_.old_space_strings_[0];
1529     Object** end = start + external_string_table_.old_space_strings_.length();
1530     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1531   }
1532
1533   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1534 }
1535
1536
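// VisitWeakList<T> below drives the traversal; each specialization of
// WeakListVisitor<T> (provided further down for JSFunction, Context,
// JSArrayBufferView, JSArrayBuffer and AllocationSite) supplies the static
// interface it relies on:
//
//   static void SetWeakNext(T* obj, Object* next);
//   static Object* WeakNext(T* obj);
//   static int WeakNextOffset();
//   static void VisitLiveObject(Heap* heap, T* obj,
//                               WeakObjectRetainer* retainer,
//                               bool record_slots);
//   static void VisitPhantomObject(Heap* heap, T* obj);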
1537 template <class T>
1538 struct WeakListVisitor;
1539
1540
1541 template <class T>
1542 static Object* VisitWeakList(Heap* heap,
1543                              Object* list,
1544                              WeakObjectRetainer* retainer,
1545                              bool record_slots) {
1546   Object* undefined = heap->undefined_value();
1547   Object* head = undefined;
1548   T* tail = NULL;
1549   MarkCompactCollector* collector = heap->mark_compact_collector();
1550   while (list != undefined) {
1551     // Check whether to keep the candidate in the list.
1552     T* candidate = reinterpret_cast<T*>(list);
1553     Object* retained = retainer->RetainAs(list);
1554     if (retained != NULL) {
1555       if (head == undefined) {
1556         // First element in the list.
1557         head = retained;
1558       } else {
1559         // Subsequent elements in the list.
1560         ASSERT(tail != NULL);
1561         WeakListVisitor<T>::SetWeakNext(tail, retained);
1562         if (record_slots) {
1563           Object** next_slot =
1564             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1565           collector->RecordSlot(next_slot, next_slot, retained);
1566         }
1567       }
1568       // Retained object is new tail.
1569       ASSERT(!retained->IsUndefined());
1570       candidate = reinterpret_cast<T*>(retained);
1571       tail = candidate;
1572
1573
1574       // The tail is a live object; visit it.
1575       WeakListVisitor<T>::VisitLiveObject(
1576           heap, tail, retainer, record_slots);
1577     } else {
1578       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1579     }
1580
1581     // Move to next element in the list.
1582     list = WeakListVisitor<T>::WeakNext(candidate);
1583   }
1584
1585   // Terminate the list if there are one or more elements.
1586   if (tail != NULL) {
1587     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1588   }
1589   return head;
1590 }
1591
1592
1593 template<>
1594 struct WeakListVisitor<JSFunction> {
1595   static void SetWeakNext(JSFunction* function, Object* next) {
1596     function->set_next_function_link(next);
1597   }
1598
1599   static Object* WeakNext(JSFunction* function) {
1600     return function->next_function_link();
1601   }
1602
1603   static int WeakNextOffset() {
1604     return JSFunction::kNextFunctionLinkOffset;
1605   }
1606
1607   static void VisitLiveObject(Heap*, JSFunction*,
1608                               WeakObjectRetainer*, bool) {
1609   }
1610
1611   static void VisitPhantomObject(Heap*, JSFunction*) {
1612   }
1613 };
1614
1615
1616 template<>
1617 struct WeakListVisitor<Context> {
1618   static void SetWeakNext(Context* context, Object* next) {
1619     context->set(Context::NEXT_CONTEXT_LINK,
1620                  next,
1621                  UPDATE_WRITE_BARRIER);
1622   }
1623
1624   static Object* WeakNext(Context* context) {
1625     return context->get(Context::NEXT_CONTEXT_LINK);
1626   }
1627
1628   static void VisitLiveObject(Heap* heap,
1629                               Context* context,
1630                               WeakObjectRetainer* retainer,
1631                               bool record_slots) {
1632     // Process the weak list of optimized functions for the context.
1633     Object* function_list_head =
1634         VisitWeakList<JSFunction>(
1635             heap,
1636             context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1637             retainer,
1638             record_slots);
1639     context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1640                  function_list_head,
1641                  UPDATE_WRITE_BARRIER);
1642     if (record_slots) {
1643       Object** optimized_functions =
1644           HeapObject::RawField(
1645               context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1646       heap->mark_compact_collector()->RecordSlot(
1647           optimized_functions, optimized_functions, function_list_head);
1648     }
1649   }
1650
1651   static void VisitPhantomObject(Heap*, Context*) {
1652   }
1653
1654   static int WeakNextOffset() {
1655     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1656   }
1657 };
1658
1659
1660 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1661   // We don't record weak slots during marking or scavenges.
1662   // Instead we do it once when we complete a mark-compact cycle.
1663   // Note that the write barrier has no effect if we are already in the middle
1664   // of a compacting mark-sweep cycle, so we have to record slots manually.
1665   bool record_slots =
1666       gc_state() == MARK_COMPACT &&
1667       mark_compact_collector()->is_compacting();
1668   ProcessArrayBuffers(retainer, record_slots);
1669   ProcessNativeContexts(retainer, record_slots);
1670   ProcessAllocationSites(retainer, record_slots);
1671 }
1672
1673 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1674                                  bool record_slots) {
1675   Object* head =
1676       VisitWeakList<Context>(
1677           this, native_contexts_list(), retainer, record_slots);
1678   // Update the head of the list of contexts.
1679   native_contexts_list_ = head;
1680 }
1681
1682
1683 template<>
1684 struct WeakListVisitor<JSArrayBufferView> {
1685   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1686     obj->set_weak_next(next);
1687   }
1688
1689   static Object* WeakNext(JSArrayBufferView* obj) {
1690     return obj->weak_next();
1691   }
1692
1693   static void VisitLiveObject(Heap*,
1694                               JSArrayBufferView* obj,
1695                               WeakObjectRetainer* retainer,
1696                               bool record_slots) {}
1697
1698   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1699
1700   static int WeakNextOffset() {
1701     return JSArrayBufferView::kWeakNextOffset;
1702   }
1703 };
1704
1705
1706 template<>
1707 struct WeakListVisitor<JSArrayBuffer> {
1708   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1709     obj->set_weak_next(next);
1710   }
1711
1712   static Object* WeakNext(JSArrayBuffer* obj) {
1713     return obj->weak_next();
1714   }
1715
1716   static void VisitLiveObject(Heap* heap,
1717                               JSArrayBuffer* array_buffer,
1718                               WeakObjectRetainer* retainer,
1719                               bool record_slots) {
1720     Object* typed_array_obj =
1721         VisitWeakList<JSArrayBufferView>(
1722             heap,
1723             array_buffer->weak_first_view(),
1724             retainer, record_slots);
1725     array_buffer->set_weak_first_view(typed_array_obj);
1726     if (typed_array_obj != heap->undefined_value() && record_slots) {
1727       Object** slot = HeapObject::RawField(
1728           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1729       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1730     }
1731   }
1732
1733   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1734     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1735   }
1736
1737   static int WeakNextOffset() {
1738     return JSArrayBuffer::kWeakNextOffset;
1739   }
1740 };
1741
1742
1743 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1744                                bool record_slots) {
1745   Object* array_buffer_obj =
1746       VisitWeakList<JSArrayBuffer>(this,
1747                                    array_buffers_list(),
1748                                    retainer, record_slots);
1749   set_array_buffers_list(array_buffer_obj);
1750 }
1751
1752
1753 void Heap::TearDownArrayBuffers() {
1754   Object* undefined = undefined_value();
1755   for (Object* o = array_buffers_list(); o != undefined;) {
1756     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1757     Runtime::FreeArrayBuffer(isolate(), buffer);
1758     o = buffer->weak_next();
1759   }
1760   array_buffers_list_ = undefined;
1761 }
1762
1763
1764 template<>
1765 struct WeakListVisitor<AllocationSite> {
1766   static void SetWeakNext(AllocationSite* obj, Object* next) {
1767     obj->set_weak_next(next);
1768   }
1769
1770   static Object* WeakNext(AllocationSite* obj) {
1771     return obj->weak_next();
1772   }
1773
1774   static void VisitLiveObject(Heap* heap,
1775                               AllocationSite* array_buffer,
1776                               WeakObjectRetainer* retainer,
1777                               bool record_slots) {}
1778
1779   static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}
1780
1781   static int WeakNextOffset() {
1782     return AllocationSite::kWeakNextOffset;
1783   }
1784 };
1785
1786
1787 void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
1788                                   bool record_slots) {
1789   Object* allocation_site_obj =
1790       VisitWeakList<AllocationSite>(this,
1791                                     allocation_sites_list(),
1792                                     retainer, record_slots);
1793   set_allocation_sites_list(allocation_site_obj);
1794 }
1795
1796
1797 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1798   DisallowHeapAllocation no_allocation;
1799
1800   // Both the external string table and the string table may contain
1801   // external strings, but neither lists them exhaustively, nor is the
1802   // intersection set empty.  Therefore we iterate over the external string
1803   // table first, ignoring internalized strings, and then over the
1804   // internalized string table.
1805
1806   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1807    public:
1808     explicit ExternalStringTableVisitorAdapter(
1809         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1810     virtual void VisitPointers(Object** start, Object** end) {
1811       for (Object** p = start; p < end; p++) {
1812         // Visit non-internalized external strings,
1813         // since internalized strings are listed in the string table.
1814         if (!(*p)->IsInternalizedString()) {
1815           ASSERT((*p)->IsExternalString());
1816           visitor_->VisitExternalString(Utils::ToLocal(
1817               Handle<String>(String::cast(*p))));
1818         }
1819       }
1820     }
1821    private:
1822     v8::ExternalResourceVisitor* visitor_;
1823   } external_string_table_visitor(visitor);
1824
1825   external_string_table_.Iterate(&external_string_table_visitor);
1826
1827   class StringTableVisitorAdapter : public ObjectVisitor {
1828    public:
1829     explicit StringTableVisitorAdapter(
1830         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1831     virtual void VisitPointers(Object** start, Object** end) {
1832       for (Object** p = start; p < end; p++) {
1833         if ((*p)->IsExternalString()) {
1834           ASSERT((*p)->IsInternalizedString());
1835           visitor_->VisitExternalString(Utils::ToLocal(
1836               Handle<String>(String::cast(*p))));
1837         }
1838       }
1839     }
1840    private:
1841     v8::ExternalResourceVisitor* visitor_;
1842   } string_table_visitor(visitor);
1843
1844   string_table()->IterateElements(&string_table_visitor);
1845 }
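
// A minimal embedder-side visitor for this hook might look roughly like the
// sketch below (ResourceLister is an illustrative name; the visitor is
// normally handed in through the public VisitExternalResources entry point):
//
//   class ResourceLister : public v8::ExternalResourceVisitor {
//    public:
//     virtual void VisitExternalString(v8::Handle<v8::String> string) {
//       // Inspect the string's external resource here, e.g. via
//       // String::GetExternalStringResource().  Do not allocate on the V8
//       // heap: the iteration above runs under DisallowHeapAllocation.
//     }
//   };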
1846
1847
1848 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1849  public:
1850   static inline void VisitPointer(Heap* heap, Object** p) {
1851     Object* object = *p;
1852     if (!heap->InNewSpace(object)) return;
1853     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1854                          reinterpret_cast<HeapObject*>(object));
1855   }
1856 };
1857
1858
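// DoScavenge is essentially a Cheney-style breadth-first copy: the region
// between new_space_front and new_space_.top() holds objects that have been
// copied but not yet scanned, scanning them may copy further objects and
// advance top(), and draining the promotion queue can grow to-space as well,
// so the outer loop keeps iterating until the front catches up with the top.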
1859 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1860                          Address new_space_front) {
1861   do {
1862     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1863     // The addresses new_space_front and new_space_.top() define a
1864     // queue of unprocessed copied objects.  Process them until the
1865     // queue is empty.
1866     while (new_space_front != new_space_.top()) {
1867       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1868         HeapObject* object = HeapObject::FromAddress(new_space_front);
1869         new_space_front +=
1870           NewSpaceScavenger::IterateBody(object->map(), object);
1871       } else {
1872         new_space_front =
1873             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1874       }
1875     }
1876
1877     // Promote and process all the to-be-promoted objects.
1878     {
1879       StoreBufferRebuildScope scope(this,
1880                                     store_buffer(),
1881                                     &ScavengeStoreBufferCallback);
1882       while (!promotion_queue()->is_empty()) {
1883         HeapObject* target;
1884         int size;
1885         promotion_queue()->remove(&target, &size);
1886
1887         // A promoted object might already be partially visited
1888         // during old space pointer iteration. Thus we search specifically
1889         // for pointers to the from-semispace instead of looking for
1890         // pointers to new space.
1891         ASSERT(!target->IsMap());
1892         IterateAndMarkPointersToFromSpace(target->address(),
1893                                           target->address() + size,
1894                                           &ScavengeObject);
1895       }
1896     }
1897
1898     // Take another spin if there are now unswept objects in new space
1899     // (there are currently no more unswept promoted objects).
1900   } while (new_space_front != new_space_.top());
1901
1902   return new_space_front;
1903 }
1904
1905
1906 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1907
1908
1909 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1910                                               HeapObject* object,
1911                                               int size));
1912
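// A worked example of the fix-up below (addresses are illustrative and assume
// 4-byte pointers with 8-byte double alignment): the caller has already padded
// the allocation by kPointerSize.  If the padded block starts at an address
// ending in ...4, a one-word filler is placed at the start and the object
// begins at ...8; otherwise the block already starts at ...8 and the spare
// word at its end becomes the filler.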
1913 static HeapObject* EnsureDoubleAligned(Heap* heap,
1914                                        HeapObject* object,
1915                                        int size) {
1916   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1917     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1918     return HeapObject::FromAddress(object->address() + kPointerSize);
1919   } else {
1920     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1921                                kPointerSize);
1922     return object;
1923   }
1924 }
1925
1926
1927 enum LoggingAndProfiling {
1928   LOGGING_AND_PROFILING_ENABLED,
1929   LOGGING_AND_PROFILING_DISABLED
1930 };
1931
1932
1933 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1934
1935
1936 template<MarksHandling marks_handling,
1937          LoggingAndProfiling logging_and_profiling_mode>
1938 class ScavengingVisitor : public StaticVisitorBase {
1939  public:
1940   static void Initialize() {
1941     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1942     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1943     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1944     table_.Register(kVisitByteArray, &EvacuateByteArray);
1945     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1946     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1947
1948     table_.Register(kVisitNativeContext,
1949                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1950                         template VisitSpecialized<Context::kSize>);
1951
1952     table_.Register(kVisitConsString,
1953                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1954                         template VisitSpecialized<ConsString::kSize>);
1955
1956     table_.Register(kVisitSlicedString,
1957                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1958                         template VisitSpecialized<SlicedString::kSize>);
1959
1960     table_.Register(kVisitSymbol,
1961                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1962                         template VisitSpecialized<Symbol::kSize>);
1963
1964     table_.Register(kVisitSharedFunctionInfo,
1965                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1966                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1967
1968     table_.Register(kVisitJSWeakMap,
1969                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1970                     Visit);
1971
1972     table_.Register(kVisitJSWeakSet,
1973                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1974                     Visit);
1975
1976     table_.Register(kVisitJSArrayBuffer,
1977                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1978                     Visit);
1979
1980     table_.Register(kVisitJSTypedArray,
1981                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1982                     Visit);
1983
1984     table_.Register(kVisitJSDataView,
1985                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1986                     Visit);
1987
1988     table_.Register(kVisitJSRegExp,
1989                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1990                     Visit);
1991
1992     if (marks_handling == IGNORE_MARKS) {
1993       table_.Register(kVisitJSFunction,
1994                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1995                           template VisitSpecialized<JSFunction::kSize>);
1996     } else {
1997       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1998     }
1999
2000     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
2001                                    kVisitDataObject,
2002                                    kVisitDataObjectGeneric>();
2003
2004     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2005                                    kVisitJSObject,
2006                                    kVisitJSObjectGeneric>();
2007
2008     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
2009                                    kVisitStruct,
2010                                    kVisitStructGeneric>();
2011   }
2012
2013   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
2014     return &table_;
2015   }
2016
2017  private:
2018   enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
2019   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
2020
2021   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
2022     bool should_record = false;
2023 #ifdef DEBUG
2024     should_record = FLAG_heap_stats;
2025 #endif
2026     should_record = should_record || FLAG_log_gc;
2027     if (should_record) {
2028       if (heap->new_space()->Contains(obj)) {
2029         heap->new_space()->RecordAllocation(obj);
2030       } else {
2031         heap->new_space()->RecordPromotion(obj);
2032       }
2033     }
2034   }
2035
2036   // Helper function used by CopyObject to copy the content of a source
2037   // object to an allocated target object and set the forwarding address in
2038   // the source object's map word.
2039   INLINE(static void MigrateObject(Heap* heap,
2040                                    HeapObject* source,
2041                                    HeapObject* target,
2042                                    int size)) {
2043     // Copy the content of source to target.
2044     heap->CopyBlock(target->address(), source->address(), size);
2045
2046     // Set the forwarding address.
2047     source->set_map_word(MapWord::FromForwardingAddress(target));
2048
2049     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2050       // Update NewSpace stats if necessary.
2051       RecordCopiedObject(heap, target);
2052       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2053       Isolate* isolate = heap->isolate();
2054       if (isolate->logger()->is_logging_code_events() ||
2055           isolate->cpu_profiler()->is_profiling()) {
2056         if (target->IsSharedFunctionInfo()) {
2057           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2058               source->address(), target->address()));
2059         }
2060       }
2061     }
2062
2063     if (marks_handling == TRANSFER_MARKS) {
2064       if (Marking::TransferColor(source, target)) {
2065         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2066       }
2067     }
2068   }
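
  // Once MigrateObject has run, the from-space copy is no longer a valid
  // object: its map word now encodes the forwarding address.  Any other slot
  // that still points at the old copy is fixed up with the pattern used
  // elsewhere in this file, e.g.:
  //
  //   MapWord first_word = object->map_word();
  //   if (first_word.IsForwardingAddress()) {
  //     *slot = first_word.ToForwardingAddress();
  //   }
  //
  // (compare EvacuateShortcutCandidate below and
  // UpdateNewSpaceReferenceInExternalStringTableEntry above).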
2069
2070
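  // EvacuateObject tries promotion first: if ShouldBePromoted() approves, the
  // object is copied into old space (the large object space for oversized
  // UNKNOWN_SIZE objects, old data space for DATA_OBJECTs, old pointer space
  // otherwise) and, for pointer objects, pushed onto the promotion queue so
  // its pointers into from-space get rescanned.  If promotion is not wanted
  // or the old-space allocation fails, the object is copied within new space;
  // that path uses ToObjectUnchecked() because a to-space copy is not
  // expected to fail during a scavenge.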
2071   template<ObjectContents object_contents,
2072            SizeRestriction size_restriction,
2073            int alignment>
2074   static inline void EvacuateObject(Map* map,
2075                                     HeapObject** slot,
2076                                     HeapObject* object,
2077                                     int object_size) {
2078     SLOW_ASSERT((size_restriction != SMALL) ||
2079                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2080     SLOW_ASSERT(object->Size() == object_size);
2081
2082     int allocation_size = object_size;
2083     if (alignment != kObjectAlignment) {
2084       ASSERT(alignment == kDoubleAlignment);
2085       allocation_size += kPointerSize;
2086     }
2087
2088     Heap* heap = map->GetHeap();
2089     if (heap->ShouldBePromoted(object->address(), object_size)) {
2090       MaybeObject* maybe_result;
2091
2092       if ((size_restriction != SMALL) &&
2093           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2094         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2095                                                      NOT_EXECUTABLE);
2096       } else {
2097         if (object_contents == DATA_OBJECT) {
2098           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2099         } else {
2100           maybe_result =
2101               heap->old_pointer_space()->AllocateRaw(allocation_size);
2102         }
2103       }
2104
2105       Object* result = NULL;  // Initialization to please compiler.
2106       if (maybe_result->ToObject(&result)) {
2107         HeapObject* target = HeapObject::cast(result);
2108
2109         if (alignment != kObjectAlignment) {
2110           target = EnsureDoubleAligned(heap, target, allocation_size);
2111         }
2112
2113         // Order is important: slot might be inside of the target if target
2114         // was allocated over a dead object and slot comes from the store
2115         // buffer.
2116         *slot = target;
2117         MigrateObject(heap, object, target, object_size);
2118
2119         if (object_contents == POINTER_OBJECT) {
2120           if (map->instance_type() == JS_FUNCTION_TYPE) {
2121             heap->promotion_queue()->insert(
2122                 target, JSFunction::kNonWeakFieldsEndOffset);
2123           } else {
2124             heap->promotion_queue()->insert(target, object_size);
2125           }
2126         }
2127
2128         heap->tracer()->increment_promoted_objects_size(object_size);
2129         return;
2130       }
2131     }
2132     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2133     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2134     Object* result = allocation->ToObjectUnchecked();
2135     HeapObject* target = HeapObject::cast(result);
2136
2137     if (alignment != kObjectAlignment) {
2138       target = EnsureDoubleAligned(heap, target, allocation_size);
2139     }
2140
2141     // Order is important: slot might be inside of the target if target
2142     // was allocated over a dead object and slot comes from the store
2143     // buffer.
2144     *slot = target;
2145     MigrateObject(heap, object, target, object_size);
2146     return;
2147   }
2148
2149
2150   static inline void EvacuateJSFunction(Map* map,
2151                                         HeapObject** slot,
2152                                         HeapObject* object) {
2153     ObjectEvacuationStrategy<POINTER_OBJECT>::
2154         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2155
2156     HeapObject* target = *slot;
2157     MarkBit mark_bit = Marking::MarkBitFrom(target);
2158     if (Marking::IsBlack(mark_bit)) {
2159       // This object is black and it might not be rescanned by the marker.
2160       // We should explicitly record the code entry slot for compaction because
2161       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2162       // miss it, as it is not HeapObject-tagged.
2163       Address code_entry_slot =
2164           target->address() + JSFunction::kCodeEntryOffset;
2165       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2166       map->GetHeap()->mark_compact_collector()->
2167           RecordCodeEntrySlot(code_entry_slot, code);
2168     }
2169   }
2170
2171
2172   static inline void EvacuateFixedArray(Map* map,
2173                                         HeapObject** slot,
2174                                         HeapObject* object) {
2175     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2176     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2177                                                  slot,
2178                                                  object,
2179                                                  object_size);
2180   }
2181
2182
2183   static inline void EvacuateFixedDoubleArray(Map* map,
2184                                               HeapObject** slot,
2185                                               HeapObject* object) {
2186     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2187     int object_size = FixedDoubleArray::SizeFor(length);
2188     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2189         map,
2190         slot,
2191         object,
2192         object_size);
2193   }
2194
2195
2196   static inline void EvacuateByteArray(Map* map,
2197                                        HeapObject** slot,
2198                                        HeapObject* object) {
2199     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2200     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2201         map, slot, object, object_size);
2202   }
2203
2204
2205   static inline void EvacuateSeqOneByteString(Map* map,
2206                                             HeapObject** slot,
2207                                             HeapObject* object) {
2208     int object_size = SeqOneByteString::cast(object)->
2209         SeqOneByteStringSize(map->instance_type());
2210     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2211         map, slot, object, object_size);
2212   }
2213
2214
2215   static inline void EvacuateSeqTwoByteString(Map* map,
2216                                               HeapObject** slot,
2217                                               HeapObject* object) {
2218     int object_size = SeqTwoByteString::cast(object)->
2219         SeqTwoByteStringSize(map->instance_type());
2220     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2221         map, slot, object, object_size);
2222   }
2223
2224
2225   static inline bool IsShortcutCandidate(int type) {
2226     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2227   }
2228
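  // Shortcutting: when marks are ignored (no incremental marking) and the
  // cons string's second part is the empty string, the slot is redirected
  // straight to the first part and the cons cell itself gets a forwarding map
  // word so later visits take the same shortcut.  Otherwise the cons string
  // is evacuated as an ordinary object of ConsString::kSize; in addition,
  // while the incremental marker is compacting, SelectScavengingVisitorsTable
  // reroutes shortcut candidates to the plain cons-string visitor.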
2229   static inline void EvacuateShortcutCandidate(Map* map,
2230                                                HeapObject** slot,
2231                                                HeapObject* object) {
2232     ASSERT(IsShortcutCandidate(map->instance_type()));
2233
2234     Heap* heap = map->GetHeap();
2235
2236     if (marks_handling == IGNORE_MARKS &&
2237         ConsString::cast(object)->unchecked_second() ==
2238         heap->empty_string()) {
2239       HeapObject* first =
2240           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2241
2242       *slot = first;
2243
2244       if (!heap->InNewSpace(first)) {
2245         object->set_map_word(MapWord::FromForwardingAddress(first));
2246         return;
2247       }
2248
2249       MapWord first_word = first->map_word();
2250       if (first_word.IsForwardingAddress()) {
2251         HeapObject* target = first_word.ToForwardingAddress();
2252
2253         *slot = target;
2254         object->set_map_word(MapWord::FromForwardingAddress(target));
2255         return;
2256       }
2257
2258       heap->DoScavengeObject(first->map(), slot, first);
2259       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2260       return;
2261     }
2262
2263     int object_size = ConsString::kSize;
2264     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2265         map, slot, object, object_size);
2266   }
2267
2268   template<ObjectContents object_contents>
2269   class ObjectEvacuationStrategy {
2270    public:
2271     template<int object_size>
2272     static inline void VisitSpecialized(Map* map,
2273                                         HeapObject** slot,
2274                                         HeapObject* object) {
2275       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2276           map, slot, object, object_size);
2277     }
2278
2279     static inline void Visit(Map* map,
2280                              HeapObject** slot,
2281                              HeapObject* object) {
2282       int object_size = map->instance_size();
2283       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2284           map, slot, object, object_size);
2285     }
2286   };
2287
2288   static VisitorDispatchTable<ScavengingCallback> table_;
2289 };
2290
2291
2292 template<MarksHandling marks_handling,
2293          LoggingAndProfiling logging_and_profiling_mode>
2294 VisitorDispatchTable<ScavengingCallback>
2295     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2296
2297
2298 static void InitializeScavengingVisitorsTables() {
2299   ScavengingVisitor<TRANSFER_MARKS,
2300                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2301   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2302   ScavengingVisitor<TRANSFER_MARKS,
2303                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2304   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2305 }
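
// The four table instances initialized above cover every combination of
// {TRANSFER_MARKS, IGNORE_MARKS} x {logging/profiling enabled, disabled};
// SelectScavengingVisitorsTable() below copies the matching one into
// scavenging_visitors_table_ for the current scavenge.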
2306
2307
2308 void Heap::SelectScavengingVisitorsTable() {
2309   bool logging_and_profiling =
2310       isolate()->logger()->is_logging() ||
2311       isolate()->cpu_profiler()->is_profiling() ||
2312       (isolate()->heap_profiler() != NULL &&
2313        isolate()->heap_profiler()->is_profiling());
2314
2315   if (!incremental_marking()->IsMarking()) {
2316     if (!logging_and_profiling) {
2317       scavenging_visitors_table_.CopyFrom(
2318           ScavengingVisitor<IGNORE_MARKS,
2319                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2320     } else {
2321       scavenging_visitors_table_.CopyFrom(
2322           ScavengingVisitor<IGNORE_MARKS,
2323                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2324     }
2325   } else {
2326     if (!logging_and_profiling) {
2327       scavenging_visitors_table_.CopyFrom(
2328           ScavengingVisitor<TRANSFER_MARKS,
2329                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2330     } else {
2331       scavenging_visitors_table_.CopyFrom(
2332           ScavengingVisitor<TRANSFER_MARKS,
2333                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2334     }
2335
2336     if (incremental_marking()->IsCompacting()) {
2337       // When compacting, forbid short-circuiting of cons strings.
2338       // The scavenging code relies on the fact that a new space object
2339       // cannot be evacuated into an evacuation candidate, but
2340       // short-circuiting violates this assumption.
2341       scavenging_visitors_table_.Register(
2342           StaticVisitorBase::kVisitShortcutCandidate,
2343           scavenging_visitors_table_.GetVisitorById(
2344               StaticVisitorBase::kVisitConsString));
2345     }
2346   }
2347 }
2348
2349
2350 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2351   SLOW_ASSERT(HEAP->InFromSpace(object));
2352   MapWord first_word = object->map_word();
2353   SLOW_ASSERT(!first_word.IsForwardingAddress());
2354   Map* map = first_word.ToMap();
2355   map->GetHeap()->DoScavengeObject(map, p, object);
2356 }
2357
2358
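// AllocatePartialMap fills in only the fields needed to keep allocation going
// during bootstrapping; CreateInitialMaps() below patches the remaining
// fields (code cache, dependent code, back pointer, instance descriptors,
// prototype and constructor) once empty_fixed_array, empty_descriptor_array,
// null_value and undefined_value exist.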
2359 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2360                                       int instance_size) {
2361   Object* result;
2362   MaybeObject* maybe_result = AllocateRawMap();
2363   if (!maybe_result->ToObject(&result)) return maybe_result;
2364
2365   // Map::cast cannot be used due to uninitialized map field.
2366   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2367   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2368   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2369   reinterpret_cast<Map*>(result)->set_visitor_id(
2370         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2371   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2372   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2373   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2374   reinterpret_cast<Map*>(result)->set_bit_field(0);
2375   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2376   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2377                    Map::OwnsDescriptors::encode(true);
2378   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2379   return result;
2380 }
2381
2382
2383 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2384                                int instance_size,
2385                                ElementsKind elements_kind) {
2386   Object* result;
2387   MaybeObject* maybe_result = AllocateRawMap();
2388   if (!maybe_result->To(&result)) return maybe_result;
2389
2390   Map* map = reinterpret_cast<Map*>(result);
2391   map->set_map_no_write_barrier(meta_map());
2392   map->set_instance_type(instance_type);
2393   map->set_visitor_id(
2394       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2395   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2396   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2397   map->set_instance_size(instance_size);
2398   map->set_inobject_properties(0);
2399   map->set_pre_allocated_property_fields(0);
2400   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2401   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2402                           SKIP_WRITE_BARRIER);
2403   map->init_back_pointer(undefined_value());
2404   map->set_unused_property_fields(0);
2405   map->set_instance_descriptors(empty_descriptor_array());
2406   map->set_bit_field(0);
2407   map->set_bit_field2(1 << Map::kIsExtensible);
2408   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2409                    Map::OwnsDescriptors::encode(true);
2410   map->set_bit_field3(bit_field3);
2411   map->set_elements_kind(elements_kind);
2412
2413   return map;
2414 }
2415
2416
2417 MaybeObject* Heap::AllocateCodeCache() {
2418   CodeCache* code_cache;
2419   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2420     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2421   }
2422   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2423   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2424   return code_cache;
2425 }
2426
2427
2428 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2429   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2430 }
2431
2432
2433 MaybeObject* Heap::AllocateAccessorPair() {
2434   AccessorPair* accessors;
2435   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2436     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2437   }
2438   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2439   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2440   return accessors;
2441 }
2442
2443
2444 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2445   TypeFeedbackInfo* info;
2446   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2447     if (!maybe_info->To(&info)) return maybe_info;
2448   }
2449   info->initialize_storage();
2450   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2451                                 SKIP_WRITE_BARRIER);
2452   return info;
2453 }
2454
2455
2456 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2457   AliasedArgumentsEntry* entry;
2458   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2459     if (!maybe_entry->To(&entry)) return maybe_entry;
2460   }
2461   entry->set_aliased_context_slot(aliased_context_slot);
2462   return entry;
2463 }
2464
2465
2466 const Heap::StringTypeTable Heap::string_type_table[] = {
2467 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2468   {type, size, k##camel_name##MapRootIndex},
2469   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2470 #undef STRING_TYPE_ELEMENT
2471 };
2472
2473
2474 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2475 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2476   {contents, k##name##RootIndex},
2477   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2478 #undef CONSTANT_STRING_ELEMENT
2479 };
2480
2481
2482 const Heap::StructTable Heap::struct_table[] = {
2483 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2484   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2485   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2486 #undef STRUCT_TABLE_ELEMENT
2487 };
2488
2489
2490 bool Heap::CreateInitialMaps() {
2491   Object* obj;
2492   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2493     if (!maybe_obj->ToObject(&obj)) return false;
2494   }
2495   // Map::cast cannot be used due to uninitialized map field.
2496   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2497   set_meta_map(new_meta_map);
2498   new_meta_map->set_map(new_meta_map);
2499
2500   { MaybeObject* maybe_obj =
2501         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2502     if (!maybe_obj->ToObject(&obj)) return false;
2503   }
2504   set_fixed_array_map(Map::cast(obj));
2505
2506   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2507     if (!maybe_obj->ToObject(&obj)) return false;
2508   }
2509   set_oddball_map(Map::cast(obj));
2510
2511   // Allocate the empty fixed array.
2512   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2513     if (!maybe_obj->ToObject(&obj)) return false;
2514   }
2515   set_empty_fixed_array(FixedArray::cast(obj));
2516
2517   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2518     if (!maybe_obj->ToObject(&obj)) return false;
2519   }
2520   set_null_value(Oddball::cast(obj));
2521   Oddball::cast(obj)->set_kind(Oddball::kNull);
2522
2523   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2524     if (!maybe_obj->ToObject(&obj)) return false;
2525   }
2526   set_undefined_value(Oddball::cast(obj));
2527   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2528   ASSERT(!InNewSpace(undefined_value()));
2529
2530   // Allocate the empty descriptor array.
2531   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2532     if (!maybe_obj->ToObject(&obj)) return false;
2533   }
2534   set_empty_descriptor_array(DescriptorArray::cast(obj));
2535
2536   // Fix the instance_descriptors for the existing maps.
2537   meta_map()->set_code_cache(empty_fixed_array());
2538   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2539   meta_map()->init_back_pointer(undefined_value());
2540   meta_map()->set_instance_descriptors(empty_descriptor_array());
2541
2542   fixed_array_map()->set_code_cache(empty_fixed_array());
2543   fixed_array_map()->set_dependent_code(
2544       DependentCode::cast(empty_fixed_array()));
2545   fixed_array_map()->init_back_pointer(undefined_value());
2546   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2547
2548   oddball_map()->set_code_cache(empty_fixed_array());
2549   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2550   oddball_map()->init_back_pointer(undefined_value());
2551   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2552
2553   // Fix prototype object for existing maps.
2554   meta_map()->set_prototype(null_value());
2555   meta_map()->set_constructor(null_value());
2556
2557   fixed_array_map()->set_prototype(null_value());
2558   fixed_array_map()->set_constructor(null_value());
2559
2560   oddball_map()->set_prototype(null_value());
2561   oddball_map()->set_constructor(null_value());
2562
2563   { MaybeObject* maybe_obj =
2564         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2565     if (!maybe_obj->ToObject(&obj)) return false;
2566   }
2567   set_fixed_cow_array_map(Map::cast(obj));
2568   ASSERT(fixed_array_map() != fixed_cow_array_map());
2569
2570   { MaybeObject* maybe_obj =
2571         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2572     if (!maybe_obj->ToObject(&obj)) return false;
2573   }
2574   set_scope_info_map(Map::cast(obj));
2575
2576   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2577     if (!maybe_obj->ToObject(&obj)) return false;
2578   }
2579   set_heap_number_map(Map::cast(obj));
2580
2581   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2582     if (!maybe_obj->ToObject(&obj)) return false;
2583   }
2584   set_symbol_map(Map::cast(obj));
2585
2586   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2587     if (!maybe_obj->ToObject(&obj)) return false;
2588   }
2589   set_foreign_map(Map::cast(obj));
2590
2591   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2592     const StringTypeTable& entry = string_type_table[i];
2593     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2594       if (!maybe_obj->ToObject(&obj)) return false;
2595     }
2596     roots_[entry.index] = Map::cast(obj);
2597   }
2598
2599   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2600     if (!maybe_obj->ToObject(&obj)) return false;
2601   }
2602   set_undetectable_string_map(Map::cast(obj));
2603   Map::cast(obj)->set_is_undetectable();
2604
2605   { MaybeObject* maybe_obj =
2606         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2607     if (!maybe_obj->ToObject(&obj)) return false;
2608   }
2609   set_undetectable_ascii_string_map(Map::cast(obj));
2610   Map::cast(obj)->set_is_undetectable();
2611
2612   { MaybeObject* maybe_obj =
2613         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2614     if (!maybe_obj->ToObject(&obj)) return false;
2615   }
2616   set_fixed_double_array_map(Map::cast(obj));
2617
2618   { MaybeObject* maybe_obj =
2619         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2620     if (!maybe_obj->ToObject(&obj)) return false;
2621   }
2622   set_byte_array_map(Map::cast(obj));
2623
2624   { MaybeObject* maybe_obj =
2625         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2626     if (!maybe_obj->ToObject(&obj)) return false;
2627   }
2628   set_free_space_map(Map::cast(obj));
2629
2630   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2631     if (!maybe_obj->ToObject(&obj)) return false;
2632   }
2633   set_empty_byte_array(ByteArray::cast(obj));
2634
2635   { MaybeObject* maybe_obj =
2636         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2637     if (!maybe_obj->ToObject(&obj)) return false;
2638   }
2639   set_external_pixel_array_map(Map::cast(obj));
2640
2641   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2642                                          ExternalArray::kAlignedSize);
2643     if (!maybe_obj->ToObject(&obj)) return false;
2644   }
2645   set_external_byte_array_map(Map::cast(obj));
2646
2647   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2648                                          ExternalArray::kAlignedSize);
2649     if (!maybe_obj->ToObject(&obj)) return false;
2650   }
2651   set_external_unsigned_byte_array_map(Map::cast(obj));
2652
2653   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2654                                          ExternalArray::kAlignedSize);
2655     if (!maybe_obj->ToObject(&obj)) return false;
2656   }
2657   set_external_short_array_map(Map::cast(obj));
2658
2659   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2660                                          ExternalArray::kAlignedSize);
2661     if (!maybe_obj->ToObject(&obj)) return false;
2662   }
2663   set_external_unsigned_short_array_map(Map::cast(obj));
2664
2665   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2666                                          ExternalArray::kAlignedSize);
2667     if (!maybe_obj->ToObject(&obj)) return false;
2668   }
2669   set_external_int_array_map(Map::cast(obj));
2670
2671   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2672                                          ExternalArray::kAlignedSize);
2673     if (!maybe_obj->ToObject(&obj)) return false;
2674   }
2675   set_external_unsigned_int_array_map(Map::cast(obj));
2676
2677   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2678                                          ExternalArray::kAlignedSize);
2679     if (!maybe_obj->ToObject(&obj)) return false;
2680   }
2681   set_external_float_array_map(Map::cast(obj));
2682
2683   { MaybeObject* maybe_obj =
2684         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2685     if (!maybe_obj->ToObject(&obj)) return false;
2686   }
2687   set_non_strict_arguments_elements_map(Map::cast(obj));
2688
2689   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2690                                          ExternalArray::kAlignedSize);
2691     if (!maybe_obj->ToObject(&obj)) return false;
2692   }
2693   set_external_double_array_map(Map::cast(obj));
2694
2695   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2696     if (!maybe_obj->ToObject(&obj)) return false;
2697   }
2698   set_empty_external_byte_array(ExternalArray::cast(obj));
2699
2700   { MaybeObject* maybe_obj =
2701         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2702     if (!maybe_obj->ToObject(&obj)) return false;
2703   }
2704   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2705
2706   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2707     if (!maybe_obj->ToObject(&obj)) return false;
2708   }
2709   set_empty_external_short_array(ExternalArray::cast(obj));
2710
2711   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2712       kExternalUnsignedShortArray);
2713     if (!maybe_obj->ToObject(&obj)) return false;
2714   }
2715   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2716
2717   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2718     if (!maybe_obj->ToObject(&obj)) return false;
2719   }
2720   set_empty_external_int_array(ExternalArray::cast(obj));
2721
2722   { MaybeObject* maybe_obj =
2723         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2724     if (!maybe_obj->ToObject(&obj)) return false;
2725   }
2726   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2727
2728   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2729     if (!maybe_obj->ToObject(&obj)) return false;
2730   }
2731   set_empty_external_float_array(ExternalArray::cast(obj));
2732
2733   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2734     if (!maybe_obj->ToObject(&obj)) return false;
2735   }
2736   set_empty_external_double_array(ExternalArray::cast(obj));
2737
2738   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2739     if (!maybe_obj->ToObject(&obj)) return false;
2740   }
2741   set_empty_external_pixel_array(ExternalArray::cast(obj));
2742
2743   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2744     if (!maybe_obj->ToObject(&obj)) return false;
2745   }
2746   set_code_map(Map::cast(obj));
2747
2748   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2749     if (!maybe_obj->ToObject(&obj)) return false;
2750   }
2751   set_cell_map(Map::cast(obj));
2752
2753   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2754                                          PropertyCell::kSize);
2755     if (!maybe_obj->ToObject(&obj)) return false;
2756   }
2757   set_global_property_cell_map(Map::cast(obj));
2758
2759   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2760     if (!maybe_obj->ToObject(&obj)) return false;
2761   }
2762   set_one_pointer_filler_map(Map::cast(obj));
2763
2764   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2765     if (!maybe_obj->ToObject(&obj)) return false;
2766   }
2767   set_two_pointer_filler_map(Map::cast(obj));
2768
2769   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2770     const StructTable& entry = struct_table[i];
2771     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2772       if (!maybe_obj->ToObject(&obj)) return false;
2773     }
2774     roots_[entry.index] = Map::cast(obj);
2775   }
2776
2777   { MaybeObject* maybe_obj =
2778         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2779     if (!maybe_obj->ToObject(&obj)) return false;
2780   }
2781   set_hash_table_map(Map::cast(obj));
2782
2783   { MaybeObject* maybe_obj =
2784         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2785     if (!maybe_obj->ToObject(&obj)) return false;
2786   }
2787   set_function_context_map(Map::cast(obj));
2788
2789   { MaybeObject* maybe_obj =
2790         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2791     if (!maybe_obj->ToObject(&obj)) return false;
2792   }
2793   set_catch_context_map(Map::cast(obj));
2794
2795   { MaybeObject* maybe_obj =
2796         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2797     if (!maybe_obj->ToObject(&obj)) return false;
2798   }
2799   set_with_context_map(Map::cast(obj));
2800
2801   { MaybeObject* maybe_obj =
2802         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2803     if (!maybe_obj->ToObject(&obj)) return false;
2804   }
2805   set_block_context_map(Map::cast(obj));
2806
2807   { MaybeObject* maybe_obj =
2808         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2809     if (!maybe_obj->ToObject(&obj)) return false;
2810   }
2811   set_module_context_map(Map::cast(obj));
2812
2813   { MaybeObject* maybe_obj =
2814         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2815     if (!maybe_obj->ToObject(&obj)) return false;
2816   }
2817   set_global_context_map(Map::cast(obj));
2818
2819   { MaybeObject* maybe_obj =
2820         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2821     if (!maybe_obj->ToObject(&obj)) return false;
2822   }
2823   Map* native_context_map = Map::cast(obj);
2824   native_context_map->set_dictionary_map(true);
2825   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2826   set_native_context_map(native_context_map);
2827
2828   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2829                                          SharedFunctionInfo::kAlignedSize);
2830     if (!maybe_obj->ToObject(&obj)) return false;
2831   }
2832   set_shared_function_info_map(Map::cast(obj));
2833
2834   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2835                                          JSMessageObject::kSize);
2836     if (!maybe_obj->ToObject(&obj)) return false;
2837   }
2838   set_message_object_map(Map::cast(obj));
2839
2840   Map* external_map;
2841   { MaybeObject* maybe_obj =
2842         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2843     if (!maybe_obj->To(&external_map)) return false;
2844   }
2845   external_map->set_is_extensible(false);
2846   set_external_map(external_map);
2847
2848   ASSERT(!InNewSpace(empty_fixed_array()));
2849   return true;
2850 }
2851
2852
2853 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2854   // Statically ensure that it is safe to allocate heap numbers in paged
2855   // spaces.
2856   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2857   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2858
2859   Object* result;
2860   { MaybeObject* maybe_result =
2861         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2862     if (!maybe_result->ToObject(&result)) return maybe_result;
2863   }
2864
2865   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2866   HeapNumber::cast(result)->set_value(value);
2867   return result;
2868 }
2869
2870
2871 MaybeObject* Heap::AllocateHeapNumber(double value) {
2872   // Use the general version if we're forced to always allocate.
2873   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2874
2875   // This version of AllocateHeapNumber is optimized for
2876   // allocation in new space.
2877   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2878   Object* result;
2879   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2880     if (!maybe_result->ToObject(&result)) return maybe_result;
2881   }
2882   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2883   HeapNumber::cast(result)->set_value(value);
2884   return result;
2885 }
2886
2887
2888 MaybeObject* Heap::AllocateCell(Object* value) {
2889   Object* result;
2890   { MaybeObject* maybe_result = AllocateRawCell();
2891     if (!maybe_result->ToObject(&result)) return maybe_result;
2892   }
2893   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2894   Cell::cast(result)->set_value(value);
2895   return result;
2896 }
2897
2898
2899 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2900   Object* result;
2901   MaybeObject* maybe_result = AllocateRawPropertyCell();
2902   if (!maybe_result->ToObject(&result)) return maybe_result;
2903
2904   HeapObject::cast(result)->set_map_no_write_barrier(
2905       global_property_cell_map());
2906   PropertyCell* cell = PropertyCell::cast(result);
2907   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2908                            SKIP_WRITE_BARRIER);
2909   cell->set_value(value);
2910   cell->set_type(Type::None());
2911   maybe_result = cell->SetValueInferType(value);
2912   if (maybe_result->IsFailure()) return maybe_result;
2913   return result;
2914 }
2915
2916
2917 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2918   Box* result;
2919   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2920   if (!maybe_result->To(&result)) return maybe_result;
2921   result->set_value(value);
2922   return result;
2923 }
2924
2925
2926 MaybeObject* Heap::AllocateAllocationSite() {
2927   Object* result;
2928   MaybeObject* maybe_result = Allocate(allocation_site_map(),
2929                                        OLD_POINTER_SPACE);
2930   if (!maybe_result->ToObject(&result)) return maybe_result;
2931   AllocationSite* site = AllocationSite::cast(result);
2932   site->Initialize();
2933
2934   // Link the site into the heap's weak list of allocation sites.
2935   site->set_weak_next(allocation_sites_list());
2936   set_allocation_sites_list(site);
2937   return result;
2938 }
2939
2940
2941 MaybeObject* Heap::CreateOddball(const char* to_string,
2942                                  Object* to_number,
2943                                  byte kind) {
2944   Object* result;
2945   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2946     if (!maybe_result->ToObject(&result)) return maybe_result;
2947   }
2948   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2949 }
2950
2951
2952 bool Heap::CreateApiObjects() {
2953   Object* obj;
2954
2955   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2956     if (!maybe_obj->ToObject(&obj)) return false;
2957   }
2958   // Don't use Smi-only elements optimizations for objects with the neander
2959   // map. There are too many cases where element values are set directly,
2960   // without a bottleneck that could trap the Smi-only -> fast elements
2961   // transition, and there appears to be no benefit in optimizing this case.
2962   Map* new_neander_map = Map::cast(obj);
2963   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2964   set_neander_map(new_neander_map);
2965
2966   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2967     if (!maybe_obj->ToObject(&obj)) return false;
2968   }
2969   Object* elements;
2970   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2971     if (!maybe_elements->ToObject(&elements)) return false;
2972   }
2973   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2974   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2975   set_message_listeners(JSObject::cast(obj));
2976
2977   return true;
2978 }
2979
2980
2981 void Heap::CreateJSEntryStub() {
2982   JSEntryStub stub;
2983   set_js_entry_code(*stub.GetCode(isolate()));
2984 }
2985
2986
2987 void Heap::CreateJSConstructEntryStub() {
2988   JSConstructEntryStub stub;
2989   set_js_construct_entry_code(*stub.GetCode(isolate()));
2990 }
2991
2992
2993 void Heap::CreateFixedStubs() {
2994   // Here we create roots for fixed stubs. They are needed at GC
2995   // for cooking and uncooking (check out frames.cc).
2996   // This eliminates the need for doing a dictionary lookup in the
2997   // stub cache for these stubs.
2998   HandleScope scope(isolate());
2999   // gcc-4.4 has problem generating correct code of following snippet:
3000   // {  JSEntryStub stub;
3001   //    js_entry_code_ = *stub.GetCode();
3002   // }
3003   // {  JSConstructEntryStub stub;
3004   //    js_construct_entry_code_ = *stub.GetCode();
3005   // }
3006   // To work around the problem, make separate functions without inlining.
3007   Heap::CreateJSEntryStub();
3008   Heap::CreateJSConstructEntryStub();
3009
3010   // Create stubs that should be there, so we don't unexpectedly have to
3011   // create them if we need them during the creation of another stub.
3012   // Stub creation mixes raw pointers and handles in an unsafe manner so
3013   // we cannot create stubs while we are creating stubs.
3014   CodeStub::GenerateStubsAheadOfTime(isolate());
3015 }
3016
3017
3018 bool Heap::CreateInitialObjects() {
3019   Object* obj;
3020
3021   // The -0 value must be set before NumberFromDouble works.
3022   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3023     if (!maybe_obj->ToObject(&obj)) return false;
3024   }
3025   set_minus_zero_value(HeapNumber::cast(obj));
3026   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3027
3028   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3029     if (!maybe_obj->ToObject(&obj)) return false;
3030   }
3031   set_nan_value(HeapNumber::cast(obj));
3032
3033   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3034     if (!maybe_obj->ToObject(&obj)) return false;
3035   }
3036   set_infinity_value(HeapNumber::cast(obj));
3037
3038   // The hole has not been created yet, but we want to put something
3039   // predictable in the gaps in the string table, so let's make that Smi zero.
3040   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3041
3042   // Allocate initial string table.
3043   { MaybeObject* maybe_obj =
3044         StringTable::Allocate(this, kInitialStringTableSize);
3045     if (!maybe_obj->ToObject(&obj)) return false;
3046   }
3047   // Don't use set_string_table() due to asserts.
3048   roots_[kStringTableRootIndex] = obj;
3049
3050   // Finish initializing oddballs after creating the string table.
3051   { MaybeObject* maybe_obj =
3052         undefined_value()->Initialize("undefined",
3053                                       nan_value(),
3054                                       Oddball::kUndefined);
3055     if (!maybe_obj->ToObject(&obj)) return false;
3056   }
3057
3058   // Initialize the null_value.
3059   { MaybeObject* maybe_obj =
3060         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3061     if (!maybe_obj->ToObject(&obj)) return false;
3062   }
3063
3064   { MaybeObject* maybe_obj = CreateOddball("true",
3065                                            Smi::FromInt(1),
3066                                            Oddball::kTrue);
3067     if (!maybe_obj->ToObject(&obj)) return false;
3068   }
3069   set_true_value(Oddball::cast(obj));
3070
3071   { MaybeObject* maybe_obj = CreateOddball("false",
3072                                            Smi::FromInt(0),
3073                                            Oddball::kFalse);
3074     if (!maybe_obj->ToObject(&obj)) return false;
3075   }
3076   set_false_value(Oddball::cast(obj));
3077
3078   { MaybeObject* maybe_obj = CreateOddball("hole",
3079                                            Smi::FromInt(-1),
3080                                            Oddball::kTheHole);
3081     if (!maybe_obj->ToObject(&obj)) return false;
3082   }
3083   set_the_hole_value(Oddball::cast(obj));
3084
3085   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3086                                            Smi::FromInt(-1),
3087                                            Oddball::kUninitialized);
3088     if (!maybe_obj->ToObject(&obj)) return false;
3089   }
3090   set_uninitialized_value(Oddball::cast(obj));
3091
3092   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3093                                            Smi::FromInt(-4),
3094                                            Oddball::kArgumentMarker);
3095     if (!maybe_obj->ToObject(&obj)) return false;
3096   }
3097   set_arguments_marker(Oddball::cast(obj));
3098
3099   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3100                                            Smi::FromInt(-2),
3101                                            Oddball::kOther);
3102     if (!maybe_obj->ToObject(&obj)) return false;
3103   }
3104   set_no_interceptor_result_sentinel(obj);
3105
3106   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3107                                            Smi::FromInt(-3),
3108                                            Oddball::kOther);
3109     if (!maybe_obj->ToObject(&obj)) return false;
3110   }
3111   set_termination_exception(obj);
3112
3113   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3114     { MaybeObject* maybe_obj =
3115           InternalizeUtf8String(constant_string_table[i].contents);
3116       if (!maybe_obj->ToObject(&obj)) return false;
3117     }
3118     roots_[constant_string_table[i].index] = String::cast(obj);
3119   }
3120
3121   // Allocate the hidden string which is used to identify the hidden properties
3122   // in JSObjects. The hash code has a special value so that it will not match
3123   // the empty string when searching for the property. It cannot be part of the
3124   // loop above because it needs to be allocated manually with the special
3125   // hash code in place. The hash code for the hidden_string is zero to ensure
3126   // that it will always be at the first entry in property descriptors.
3127   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3128       OneByteVector("", 0), String::kEmptyStringHash);
3129     if (!maybe_obj->ToObject(&obj)) return false;
3130   }
3131   hidden_string_ = String::cast(obj);
3132
3133   // Allocate the code_stubs dictionary. The initial size is set to avoid
3134   // expanding the dictionary during bootstrapping.
3135   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3136     if (!maybe_obj->ToObject(&obj)) return false;
3137   }
3138   set_code_stubs(UnseededNumberDictionary::cast(obj));
3139
3140
3141   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3142   // is set to avoid expanding the dictionary during bootstrapping.
3143   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3144     if (!maybe_obj->ToObject(&obj)) return false;
3145   }
3146   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3147
3148   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3149     if (!maybe_obj->ToObject(&obj)) return false;
3150   }
3151   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3152
3153   set_instanceof_cache_function(Smi::FromInt(0));
3154   set_instanceof_cache_map(Smi::FromInt(0));
3155   set_instanceof_cache_answer(Smi::FromInt(0));
3156
3157   CreateFixedStubs();
3158
3159   // Allocate the dictionary of intrinsic function names.
3160   { MaybeObject* maybe_obj =
3161         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3162     if (!maybe_obj->ToObject(&obj)) return false;
3163   }
3164   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3165                                                                        obj);
3166     if (!maybe_obj->ToObject(&obj)) return false;
3167   }
3168   set_intrinsic_function_names(NameDictionary::cast(obj));
3169
3170   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3171     if (!maybe_obj->ToObject(&obj)) return false;
3172   }
3173   set_number_string_cache(FixedArray::cast(obj));
3174
3175   // Allocate cache for single character one byte strings.
3176   { MaybeObject* maybe_obj =
3177         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3178     if (!maybe_obj->ToObject(&obj)) return false;
3179   }
3180   set_single_character_string_cache(FixedArray::cast(obj));
3181
3182   // Allocate cache for string split.
3183   { MaybeObject* maybe_obj = AllocateFixedArray(
3184       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3185     if (!maybe_obj->ToObject(&obj)) return false;
3186   }
3187   set_string_split_cache(FixedArray::cast(obj));
3188
3189   { MaybeObject* maybe_obj = AllocateFixedArray(
3190       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3191     if (!maybe_obj->ToObject(&obj)) return false;
3192   }
3193   set_regexp_multiple_cache(FixedArray::cast(obj));
3194
3195   // Allocate cache for external strings pointing to native source code.
3196   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3197     if (!maybe_obj->ToObject(&obj)) return false;
3198   }
3199   set_natives_source_cache(FixedArray::cast(obj));
3200
3201   // Allocate object to hold object observation state.
3202   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3203     if (!maybe_obj->ToObject(&obj)) return false;
3204   }
3205   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3206     if (!maybe_obj->ToObject(&obj)) return false;
3207   }
3208   set_observation_state(JSObject::cast(obj));
3209
3210   { MaybeObject* maybe_obj = AllocateSymbol();
3211     if (!maybe_obj->ToObject(&obj)) return false;
3212   }
3213   set_frozen_symbol(Symbol::cast(obj));
3214
3215   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3216     if (!maybe_obj->ToObject(&obj)) return false;
3217   }
3218   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3219   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3220
3221   { MaybeObject* maybe_obj = AllocateSymbol();
3222     if (!maybe_obj->ToObject(&obj)) return false;
3223   }
3224   set_observed_symbol(Symbol::cast(obj));
3225
3226   set_i18n_template_one(the_hole_value());
3227   set_i18n_template_two(the_hole_value());
3228
3229   // Handling of script id generation is in Factory::NewScript.
3230   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3231
3232   // Initialize keyed lookup cache.
3233   isolate_->keyed_lookup_cache()->Clear();
3234
3235   // Initialize context slot cache.
3236   isolate_->context_slot_cache()->Clear();
3237
3238   // Initialize descriptor cache.
3239   isolate_->descriptor_lookup_cache()->Clear();
3240
3241   // Initialize compilation cache.
3242   isolate_->compilation_cache()->Clear();
3243
3244   return true;
3245 }
3246
3247
3248 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3249   RootListIndex writable_roots[] = {
3250     kStoreBufferTopRootIndex,
3251     kStackLimitRootIndex,
3252     kNumberStringCacheRootIndex,
3253     kInstanceofCacheFunctionRootIndex,
3254     kInstanceofCacheMapRootIndex,
3255     kInstanceofCacheAnswerRootIndex,
3256     kCodeStubsRootIndex,
3257     kNonMonomorphicCacheRootIndex,
3258     kPolymorphicCodeCacheRootIndex,
3259     kLastScriptIdRootIndex,
3260     kEmptyScriptRootIndex,
3261     kRealStackLimitRootIndex,
3262     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3263     kConstructStubDeoptPCOffsetRootIndex,
3264     kGetterStubDeoptPCOffsetRootIndex,
3265     kSetterStubDeoptPCOffsetRootIndex,
3266     kStringTableRootIndex,
3267   };
3268
3269   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3270     if (root_index == writable_roots[i])
3271       return true;
3272   }
3273   return false;
3274 }
3275
3276
3277 Object* RegExpResultsCache::Lookup(Heap* heap,
3278                                    String* key_string,
3279                                    Object* key_pattern,
3280                                    ResultsCacheType type) {
3281   FixedArray* cache;
3282   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3283   if (type == STRING_SPLIT_SUBSTRINGS) {
3284     ASSERT(key_pattern->IsString());
3285     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3286     cache = heap->string_split_cache();
3287   } else {
3288     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3289     ASSERT(key_pattern->IsFixedArray());
3290     cache = heap->regexp_multiple_cache();
3291   }
3292
3293   uint32_t hash = key_string->Hash();
3294   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3295       ~(kArrayEntriesPerCacheEntry - 1));
3296   if (cache->get(index + kStringOffset) == key_string &&
3297       cache->get(index + kPatternOffset) == key_pattern) {
3298     return cache->get(index + kArrayOffset);
3299   }
3300   index =
3301       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3302   if (cache->get(index + kStringOffset) == key_string &&
3303       cache->get(index + kPatternOffset) == key_pattern) {
3304     return cache->get(index + kArrayOffset);
3305   }
3306   return Smi::FromInt(0);
3307 }
3308
3309
3310 void RegExpResultsCache::Enter(Heap* heap,
3311                                String* key_string,
3312                                Object* key_pattern,
3313                                FixedArray* value_array,
3314                                ResultsCacheType type) {
3315   FixedArray* cache;
3316   if (!key_string->IsInternalizedString()) return;
3317   if (type == STRING_SPLIT_SUBSTRINGS) {
3318     ASSERT(key_pattern->IsString());
3319     if (!key_pattern->IsInternalizedString()) return;
3320     cache = heap->string_split_cache();
3321   } else {
3322     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3323     ASSERT(key_pattern->IsFixedArray());
3324     cache = heap->regexp_multiple_cache();
3325   }
3326
3327   uint32_t hash = key_string->Hash();
3328   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3329       ~(kArrayEntriesPerCacheEntry - 1));
3330   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3331     cache->set(index + kStringOffset, key_string);
3332     cache->set(index + kPatternOffset, key_pattern);
3333     cache->set(index + kArrayOffset, value_array);
3334   } else {
3335     uint32_t index2 =
3336         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3337     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3338       cache->set(index2 + kStringOffset, key_string);
3339       cache->set(index2 + kPatternOffset, key_pattern);
3340       cache->set(index2 + kArrayOffset, value_array);
3341     } else {
3342       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3343       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3344       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3345       cache->set(index + kStringOffset, key_string);
3346       cache->set(index + kPatternOffset, key_pattern);
3347       cache->set(index + kArrayOffset, value_array);
3348     }
3349   }
3350   // If the array is a reasonably short list of substrings, convert it into a
3351   // list of internalized strings.
3352   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3353     for (int i = 0; i < value_array->length(); i++) {
3354       String* str = String::cast(value_array->get(i));
3355       Object* internalized_str;
3356       MaybeObject* maybe_string = heap->InternalizeString(str);
3357       if (maybe_string->ToObject(&internalized_str)) {
3358         value_array->set(i, internalized_str);
3359       }
3360     }
3361   }
3362   // Convert backing store to a copy-on-write array.
3363   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3364 }
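
// Note on the cache layout above (an illustrative sketch; the concrete
// constants come from heap.h and are not restated here).  The cache is a flat
// FixedArray treated as a 2-way set-associative table: each logical entry
// occupies kArrayEntriesPerCacheEntry consecutive slots (string key, pattern
// key, result array), and that stride must be a power of two because the
// primary index is computed by masking:
//
//   primary   = (hash & (kRegExpResultsCacheSize - 1))
//                 & ~(kArrayEntriesPerCacheEntry - 1);
//   secondary = (primary + kArrayEntriesPerCacheEntry)
//                 & (kRegExpResultsCacheSize - 1);
//
// Lookup probes the primary slot and then the secondary slot.  Enter fills
// the primary slot if it is empty, otherwise the secondary slot; if both are
// occupied it clears the secondary slot and overwrites the primary one.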
3365
3366
3367 void RegExpResultsCache::Clear(FixedArray* cache) {
3368   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3369     cache->set(i, Smi::FromInt(0));
3370   }
3371 }
3372
3373
3374 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3375   MaybeObject* maybe_obj =
3376       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3377   return maybe_obj;
3378 }
3379
3380
3381 int Heap::FullSizeNumberStringCacheLength() {
3382   // Compute the size of the number string cache based on the max new space size.
3383   // The number string cache has a minimum size based on twice the initial cache
3384   // size to ensure that it is bigger after being made 'full size'.
3385   int number_string_cache_size = max_semispace_size_ / 512;
3386   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3387                                  Min(0x4000, number_string_cache_size));
3388   // There is a string and a number per entry so the length is twice the number
3389   // of entries.
3390   return number_string_cache_size * 2;
3391 }
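
// Worked example (illustrative only; the 16 MB figure is an assumed
// max_semispace_size_, not a value read from any particular build):
//   16 MB / 512 = 32768 entries, clamped by Min(0x4000, ...) to 16384 entries,
//   which is returned as a FixedArray length of 2 * 16384 = 32768 slots (one
//   key slot and one string slot per entry).  The Max() guard only matters
//   for very small semispaces, where it keeps the full-size cache at least
//   twice as large as the initial one.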
3392
3393
3394 void Heap::AllocateFullSizeNumberStringCache() {
3395   // The idea is to have a small number string cache in the snapshot to keep
3396   // boot-time memory usage down.  Expanding the number string cache while
3397   // creating the snapshot would defeat that purpose.
3398   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3399   MaybeObject* maybe_obj =
3400       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3401   Object* new_cache;
3402   if (maybe_obj->ToObject(&new_cache)) {
3403     // We don't bother to repopulate the cache with entries from the old cache.
3404     // It will be repopulated soon enough with new strings.
3405     set_number_string_cache(FixedArray::cast(new_cache));
3406   }
3407   // If allocation fails then we just return without doing anything.  It is only
3408   // a cache, so best effort is OK here.
3409 }
3410
3411
3412 void Heap::FlushNumberStringCache() {
3413   // Flush the number to string cache.
3414   int len = number_string_cache()->length();
3415   for (int i = 0; i < len; i++) {
3416     number_string_cache()->set_undefined(this, i);
3417   }
3418 }
3419
3420
3421 static inline int double_get_hash(double d) {
3422   DoubleRepresentation rep(d);
3423   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3424 }
3425
3426
3427 static inline int smi_get_hash(Smi* smi) {
3428   return smi->value();
3429 }
3430
3431
3432 Object* Heap::GetNumberStringCache(Object* number) {
3433   int hash;
3434   int mask = (number_string_cache()->length() >> 1) - 1;
3435   if (number->IsSmi()) {
3436     hash = smi_get_hash(Smi::cast(number)) & mask;
3437   } else {
3438     hash = double_get_hash(number->Number()) & mask;
3439   }
3440   Object* key = number_string_cache()->get(hash * 2);
3441   if (key == number) {
3442     return String::cast(number_string_cache()->get(hash * 2 + 1));
3443   } else if (key->IsHeapNumber() &&
3444              number->IsHeapNumber() &&
3445              key->Number() == number->Number()) {
3446     return String::cast(number_string_cache()->get(hash * 2 + 1));
3447   }
3448   return undefined_value();
3449 }
3450
3451
3452 void Heap::SetNumberStringCache(Object* number, String* string) {
3453   int hash;
3454   int mask = (number_string_cache()->length() >> 1) - 1;
3455   if (number->IsSmi()) {
3456     hash = smi_get_hash(Smi::cast(number)) & mask;
3457   } else {
3458     hash = double_get_hash(number->Number()) & mask;
3459   }
3460   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3461       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3462     // The first time we have a hash collision, we move to the full sized
3463     // number string cache.
3464     AllocateFullSizeNumberStringCache();
3465     return;
3466   }
3467   number_string_cache()->set(hash * 2, number);
3468   number_string_cache()->set(hash * 2 + 1, string);
3469 }
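
// Illustrative layout of the number string cache used by the two functions
// above (the entry count is chosen only for the example): the FixedArray
// stores key/value pairs back to back, so a cache of length 512 has 256
// entries and mask == 255.  A Smi key such as 300 hashes to 300 & 255 == 44
// and is stored at slot 88, with its cached string at slot 89.  Doubles hash
// by XOR-ing the two 32-bit halves of their bit pattern before masking.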
3470
3471
3472 MaybeObject* Heap::NumberToString(Object* number,
3473                                   bool check_number_string_cache,
3474                                   PretenureFlag pretenure) {
3475   isolate_->counters()->number_to_string_runtime()->Increment();
3476   if (check_number_string_cache) {
3477     Object* cached = GetNumberStringCache(number);
3478     if (cached != undefined_value()) {
3479       return cached;
3480     }
3481   }
3482
3483   char arr[100];
3484   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3485   const char* str;
3486   if (number->IsSmi()) {
3487     int num = Smi::cast(number)->value();
3488     str = IntToCString(num, buffer);
3489   } else {
3490     double num = HeapNumber::cast(number)->value();
3491     str = DoubleToCString(num, buffer);
3492   }
3493
3494   Object* js_string;
3495   MaybeObject* maybe_js_string =
3496       AllocateStringFromOneByte(CStrVector(str), pretenure);
3497   if (maybe_js_string->ToObject(&js_string)) {
3498     SetNumberStringCache(number, String::cast(js_string));
3499   }
3500   return maybe_js_string;
3501 }
3502
3503
3504 MaybeObject* Heap::Uint32ToString(uint32_t value,
3505                                   bool check_number_string_cache) {
3506   Object* number;
3507   MaybeObject* maybe = NumberFromUint32(value);
3508   if (!maybe->To<Object>(&number)) return maybe;
3509   return NumberToString(number, check_number_string_cache);
3510 }
3511
3512
3513 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3514   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3515 }
3516
3517
3518 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3519     ExternalArrayType array_type) {
3520   switch (array_type) {
3521     case kExternalByteArray:
3522       return kExternalByteArrayMapRootIndex;
3523     case kExternalUnsignedByteArray:
3524       return kExternalUnsignedByteArrayMapRootIndex;
3525     case kExternalShortArray:
3526       return kExternalShortArrayMapRootIndex;
3527     case kExternalUnsignedShortArray:
3528       return kExternalUnsignedShortArrayMapRootIndex;
3529     case kExternalIntArray:
3530       return kExternalIntArrayMapRootIndex;
3531     case kExternalUnsignedIntArray:
3532       return kExternalUnsignedIntArrayMapRootIndex;
3533     case kExternalFloatArray:
3534       return kExternalFloatArrayMapRootIndex;
3535     case kExternalDoubleArray:
3536       return kExternalDoubleArrayMapRootIndex;
3537     case kExternalPixelArray:
3538       return kExternalPixelArrayMapRootIndex;
3539     default:
3540       UNREACHABLE();
3541       return kUndefinedValueRootIndex;
3542   }
3543 }
3544
3545 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3546     ElementsKind elementsKind) {
3547   switch (elementsKind) {
3548     case EXTERNAL_BYTE_ELEMENTS:
3549       return kEmptyExternalByteArrayRootIndex;
3550     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3551       return kEmptyExternalUnsignedByteArrayRootIndex;
3552     case EXTERNAL_SHORT_ELEMENTS:
3553       return kEmptyExternalShortArrayRootIndex;
3554     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3555       return kEmptyExternalUnsignedShortArrayRootIndex;
3556     case EXTERNAL_INT_ELEMENTS:
3557       return kEmptyExternalIntArrayRootIndex;
3558     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3559       return kEmptyExternalUnsignedIntArrayRootIndex;
3560     case EXTERNAL_FLOAT_ELEMENTS:
3561       return kEmptyExternalFloatArrayRootIndex;
3562     case EXTERNAL_DOUBLE_ELEMENTS:
3563       return kEmptyExternalDoubleArrayRootIndex;
3564     case EXTERNAL_PIXEL_ELEMENTS:
3565       return kEmptyExternalPixelArrayRootIndex;
3566     default:
3567       UNREACHABLE();
3568       return kUndefinedValueRootIndex;
3569   }
3570 }
3571
3572
3573 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3574   return ExternalArray::cast(
3575       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3576 }
3577
3578
3579
3580
3581 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3582   // We need to distinguish the minus zero value and this cannot be
3583   // done after conversion to int. Doing this by comparing bit
3584   // patterns is faster than using fpclassify() et al.
3585   static const DoubleRepresentation minus_zero(-0.0);
3586
3587   DoubleRepresentation rep(value);
3588   if (rep.bits == minus_zero.bits) {
3589     return AllocateHeapNumber(-0.0, pretenure);
3590   }
3591
3592   int int_value = FastD2I(value);
3593   if (value == int_value && Smi::IsValid(int_value)) {
3594     return Smi::FromInt(int_value);
3595   }
3596
3597   // Materialize the value in the heap.
3598   return AllocateHeapNumber(value, pretenure);
3599 }
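
// Examples of the dispatch above (illustrative): -0.0 compares equal to 0 as
// an integer but has the sign bit set (bit pattern 0x8000000000000000), so it
// is caught by the bit comparison and gets a heap number.  3.0 round-trips
// through FastD2I (3.0 == 3) and 3 is Smi-valid, so a Smi is returned.  3.5
// and values outside the Smi range fall through to AllocateHeapNumber.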
3600
3601
3602 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3603   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3604   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3605   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3606   Foreign* result;
3607   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3608   if (!maybe_result->To(&result)) return maybe_result;
3609   result->set_foreign_address(address);
3610   return result;
3611 }
3612
3613
3614 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3615   SharedFunctionInfo* share;
3616   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3617   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3618
3619   // Set pointer fields.
3620   share->set_name(name);
3621   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3622   share->set_code(illegal);
3623   share->set_optimized_code_map(Smi::FromInt(0));
3624   share->set_scope_info(ScopeInfo::Empty(isolate_));
3625   Code* construct_stub =
3626       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3627   share->set_construct_stub(construct_stub);
3628   share->set_instance_class_name(Object_string());
3629   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3630   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3631   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3632   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3633   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3634   share->set_ast_node_count(0);
3635   share->set_counters(0);
3636
3637   // Set integer fields (smi or int, depending on the architecture).
3638   share->set_length(0);
3639   share->set_formal_parameter_count(0);
3640   share->set_expected_nof_properties(0);
3641   share->set_num_literals(0);
3642   share->set_start_position_and_type(0);
3643   share->set_end_position(0);
3644   share->set_function_token_position(0);
3645   // All compiler hints default to false or 0.
3646   share->set_compiler_hints(0);
3647   share->set_opt_count(0);
3648
3649   return share;
3650 }
3651
3652
3653 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3654                                            JSArray* arguments,
3655                                            int start_position,
3656                                            int end_position,
3657                                            Object* script,
3658                                            Object* stack_trace,
3659                                            Object* stack_frames) {
3660   Object* result;
3661   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3662     if (!maybe_result->ToObject(&result)) return maybe_result;
3663   }
3664   JSMessageObject* message = JSMessageObject::cast(result);
3665   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3666   message->initialize_elements();
3667   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3668   message->set_type(type);
3669   message->set_arguments(arguments);
3670   message->set_start_position(start_position);
3671   message->set_end_position(end_position);
3672   message->set_script(script);
3673   message->set_stack_trace(stack_trace);
3674   message->set_stack_frames(stack_frames);
3675   return result;
3676 }
3677
3678
3679
3680 // Returns true for a character in a range.  Both limits are inclusive.
3681 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3682   // This makes use of unsigned wraparound.
3683   return character - from <= to - from;
3684 }
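
// Example of the wraparound trick (illustrative): for Between(c, '0', '9')
// the subtraction is done in uint32_t, so '5' gives 5 <= 9 (true), while '/'
// (one below '0') wraps around to 0xFFFFFFFF, which is far greater than 9
// (false).  A single unsigned compare therefore covers both bounds.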
3685
3686
3687 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3688     Heap* heap,
3689     uint16_t c1,
3690     uint16_t c2) {
3691   String* result;
3692   // Numeric strings have a different hash algorithm not known by
3693   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3694   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3695       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3696     return result;
3697   // Now that we know the length is 2, we might as well make use of that
3698   // fact when building the new string.
3699   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3700     // Both characters fit in one byte; the (c1 | c2) check relies on this:
3701     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));
3702     Object* result;
3703     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3704       if (!maybe_result->ToObject(&result)) return maybe_result;
3705     }
3706     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3707     dest[0] = static_cast<uint8_t>(c1);
3708     dest[1] = static_cast<uint8_t>(c2);
3709     return result;
3710   } else {
3711     Object* result;
3712     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3713       if (!maybe_result->ToObject(&result)) return maybe_result;
3714     }
3715     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3716     dest[0] = c1;
3717     dest[1] = c2;
3718     return result;
3719   }
3720 }
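
// Note on the (c1 | c2) check above (illustrative): because
// String::kMaxOneByteCharCodeU + 1 is a power of two, the maximum is an
// all-ones bit mask (0xFF for Latin-1 one-byte strings), so c1 | c2 has no
// bits above the mask exactly when neither character does.  For example
// 0x41 | 0xFF == 0xFF stays one-byte, while 0x41 | 0x100 == 0x141 forces the
// two-byte path.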
3721
3722
3723 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3724   int first_length = first->length();
3725   if (first_length == 0) {
3726     return second;
3727   }
3728
3729   int second_length = second->length();
3730   if (second_length == 0) {
3731     return first;
3732   }
3733
3734   int length = first_length + second_length;
3735
3736   // Optimization for 2-byte strings often used as keys in a decompression
3737   // dictionary.  Check whether we already have the string in the string
3738   // table to prevent creation of many unnecessary strings.
3739   if (length == 2) {
3740     uint16_t c1 = first->Get(0);
3741     uint16_t c2 = second->Get(0);
3742     return MakeOrFindTwoCharacterString(this, c1, c2);
3743   }
3744
3745   bool first_is_one_byte = first->IsOneByteRepresentation();
3746   bool second_is_one_byte = second->IsOneByteRepresentation();
3747   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3748   // Make sure that an out of memory exception is thrown if the length
3749   // of the new cons string is too large.
3750   if (length > String::kMaxLength || length < 0) {
3751     isolate()->context()->mark_out_of_memory();
3752     return Failure::OutOfMemoryException(0x4);
3753   }
3754
3755   bool is_one_byte_data_in_two_byte_string = false;
3756   if (!is_one_byte) {
3757     // At least one of the strings uses two-byte representation so we
3758     // can't use the fast case code for short ASCII strings below, but
3759     // we can try to save memory if all chars actually fit in ASCII.
3760     is_one_byte_data_in_two_byte_string =
3761         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3762     if (is_one_byte_data_in_two_byte_string) {
3763       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3764     }
3765   }
3766
3767   // If the resulting string is small make a flat string.
3768   if (length < ConsString::kMinLength) {
3769     // Note that neither of the two inputs can be a slice because:
3770     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3771     ASSERT(first->IsFlat());
3772     ASSERT(second->IsFlat());
3773     if (is_one_byte) {
3774       Object* result;
3775       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3776         if (!maybe_result->ToObject(&result)) return maybe_result;
3777       }
3778       // Copy the characters into the new object.
3779       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3780       // Copy first part.
3781       const uint8_t* src;
3782       if (first->IsExternalString()) {
3783         src = ExternalAsciiString::cast(first)->GetChars();
3784       } else {
3785         src = SeqOneByteString::cast(first)->GetChars();
3786       }
3787       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3788       // Copy second part.
3789       if (second->IsExternalString()) {
3790         src = ExternalAsciiString::cast(second)->GetChars();
3791       } else {
3792         src = SeqOneByteString::cast(second)->GetChars();
3793       }
3794       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3795       return result;
3796     } else {
3797       if (is_one_byte_data_in_two_byte_string) {
3798         Object* result;
3799         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3800           if (!maybe_result->ToObject(&result)) return maybe_result;
3801         }
3802         // Copy the characters into the new object.
3803         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3804         String::WriteToFlat(first, dest, 0, first_length);
3805         String::WriteToFlat(second, dest + first_length, 0, second_length);
3806         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3807         return result;
3808       }
3809
3810       Object* result;
3811       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3812         if (!maybe_result->ToObject(&result)) return maybe_result;
3813       }
3814       // Copy the characters into the new object.
3815       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3816       String::WriteToFlat(first, dest, 0, first_length);
3817       String::WriteToFlat(second, dest + first_length, 0, second_length);
3818       return result;
3819     }
3820   }
3821
3822   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3823       cons_ascii_string_map() : cons_string_map();
3824
3825   Object* result;
3826   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3827     if (!maybe_result->ToObject(&result)) return maybe_result;
3828   }
3829
3830   DisallowHeapAllocation no_gc;
3831   ConsString* cons_string = ConsString::cast(result);
3832   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3833   cons_string->set_length(length);
3834   cons_string->set_hash_field(String::kEmptyHashField);
3835   cons_string->set_first(first, mode);
3836   cons_string->set_second(second, mode);
3837   return result;
3838 }
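
// Summary of the decision above (illustrative): results shorter than
// ConsString::kMinLength are copied into a flat sequential string (one-byte
// if both inputs are one-byte, or if all characters happen to fit in one
// byte), while longer results get a ConsString that merely records the two
// halves and defers copying until the string is flattened.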
3839
3840
3841 MaybeObject* Heap::AllocateSubString(String* buffer,
3842                                      int start,
3843                                      int end,
3844                                      PretenureFlag pretenure) {
3845   int length = end - start;
3846   if (length <= 0) {
3847     return empty_string();
3848   } else if (length == 1) {
3849     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3850   } else if (length == 2) {
3851     // Optimization for 2-byte strings often used as keys in a decompression
3852     // dictionary.  Check whether we already have the string in the string
3853     // table to prevent creation of many unnecessary strings.
3854     uint16_t c1 = buffer->Get(start);
3855     uint16_t c2 = buffer->Get(start + 1);
3856     return MakeOrFindTwoCharacterString(this, c1, c2);
3857   }
3858
3859   // Make an attempt to flatten the buffer to reduce access time.
3860   buffer = buffer->TryFlattenGetString();
3861
3862   if (!FLAG_string_slices ||
3863       !buffer->IsFlat() ||
3864       length < SlicedString::kMinLength ||
3865       pretenure == TENURED) {
3866     Object* result;
3867     // WriteToFlat takes care of the case when an indirect string has a
3868     // different encoding from its underlying string.  These encodings may
3869     // differ because of externalization.
3870     bool is_one_byte = buffer->IsOneByteRepresentation();
3871     { MaybeObject* maybe_result = is_one_byte
3872                                   ? AllocateRawOneByteString(length, pretenure)
3873                                   : AllocateRawTwoByteString(length, pretenure);
3874       if (!maybe_result->ToObject(&result)) return maybe_result;
3875     }
3876     String* string_result = String::cast(result);
3877     // Copy the characters into the new object.
3878     if (is_one_byte) {
3879       ASSERT(string_result->IsOneByteRepresentation());
3880       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3881       String::WriteToFlat(buffer, dest, start, end);
3882     } else {
3883       ASSERT(string_result->IsTwoByteRepresentation());
3884       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3885       String::WriteToFlat(buffer, dest, start, end);
3886     }
3887     return result;
3888   }
3889
3890   ASSERT(buffer->IsFlat());
3891 #if VERIFY_HEAP
3892   if (FLAG_verify_heap) {
3893     buffer->StringVerify();
3894   }
3895 #endif
3896
3897   Object* result;
3898   // When slicing an indirect string we use its encoding for a newly created
3899   // slice and don't check the encoding of the underlying string.  This is safe
3900   // even if the encodings are different because of externalization.  If an
3901   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3902   // codes of the underlying string must still fit into ASCII (because
3903   // externalization must not change char codes).
3904   { Map* map = buffer->IsOneByteRepresentation()
3905                  ? sliced_ascii_string_map()
3906                  : sliced_string_map();
3907     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3908     if (!maybe_result->ToObject(&result)) return maybe_result;
3909   }
3910
3911   DisallowHeapAllocation no_gc;
3912   SlicedString* sliced_string = SlicedString::cast(result);
3913   sliced_string->set_length(length);
3914   sliced_string->set_hash_field(String::kEmptyHashField);
3915   if (buffer->IsConsString()) {
3916     ConsString* cons = ConsString::cast(buffer);
3917     ASSERT(cons->second()->length() == 0);
3918     sliced_string->set_parent(cons->first());
3919     sliced_string->set_offset(start);
3920   } else if (buffer->IsSlicedString()) {
3921     // Prevent nesting sliced strings.
3922     SlicedString* parent_slice = SlicedString::cast(buffer);
3923     sliced_string->set_parent(parent_slice->parent());
3924     sliced_string->set_offset(start + parent_slice->offset());
3925   } else {
3926     sliced_string->set_parent(buffer);
3927     sliced_string->set_offset(start);
3928   }
3929   ASSERT(sliced_string->parent()->IsSeqString() ||
3930          sliced_string->parent()->IsExternalString());
3931   return result;
3932 }
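
// Illustrative example of the slice handling above: if |buffer| is itself a
// SlicedString over a sequential string S with offset 10, then
// AllocateSubString(buffer, 5, 20) produces a new slice whose parent is S and
// whose offset is 10 + 5 == 15, with length 15.  Slices therefore never nest;
// the parent is always a sequential or external string, as the final ASSERT
// checks.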
3933
3934
3935 MaybeObject* Heap::AllocateExternalStringFromAscii(
3936     const ExternalAsciiString::Resource* resource) {
3937   size_t length = resource->length();
3938   if (length > static_cast<size_t>(String::kMaxLength)) {
3939     isolate()->context()->mark_out_of_memory();
3940     return Failure::OutOfMemoryException(0x5);
3941   }
3942
3943   Map* map = external_ascii_string_map();
3944   Object* result;
3945   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3946     if (!maybe_result->ToObject(&result)) return maybe_result;
3947   }
3948
3949   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3950   external_string->set_length(static_cast<int>(length));
3951   external_string->set_hash_field(String::kEmptyHashField);
3952   external_string->set_resource(resource);
3953
3954   return result;
3955 }
3956
3957
3958 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3959     const ExternalTwoByteString::Resource* resource) {
3960   size_t length = resource->length();
3961   if (length > static_cast<size_t>(String::kMaxLength)) {
3962     isolate()->context()->mark_out_of_memory();
3963     return Failure::OutOfMemoryException(0x6);
3964   }
3965
3966   // For small strings we check whether the resource contains only
3967   // one byte characters.  If yes, we use a different string map.
3968   static const size_t kOneByteCheckLengthLimit = 32;
3969   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3970       String::IsOneByte(resource->data(), static_cast<int>(length));
3971   Map* map = is_one_byte ?
3972       external_string_with_one_byte_data_map() : external_string_map();
3973   Object* result;
3974   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3975     if (!maybe_result->ToObject(&result)) return maybe_result;
3976   }
3977
3978   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3979   external_string->set_length(static_cast<int>(length));
3980   external_string->set_hash_field(String::kEmptyHashField);
3981   external_string->set_resource(resource);
3982
3983   return result;
3984 }
3985
3986
3987 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3988   if (code <= String::kMaxOneByteCharCode) {
3989     Object* value = single_character_string_cache()->get(code);
3990     if (value != undefined_value()) return value;
3991
3992     uint8_t buffer[1];
3993     buffer[0] = static_cast<uint8_t>(code);
3994     Object* result;
3995     MaybeObject* maybe_result =
3996         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3997
3998     if (!maybe_result->ToObject(&result)) return maybe_result;
3999     single_character_string_cache()->set(code, result);
4000     return result;
4001   }
4002
4003   Object* result;
4004   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4005     if (!maybe_result->ToObject(&result)) return maybe_result;
4006   }
4007   String* answer = String::cast(result);
4008   answer->Set(0, code);
4009   return answer;
4010 }
4011
4012
4013 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4014   if (length < 0 || length > ByteArray::kMaxLength) {
4015     return Failure::OutOfMemoryException(0x7);
4016   }
4017   if (pretenure == NOT_TENURED) {
4018     return AllocateByteArray(length);
4019   }
4020   int size = ByteArray::SizeFor(length);
4021   Object* result;
4022   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
4023                    ? old_data_space_->AllocateRaw(size)
4024                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
4025     if (!maybe_result->ToObject(&result)) return maybe_result;
4026   }
4027
4028   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4029       byte_array_map());
4030   reinterpret_cast<ByteArray*>(result)->set_length(length);
4031   return result;
4032 }
4033
4034
4035 MaybeObject* Heap::AllocateByteArray(int length) {
4036   if (length < 0 || length > ByteArray::kMaxLength) {
4037     return Failure::OutOfMemoryException(0x8);
4038   }
4039   int size = ByteArray::SizeFor(length);
4040   AllocationSpace space =
4041       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
4042   Object* result;
4043   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4044     if (!maybe_result->ToObject(&result)) return maybe_result;
4045   }
4046
4047   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4048       byte_array_map());
4049   reinterpret_cast<ByteArray*>(result)->set_length(length);
4050   return result;
4051 }
4052
4053
4054 void Heap::CreateFillerObjectAt(Address addr, int size) {
4055   if (size == 0) return;
4056   HeapObject* filler = HeapObject::FromAddress(addr);
4057   if (size == kPointerSize) {
4058     filler->set_map_no_write_barrier(one_pointer_filler_map());
4059   } else if (size == 2 * kPointerSize) {
4060     filler->set_map_no_write_barrier(two_pointer_filler_map());
4061   } else {
4062     filler->set_map_no_write_barrier(free_space_map());
4063     FreeSpace::cast(filler)->set_size(size);
4064   }
4065 }
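
// Illustrative behaviour of the filler selection above on a 64-bit target
// (kPointerSize == 8, an assumption of the example): an 8 byte gap gets the
// one-pointer filler map, a 16 byte gap the two-pointer filler map, and any
// larger gap, e.g. 24 bytes, becomes a FreeSpace object whose size field
// records the full 24 bytes so the heap stays iterable.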
4066
4067
4068 MaybeObject* Heap::AllocateExternalArray(int length,
4069                                          ExternalArrayType array_type,
4070                                          void* external_pointer,
4071                                          PretenureFlag pretenure) {
4072   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4073   Object* result;
4074   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4075                                             space,
4076                                             OLD_DATA_SPACE);
4077     if (!maybe_result->ToObject(&result)) return maybe_result;
4078   }
4079
4080   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4081       MapForExternalArrayType(array_type));
4082   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4083   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4084       external_pointer);
4085
4086   return result;
4087 }
4088
4089
4090 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4091                               Code::Flags flags,
4092                               Handle<Object> self_reference,
4093                               bool immovable,
4094                               bool crankshafted) {
4095   // Allocate ByteArray before the Code object, so that we do not risk
4096   // leaving an uninitialized Code object (and breaking the heap).
4097   ByteArray* reloc_info;
4098   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4099   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4100
4101   // Compute size.
4102   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4103   int obj_size = Code::SizeFor(body_size);
4104   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4105   MaybeObject* maybe_result;
4106   // Large code objects and code objects which should stay at a fixed address
4107   // are allocated in large object space.
4108   HeapObject* result;
4109   bool force_lo_space = obj_size > code_space()->AreaSize();
4110   if (force_lo_space) {
4111     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4112   } else {
4113     maybe_result = code_space_->AllocateRaw(obj_size);
4114   }
4115   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4116
4117   if (immovable && !force_lo_space &&
4118       // Objects on the first page of each space are never moved.
4119       !code_space_->FirstPage()->Contains(result->address())) {
4120     // Discard the first code allocation, which was on a page where it could be
4121     // moved.
4122     CreateFillerObjectAt(result->address(), obj_size);
4123     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4124     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4125   }
4126
4127   // Initialize the code object.
4128   result->set_map_no_write_barrier(code_map());
4129   Code* code = Code::cast(result);
4130   ASSERT(!isolate_->code_range()->exists() ||
4131       isolate_->code_range()->contains(code->address()));
4132   code->set_instruction_size(desc.instr_size);
4133   code->set_relocation_info(reloc_info);
4134   code->set_flags(flags);
4135   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4136     code->set_check_type(RECEIVER_MAP_CHECK);
4137   }
4138   code->set_is_crankshafted(crankshafted);
4139   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4140   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4141   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4142   code->set_gc_metadata(Smi::FromInt(0));
4143   code->set_ic_age(global_ic_age_);
4144   code->set_prologue_offset(kPrologueOffsetNotSet);
4145   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4146     code->set_marked_for_deoptimization(false);
4147   }
4148   // Allow self references to the created code object by patching the handle
4149   // to point to the newly allocated Code object.
4150   if (!self_reference.is_null()) {
4151     *(self_reference.location()) = code;
4152   }
4153   // Migrate generated code.
4154   // The generated code can contain Object** values (typically from handles)
4155   // that are dereferenced during the copy to point directly to the actual heap
4156   // objects. These pointers can include references to the code object itself,
4157   // through the self_reference parameter.
4158   code->CopyFrom(desc);
4159
4160 #ifdef VERIFY_HEAP
4161   if (FLAG_verify_heap) {
4162     code->Verify();
4163   }
4164 #endif
4165   return code;
4166 }
4167
4168
4169 MaybeObject* Heap::CopyCode(Code* code) {
4170   // Allocate an object the same size as the code object.
4171   int obj_size = code->Size();
4172   MaybeObject* maybe_result;
4173   if (obj_size > code_space()->AreaSize()) {
4174     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4175   } else {
4176     maybe_result = code_space_->AllocateRaw(obj_size);
4177   }
4178
4179   Object* result;
4180   if (!maybe_result->ToObject(&result)) return maybe_result;
4181
4182   // Copy code object.
4183   Address old_addr = code->address();
4184   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4185   CopyBlock(new_addr, old_addr, obj_size);
4186   // Relocate the copy.
4187   Code* new_code = Code::cast(result);
4188   ASSERT(!isolate_->code_range()->exists() ||
4189       isolate_->code_range()->contains(code->address()));
4190   new_code->Relocate(new_addr - old_addr);
4191   return new_code;
4192 }
4193
4194
4195 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4196   // Allocate ByteArray before the Code object, so that we do not risk
4197   // leaving an uninitialized Code object (and breaking the heap).
4198   Object* reloc_info_array;
4199   { MaybeObject* maybe_reloc_info_array =
4200         AllocateByteArray(reloc_info.length(), TENURED);
4201     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4202       return maybe_reloc_info_array;
4203     }
4204   }
4205
4206   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4207
4208   int new_obj_size = Code::SizeFor(new_body_size);
4209
4210   Address old_addr = code->address();
4211
4212   size_t relocation_offset =
4213       static_cast<size_t>(code->instruction_end() - old_addr);
4214
4215   MaybeObject* maybe_result;
4216   if (new_obj_size > code_space()->AreaSize()) {
4217     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4218   } else {
4219     maybe_result = code_space_->AllocateRaw(new_obj_size);
4220   }
4221
4222   Object* result;
4223   if (!maybe_result->ToObject(&result)) return maybe_result;
4224
4225   // Copy code object.
4226   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4227
4228   // Copy header and instructions.
4229   CopyBytes(new_addr, old_addr, relocation_offset);
4230
4231   Code* new_code = Code::cast(result);
4232   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4233
4234   // Copy patched rinfo.
4235   CopyBytes(new_code->relocation_start(),
4236             reloc_info.start(),
4237             static_cast<size_t>(reloc_info.length()));
4238
4239   // Relocate the copy.
4240   ASSERT(!isolate_->code_range()->exists() ||
4241       isolate_->code_range()->contains(code->address()));
4242   new_code->Relocate(new_addr - old_addr);
4243
4244 #ifdef VERIFY_HEAP
4245   if (FLAG_verify_heap) {
4246     code->Verify();
4247   }
4248 #endif
4249   return new_code;
4250 }
4251
4252
4253 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4254     Handle<AllocationSite> allocation_site) {
4255   ASSERT(gc_state_ == NOT_IN_GC);
4256   ASSERT(map->instance_type() != MAP_TYPE);
4257   // If allocation failures are disallowed, we may allocate in a different
4258   // space when new space is full and the object is not a large object.
4259   AllocationSpace retry_space =
4260       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4261   int size = map->instance_size() + AllocationMemento::kSize;
4262   Object* result;
4263   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4264   if (!maybe_result->ToObject(&result)) return maybe_result;
4265   // No need for write barrier since object is white and map is in old space.
4266   HeapObject::cast(result)->set_map_no_write_barrier(map);
4267   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4268       reinterpret_cast<Address>(result) + map->instance_size());
4269   alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4270   alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4271   return result;
4272 }
4273
4274
4275 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4276   ASSERT(gc_state_ == NOT_IN_GC);
4277   ASSERT(map->instance_type() != MAP_TYPE);
4278   // If allocation failures are disallowed, we may allocate in a different
4279   // space when new space is full and the object is not a large object.
4280   AllocationSpace retry_space =
4281       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4282   int size = map->instance_size();
4283   Object* result;
4284   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4285   if (!maybe_result->ToObject(&result)) return maybe_result;
4286   // No need for write barrier since object is white and map is in old space.
4287   HeapObject::cast(result)->set_map_no_write_barrier(map);
4288   return result;
4289 }
4290
4291
4292 void Heap::InitializeFunction(JSFunction* function,
4293                               SharedFunctionInfo* shared,
4294                               Object* prototype) {
4295   ASSERT(!prototype->IsMap());
4296   function->initialize_properties();
4297   function->initialize_elements();
4298   function->set_shared(shared);
4299   function->set_code(shared->code());
4300   function->set_prototype_or_initial_map(prototype);
4301   function->set_context(undefined_value());
4302   function->set_literals_or_bindings(empty_fixed_array());
4303   function->set_next_function_link(undefined_value());
4304 }
4305
4306
4307 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4308   // Make sure to use globals from the function's context, since the function
4309   // can be from a different context.
4310   Context* native_context = function->context()->native_context();
4311   Map* new_map;
4312   if (function->shared()->is_generator()) {
4313     // Generator prototypes can share maps since they don't have "constructor"
4314     // properties.
4315     new_map = native_context->generator_object_prototype_map();
4316   } else {
4317     // Each function prototype gets a fresh map to avoid unwanted sharing of
4318     // maps between prototypes of different constructors.
4319     JSFunction* object_function = native_context->object_function();
4320     ASSERT(object_function->has_initial_map());
4321     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4322     if (!maybe_map->To(&new_map)) return maybe_map;
4323   }
4324
4325   Object* prototype;
4326   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4327   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4328
4329   if (!function->shared()->is_generator()) {
4330     MaybeObject* maybe_failure =
4331         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4332             constructor_string(), function, DONT_ENUM);
4333     if (maybe_failure->IsFailure()) return maybe_failure;
4334   }
4335
4336   return prototype;
4337 }
4338
4339
4340 MaybeObject* Heap::AllocateFunction(Map* function_map,
4341                                     SharedFunctionInfo* shared,
4342                                     Object* prototype,
4343                                     PretenureFlag pretenure) {
4344   AllocationSpace space =
4345       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4346   Object* result;
4347   { MaybeObject* maybe_result = Allocate(function_map, space);
4348     if (!maybe_result->ToObject(&result)) return maybe_result;
4349   }
4350   InitializeFunction(JSFunction::cast(result), shared, prototype);
4351   return result;
4352 }
4353
4354
4355 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4356   // To get fast allocation and map sharing for arguments objects we
4357   // allocate them based on an arguments boilerplate.
4358
4359   JSObject* boilerplate;
4360   int arguments_object_size;
4361   bool strict_mode_callee = callee->IsJSFunction() &&
4362       !JSFunction::cast(callee)->shared()->is_classic_mode();
4363   if (strict_mode_callee) {
4364     boilerplate =
4365         isolate()->context()->native_context()->
4366             strict_mode_arguments_boilerplate();
4367     arguments_object_size = kArgumentsObjectSizeStrict;
4368   } else {
4369     boilerplate =
4370         isolate()->context()->native_context()->arguments_boilerplate();
4371     arguments_object_size = kArgumentsObjectSize;
4372   }
4373
4374   // This calls Copy directly rather than using Heap::AllocateRaw so we
4375   // duplicate the check here.
4376   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4377
4378   // Check that the size of the boilerplate matches our
4379   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4380   // on the size being a known constant.
4381   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4382
4383   // Do the allocation.
4384   Object* result;
4385   { MaybeObject* maybe_result =
4386         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4387     if (!maybe_result->ToObject(&result)) return maybe_result;
4388   }
4389
4390   // Copy the content. The arguments boilerplate doesn't have any
4391   // fields that point to new space so it's safe to skip the write
4392   // barrier here.
4393   CopyBlock(HeapObject::cast(result)->address(),
4394             boilerplate->address(),
4395             JSObject::kHeaderSize);
4396
4397   // Set the length property.
4398   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4399                                                 Smi::FromInt(length),
4400                                                 SKIP_WRITE_BARRIER);
4401   // Set the callee property for non-strict mode arguments objects only.
4402   if (!strict_mode_callee) {
4403     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4404                                                   callee);
4405   }
4406
4407   // Check the state of the object
4408   ASSERT(JSObject::cast(result)->HasFastProperties());
4409   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4410
4411   return result;
4412 }
4413
4414
4415 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4416   ASSERT(!fun->has_initial_map());
4417
4418   // First create a new map with the size and number of in-object properties
4419   // suggested by the function.
4420   InstanceType instance_type;
4421   int instance_size;
4422   int in_object_properties;
4423   if (fun->shared()->is_generator()) {
4424     instance_type = JS_GENERATOR_OBJECT_TYPE;
4425     instance_size = JSGeneratorObject::kSize;
4426     in_object_properties = 0;
4427   } else {
4428     instance_type = JS_OBJECT_TYPE;
4429     instance_size = fun->shared()->CalculateInstanceSize();
4430     in_object_properties = fun->shared()->CalculateInObjectProperties();
4431   }
4432   Map* map;
4433   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4434   if (!maybe_map->To(&map)) return maybe_map;
4435
4436   // Fetch or allocate prototype.
4437   Object* prototype;
4438   if (fun->has_instance_prototype()) {
4439     prototype = fun->instance_prototype();
4440   } else {
4441     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4442     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4443   }
4444   map->set_inobject_properties(in_object_properties);
4445   map->set_unused_property_fields(in_object_properties);
4446   map->set_prototype(prototype);
4447   ASSERT(map->has_fast_object_elements());
4448
4449   if (!fun->shared()->is_generator()) {
4450     fun->shared()->StartInobjectSlackTracking(map);
4451   }
4452
4453   return map;
4454 }
4455
4456
4457 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4458                                      FixedArray* properties,
4459                                      Map* map) {
4460   obj->set_properties(properties);
4461   obj->initialize_elements();
4462   // TODO(1240798): Initialize the object's body using valid initial values
4463   // according to the object's initial map.  For example, if the map's
4464   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4465   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4466   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4467   // verification code has to cope with (temporarily) invalid objects.  See,
4468   // for example, JSArray::JSArrayVerify.
4469   Object* filler;
4470   // We cannot always fill with one_pointer_filler_map because objects
4471   // created from API functions expect their internal fields to be initialized
4472   // with undefined_value.
4473   // Pre-allocated fields need to be initialized with undefined_value as well
4474   // so that object accesses before the constructor completes (e.g. in the
4475   // debugger) will not cause a crash.
4476   if (map->constructor()->IsJSFunction() &&
4477       JSFunction::cast(map->constructor())->shared()->
4478           IsInobjectSlackTrackingInProgress()) {
4479     // We might want to shrink the object later.
4480     ASSERT(obj->GetInternalFieldCount() == 0);
4481     filler = Heap::one_pointer_filler_map();
4482   } else {
4483     filler = Heap::undefined_value();
4484   }
4485   obj->InitializeBody(map, Heap::undefined_value(), filler);
4486 }
4487
4488
4489 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4490   // JSFunctions should be allocated using AllocateFunction to be
4491   // properly initialized.
4492   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4493
4494   // Both types of global objects should be allocated using
4495   // AllocateGlobalObject to be properly initialized.
4496   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4497   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4498
4499   // Allocate the backing storage for the properties.
4500   int prop_size = map->InitialPropertiesLength();
4501   ASSERT(prop_size >= 0);
4502   Object* properties;
4503   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4504     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4505   }
4506
4507   // Allocate the JSObject.
4508   AllocationSpace space =
4509       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4510   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4511   Object* obj;
4512   MaybeObject* maybe_obj = Allocate(map, space);
4513   if (!maybe_obj->To(&obj)) return maybe_obj;
4514
4515   // Initialize the JSObject.
4516   InitializeJSObjectFromMap(JSObject::cast(obj),
4517                             FixedArray::cast(properties),
4518                             map);
4519   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4520          JSObject::cast(obj)->HasExternalArrayElements());
4521   return obj;
4522 }
4523
4524
4525 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4526     Handle<AllocationSite> allocation_site) {
4527   // JSFunctions should be allocated using AllocateFunction to be
4528   // properly initialized.
4529   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4530
4531   // Both types of global objects should be allocated using
4532   // AllocateGlobalObject to be properly initialized.
4533   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4534   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4535
4536   // Allocate the backing storage for the properties.
4537   int prop_size = map->InitialPropertiesLength();
4538   ASSERT(prop_size >= 0);
4539   Object* properties;
4540   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4541     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4542   }
4543
4544   // Allocate the JSObject.
4545   AllocationSpace space = NEW_SPACE;
4546   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4547   Object* obj;
4548   MaybeObject* maybe_obj =
4549       AllocateWithAllocationSite(map, space, allocation_site);
4550   if (!maybe_obj->To(&obj)) return maybe_obj;
4551
4552   // Initialize the JSObject.
4553   InitializeJSObjectFromMap(JSObject::cast(obj),
4554                             FixedArray::cast(properties),
4555                             map);
4556   ASSERT(JSObject::cast(obj)->HasFastElements());
4557   return obj;
4558 }
4559
4560
4561 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4562                                     PretenureFlag pretenure) {
4563   // Allocate the initial map if absent.
4564   if (!constructor->has_initial_map()) {
4565     Object* initial_map;
4566     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4567       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4568     }
4569     constructor->set_initial_map(Map::cast(initial_map));
4570     Map::cast(initial_map)->set_constructor(constructor);
4571   }
4572   // Allocate the object based on the constructor's initial map.
4573   MaybeObject* result = AllocateJSObjectFromMap(
4574       constructor->initial_map(), pretenure);
4575 #ifdef DEBUG
4576   // Make sure result is NOT a global object if valid.
4577   Object* non_failure;
4578   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4579 #endif
4580   return result;
4581 }
4582
4583
4584 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4585     Handle<AllocationSite> allocation_site) {
4586   // Allocate the initial map if absent.
4587   if (!constructor->has_initial_map()) {
4588     Object* initial_map;
4589     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4590       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4591     }
4592     constructor->set_initial_map(Map::cast(initial_map));
4593     Map::cast(initial_map)->set_constructor(constructor);
4594   }
4595   // Allocate the object based on the constructor's initial map, or on the
4596   // transition advice recorded in the allocation site.
4597   Map* initial_map = constructor->initial_map();
4598
4599   Smi* smi = Smi::cast(allocation_site->transition_info());
4600   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4601   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4602   if (to_kind != initial_map->elements_kind()) {
4603     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4604     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4605     // Possibly alter the mode, since we found an updated elements kind
4606     // in the type info cell.
4607     mode = AllocationSite::GetMode(to_kind);
4608   }
4609
4610   MaybeObject* result;
4611   if (mode == TRACK_ALLOCATION_SITE) {
4612     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4613         allocation_site);
4614   } else {
4615     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4616   }
4617 #ifdef DEBUG
4618   // Make sure result is NOT a global object if valid.
4619   Object* non_failure;
4620   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4621 #endif
4622   return result;
4623 }
4624
4625
4626 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
4627   ASSERT(function->shared()->is_generator());
4628   Map* map;
4629   if (function->has_initial_map()) {
4630     map = function->initial_map();
4631   } else {
4632     // Allocate the initial map if absent.
4633     MaybeObject* maybe_map = AllocateInitialMap(function);
4634     if (!maybe_map->To(&map)) return maybe_map;
4635     function->set_initial_map(map);
4636     map->set_constructor(function);
4637   }
4638   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4639   return AllocateJSObjectFromMap(map);
4640 }
4641
4642
4643 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4644   // Allocate a fresh map. Modules do not have a prototype.
4645   Map* map;
4646   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4647   if (!maybe_map->To(&map)) return maybe_map;
4648   // Allocate the object based on the map.
4649   JSModule* module;
4650   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4651   if (!maybe_module->To(&module)) return maybe_module;
4652   module->set_context(context);
4653   module->set_scope_info(scope_info);
4654   return module;
4655 }
4656
4657
4658 MaybeObject* Heap::AllocateJSArrayAndStorage(
4659     ElementsKind elements_kind,
4660     int length,
4661     int capacity,
4662     ArrayStorageAllocationMode mode,
4663     PretenureFlag pretenure) {
4664   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4665   JSArray* array;
4666   if (!maybe_array->To(&array)) return maybe_array;
4667
4668   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4669   // for performance reasons.
4670   ASSERT(capacity >= length);
4671
4672   if (capacity == 0) {
4673     array->set_length(Smi::FromInt(0));
4674     array->set_elements(empty_fixed_array());
4675     return array;
4676   }
4677
4678   FixedArrayBase* elms;
4679   MaybeObject* maybe_elms = NULL;
4680   if (IsFastDoubleElementsKind(elements_kind)) {
4681     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4682       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4683     } else {
4684       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4685       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4686     }
4687   } else {
4688     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4689     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4690       maybe_elms = AllocateUninitializedFixedArray(capacity);
4691     } else {
4692       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4693       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4694     }
4695   }
4696   if (!maybe_elms->To(&elms)) return maybe_elms;
4697
4698   array->set_elements(elms);
4699   array->set_length(Smi::FromInt(length));
4700   return array;
4701 }
4702
4703
4704 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4705     ElementsKind elements_kind,
4706     int length,
4707     int capacity,
4708     Handle<AllocationSite> allocation_site,
4709     ArrayStorageAllocationMode mode) {
4710   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4711       allocation_site);
4712   JSArray* array;
4713   if (!maybe_array->To(&array)) return maybe_array;
4714   return AllocateJSArrayStorage(array, length, capacity, mode);
4715 }
4716
4717
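// Allocates the elements backing store for |array| with room for |capacity|
// elements, initialized according to |mode|, and sets the array length to
// |length|.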
4718 MaybeObject* Heap::AllocateJSArrayStorage(
4719     JSArray* array,
4720     int length,
4721     int capacity,
4722     ArrayStorageAllocationMode mode) {
4723   ASSERT(capacity >= length);
4724
4725   if (capacity == 0) {
4726     array->set_length(Smi::FromInt(0));
4727     array->set_elements(empty_fixed_array());
4728     return array;
4729   }
4730
4731   FixedArrayBase* elms;
4732   MaybeObject* maybe_elms = NULL;
4733   ElementsKind elements_kind = array->GetElementsKind();
4734   if (IsFastDoubleElementsKind(elements_kind)) {
4735     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4736       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4737     } else {
4738       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4739       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4740     }
4741   } else {
4742     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4743     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4744       maybe_elms = AllocateUninitializedFixedArray(capacity);
4745     } else {
4746       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4747       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4748     }
4749   }
4750   if (!maybe_elms->To(&elms)) return maybe_elms;
4751
4752   array->set_elements(elms);
4753   array->set_length(Smi::FromInt(length));
4754   return array;
4755 }
4756
4757
4758 MaybeObject* Heap::AllocateJSArrayWithElements(
4759     FixedArrayBase* elements,
4760     ElementsKind elements_kind,
4761     int length,
4762     PretenureFlag pretenure) {
4763   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4764   JSArray* array;
4765   if (!maybe_array->To(&array)) return maybe_array;
4766
4767   array->set_elements(elements);
4768   array->set_length(Smi::FromInt(length));
4769   array->ValidateElements();
4770   return array;
4771 }
4772
4773
4774 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4775   // Allocate map.
4776   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4777   // maps. Will probably depend on the identity of the handler object, too.
4778   Map* map;
4779   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4780   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4781   map->set_prototype(prototype);
4782
4783   // Allocate the proxy object.
4784   JSProxy* result;
4785   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4786   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4787   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4788   result->set_handler(handler);
4789   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4790   return result;
4791 }
4792
4793
4794 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4795                                            Object* call_trap,
4796                                            Object* construct_trap,
4797                                            Object* prototype) {
4798   // Allocate map.
4799   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4800   // maps. Will probably depend on the identity of the handler object, too.
4801   Map* map;
4802   MaybeObject* maybe_map_obj =
4803       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4804   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4805   map->set_prototype(prototype);
4806
4807   // Allocate the proxy object.
4808   JSFunctionProxy* result;
4809   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4810   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4811   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4812   result->set_handler(handler);
4813   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4814   result->set_call_trap(call_trap);
4815   result->set_construct_trap(construct_trap);
4816   return result;
4817 }
4818
4819
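// Allocates a global object (JS global or builtins object) from the
// constructor's initial map and immediately normalizes it: the accessors
// described by the initial map are moved into a NameDictionary backing store
// and the object is switched to a fresh dictionary map.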
4820 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4821   ASSERT(constructor->has_initial_map());
4822   Map* map = constructor->initial_map();
4823   ASSERT(map->is_dictionary_map());
4824
4825   // Make sure no field properties are described in the initial map.
4826   // This guarantees us that normalizing the properties does not
4827   // require us to change property values to PropertyCells.
4828   ASSERT(map->NextFreePropertyIndex() == 0);
4829
4830   // Make sure we don't have a ton of pre-allocated slots in the
4831   // global objects. They will be unused once we normalize the object.
4832   ASSERT(map->unused_property_fields() == 0);
4833   ASSERT(map->inobject_properties() == 0);
4834
4835   // Initial size of the backing store, chosen to avoid resizing the storage
4836   // during bootstrapping. The size differs between the JS global object and
4837   // the builtins object.
4838   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4839
4840   // Allocate a dictionary object for backing storage.
4841   NameDictionary* dictionary;
4842   MaybeObject* maybe_dictionary =
4843       NameDictionary::Allocate(
4844           this,
4845           map->NumberOfOwnDescriptors() * 2 + initial_size);
4846   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4847
4848   // The global object might be created from an object template with accessors.
4849   // Fill these accessors into the dictionary.
4850   DescriptorArray* descs = map->instance_descriptors();
4851   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4852     PropertyDetails details = descs->GetDetails(i);
4853     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4854     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4855     Object* value = descs->GetCallbacksObject(i);
4856     MaybeObject* maybe_value = AllocatePropertyCell(value);
4857     if (!maybe_value->ToObject(&value)) return maybe_value;
4858
4859     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4860     if (!maybe_added->To(&dictionary)) return maybe_added;
4861   }
4862
4863   // Allocate the global object and initialize it with the backing store.
4864   JSObject* global;
4865   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4866   if (!maybe_global->To(&global)) return maybe_global;
4867
4868   InitializeJSObjectFromMap(global, dictionary, map);
4869
4870   // Create a new map for the global object.
4871   Map* new_map;
4872   MaybeObject* maybe_map = map->CopyDropDescriptors();
4873   if (!maybe_map->To(&new_map)) return maybe_map;
4874   new_map->set_dictionary_map(true);
4875
4876   // Set up the global object as a normalized object.
4877   global->set_map(new_map);
4878   global->set_properties(dictionary);
4879
4880   // Make sure result is a global object with properties in dictionary.
4881   ASSERT(global->IsGlobalObject());
4882   ASSERT(!global->HasFastProperties());
4883   return global;
4884 }
4885
4886
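// Makes a shallow clone of |source|: the object body is copied verbatim, and
// non-empty elements and properties backing stores are copied as well (except
// for copy-on-write element stores, which are shared).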
4887 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4888   // Never used to copy functions.  If functions need to be copied we
4889   // have to be careful to clear the literals array.
4890   SLOW_ASSERT(!source->IsJSFunction());
4891
4892   // Make the clone.
4893   Map* map = source->map();
4894   int object_size = map->instance_size();
4895   Object* clone;
4896
4897   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4898
4899   // If we're forced to always allocate, we use the general allocation
4900   // functions which may leave us with an object in old space.
4901   if (always_allocate()) {
4902     { MaybeObject* maybe_clone =
4903           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4904       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4905     }
4906     Address clone_address = HeapObject::cast(clone)->address();
4907     CopyBlock(clone_address,
4908               source->address(),
4909               object_size);
4910     // Update write barrier for all fields that lie beyond the header.
4911     RecordWrites(clone_address,
4912                  JSObject::kHeaderSize,
4913                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4914   } else {
4915     wb_mode = SKIP_WRITE_BARRIER;
4916
4917     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4918       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4919     }
4920     SLOW_ASSERT(InNewSpace(clone));
4921     // Since we know the clone is allocated in new space, we can copy
4922     // the contents without worrying about updating the write barrier.
4923     CopyBlock(HeapObject::cast(clone)->address(),
4924               source->address(),
4925               object_size);
4926   }
4927
4928   SLOW_ASSERT(
4929       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4930   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4931   FixedArray* properties = FixedArray::cast(source->properties());
4932   // Update elements if necessary.
4933   if (elements->length() > 0) {
4934     Object* elem;
4935     { MaybeObject* maybe_elem;
4936       if (elements->map() == fixed_cow_array_map()) {
4937         maybe_elem = FixedArray::cast(elements);
4938       } else if (source->HasFastDoubleElements()) {
4939         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4940       } else {
4941         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4942       }
4943       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4944     }
4945     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4946   }
4947   // Update properties if necessary.
4948   if (properties->length() > 0) {
4949     Object* prop;
4950     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4951       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4952     }
4953     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4954   }
4955   // Return the new clone.
4956   return clone;
4957 }
4958
4959
4960 MaybeObject* Heap::CopyJSObjectWithAllocationSite(
4961     JSObject* source,
4962     AllocationSite* site) {
4963   // Never used to copy functions.  If functions need to be copied we
4964   // have to be careful to clear the literals array.
4965   SLOW_ASSERT(!source->IsJSFunction());
4966
4967   // Make the clone.
4968   Map* map = source->map();
4969   int object_size = map->instance_size();
4970   Object* clone;
4971
4972   ASSERT(map->CanTrackAllocationSite());
4973   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4974   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4975
4976   // If we're forced to always allocate, we use the general allocation
4977   // functions which may leave us with an object in old space.
4978   int adjusted_object_size = object_size;
4979   if (always_allocate()) {
4980     // We'll only track the origin if we are certain to allocate in new space.
4981     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4982     if ((object_size + AllocationMemento::kSize) < kMinFreeNewSpaceAfterGC) {
4983       adjusted_object_size += AllocationMemento::kSize;
4984     }
4985
4986     { MaybeObject* maybe_clone =
4987           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4988       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4989     }
4990     Address clone_address = HeapObject::cast(clone)->address();
4991     CopyBlock(clone_address,
4992               source->address(),
4993               object_size);
4994     // Update write barrier for all fields that lie beyond the header.
4995     int write_barrier_offset = adjusted_object_size > object_size
4996         ? JSArray::kSize + AllocationMemento::kSize
4997         : JSObject::kHeaderSize;
4998     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4999       RecordWrites(clone_address,
5000                    write_barrier_offset,
5001                    (object_size - write_barrier_offset) / kPointerSize);
5002     }
5003
5004     // Track allocation site information if we failed to allocate it inline.
5005     if (InNewSpace(clone) &&
5006         adjusted_object_size == object_size) {
5007       MaybeObject* maybe_alloc_memento =
5008           AllocateStruct(ALLOCATION_MEMENTO_TYPE);
5009       AllocationMemento* alloc_memento;
5010       if (maybe_alloc_memento->To(&alloc_memento)) {
5011         alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5012         alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5013       }
5014     }
5015   } else {
5016     wb_mode = SKIP_WRITE_BARRIER;
5017     adjusted_object_size += AllocationMemento::kSize;
5018
5019     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
5020       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
5021     }
5022     SLOW_ASSERT(InNewSpace(clone));
5023     // Since we know the clone is allocated in new space, we can copy
5024     // the contents without worrying about updating the write barrier.
5025     CopyBlock(HeapObject::cast(clone)->address(),
5026               source->address(),
5027               object_size);
5028   }
5029
5030   if (adjusted_object_size > object_size) {
5031     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
5032         reinterpret_cast<Address>(clone) + object_size);
5033     alloc_memento->set_map_no_write_barrier(allocation_memento_map());
5034     alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
5035   }
5036
5037   SLOW_ASSERT(
5038       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
5039   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
5040   FixedArray* properties = FixedArray::cast(source->properties());
5041   // Update elements if necessary.
5042   if (elements->length() > 0) {
5043     Object* elem;
5044     { MaybeObject* maybe_elem;
5045       if (elements->map() == fixed_cow_array_map()) {
5046         maybe_elem = FixedArray::cast(elements);
5047       } else if (source->HasFastDoubleElements()) {
5048         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
5049       } else {
5050         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
5051       }
5052       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
5053     }
5054     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
5055   }
5056   // Update properties if necessary.
5057   if (properties->length() > 0) {
5058     Object* prop;
5059     { MaybeObject* maybe_prop = CopyFixedArray(properties);
5060       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
5061     }
5062     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5063   }
5064   // Return the new clone.
5065   return clone;
5066 }
5067
5068
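// Reinitializes an existing JSReceiver in place as an object of |type| and
// |size| with a freshly allocated map; any space left over from a larger
// original object is covered with a filler.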
5069 MaybeObject* Heap::ReinitializeJSReceiver(
5070     JSReceiver* object, InstanceType type, int size) {
5071   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5072
5073   // Allocate fresh map.
5074   // TODO(rossberg): Once we optimize proxies, cache these maps.
5075   Map* map;
5076   MaybeObject* maybe = AllocateMap(type, size);
5077   if (!maybe->To<Map>(&map)) return maybe;
5078
5079   // Check that the receiver has at least the size of the fresh object.
5080   int size_difference = object->map()->instance_size() - map->instance_size();
5081   ASSERT(size_difference >= 0);
5082
5083   map->set_prototype(object->map()->prototype());
5084
5085   // Allocate the backing storage for the properties.
5086   int prop_size = map->unused_property_fields() - map->inobject_properties();
5087   Object* properties;
5088   maybe = AllocateFixedArray(prop_size, TENURED);
5089   if (!maybe->ToObject(&properties)) return maybe;
5090
5091   // Functions require some allocation, which might fail here.
5092   SharedFunctionInfo* shared = NULL;
5093   if (type == JS_FUNCTION_TYPE) {
5094     String* name;
5095     maybe =
5096         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5097     if (!maybe->To<String>(&name)) return maybe;
5098     maybe = AllocateSharedFunctionInfo(name);
5099     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5100   }
5101
5102   // Because of possible retries of this function after failure,
5103   // we must NOT fail after this point, where we have changed the type!
5104
5105   // Reset the map for the object.
5106   object->set_map(map);
5107   JSObject* jsobj = JSObject::cast(object);
5108
5109   // Reinitialize the object from the constructor map.
5110   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5111
5112   // Functions require some minimal initialization.
5113   if (type == JS_FUNCTION_TYPE) {
5114     map->set_function_with_prototype(true);
5115     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5116     JSFunction::cast(object)->set_context(
5117         isolate()->context()->native_context());
5118   }
5119
5120   // Put in filler if the new object is smaller than the old.
5121   if (size_difference > 0) {
5122     CreateFillerObjectAt(
5123         object->address() + map->instance_size(), size_difference);
5124   }
5125
5126   return object;
5127 }
5128
5129
5130 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5131                                              JSGlobalProxy* object) {
5132   ASSERT(constructor->has_initial_map());
5133   Map* map = constructor->initial_map();
5134
5135   // Check that the already allocated object has the same size and type as
5136   // objects allocated using the constructor.
5137   ASSERT(map->instance_size() == object->map()->instance_size());
5138   ASSERT(map->instance_type() == object->map()->instance_type());
5139
5140   // Allocate the backing storage for the properties.
5141   int prop_size = map->unused_property_fields() - map->inobject_properties();
5142   Object* properties;
5143   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5144     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5145   }
5146
5147   // Reset the map for the object.
5148   object->set_map(constructor->initial_map());
5149
5150   // Reinitialize the object from the constructor map.
5151   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5152   return object;
5153 }
5154
5155
5156 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5157                                            PretenureFlag pretenure) {
5158   int length = string.length();
5159   if (length == 1) {
5160     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5161   }
5162   Object* result;
5163   { MaybeObject* maybe_result =
5164         AllocateRawOneByteString(string.length(), pretenure);
5165     if (!maybe_result->ToObject(&result)) return maybe_result;
5166   }
5167
5168   // Copy the characters into the new object.
5169   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5170             string.start(),
5171             length);
5172   return result;
5173 }
5174
5175
5176 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5177                                               int non_ascii_start,
5178                                               PretenureFlag pretenure) {
5179   // Continue counting the number of characters in the UTF-8 string, starting
5180   // from the first non-ascii character or word.
5181   Access<UnicodeCache::Utf8Decoder>
5182       decoder(isolate_->unicode_cache()->utf8_decoder());
5183   decoder->Reset(string.start() + non_ascii_start,
5184                  string.length() - non_ascii_start);
5185   int utf16_length = decoder->Utf16Length();
5186   ASSERT(utf16_length > 0);
5187   // Allocate string.
5188   Object* result;
5189   {
5190     int chars = non_ascii_start + utf16_length;
5191     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5192     if (!maybe_result->ToObject(&result)) return maybe_result;
5193   }
5194   // Convert and copy the characters into the new object.
5195   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5196   // Copy ascii portion.
5197   uint16_t* data = twobyte->GetChars();
5198   if (non_ascii_start != 0) {
5199     const char* ascii_data = string.start();
5200     for (int i = 0; i < non_ascii_start; i++) {
5201       *data++ = *ascii_data++;
5202     }
5203   }
5204   // Now write the remainder.
5205   decoder->WriteUtf16(data, utf16_length);
5206   return result;
5207 }
5208
5209
5210 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5211                                              PretenureFlag pretenure) {
5212   // Check if the string is an ASCII string.
5213   Object* result;
5214   int length = string.length();
5215   const uc16* start = string.start();
5216
5217   if (String::IsOneByte(start, length)) {
5218     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5219     if (!maybe_result->ToObject(&result)) return maybe_result;
5220     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5221   } else {  // It's not a one byte string.
5222     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5223     if (!maybe_result->ToObject(&result)) return maybe_result;
5224     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5225   }
5226   return result;
5227 }
5228
5229
5230 Map* Heap::InternalizedStringMapForString(String* string) {
5231   // If the string is in new space it cannot be used as internalized.
5232   if (InNewSpace(string)) return NULL;
5233
5234   // Find the corresponding internalized string map for strings.
5235   switch (string->map()->instance_type()) {
5236     case STRING_TYPE: return internalized_string_map();
5237     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5238     case CONS_STRING_TYPE: return cons_internalized_string_map();
5239     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5240     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5241     case EXTERNAL_ASCII_STRING_TYPE:
5242       return external_ascii_internalized_string_map();
5243     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5244       return external_internalized_string_with_one_byte_data_map();
5245     case SHORT_EXTERNAL_STRING_TYPE:
5246       return short_external_internalized_string_map();
5247     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5248       return short_external_ascii_internalized_string_map();
5249     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5250       return short_external_internalized_string_with_one_byte_data_map();
5251     default: return NULL;  // No match found.
5252   }
5253 }
5254
5255
5256 static inline void WriteOneByteData(Vector<const char> vector,
5257                                     uint8_t* chars,
5258                                     int len) {
5259   // Only works for ascii.
5260   ASSERT(vector.length() == len);
5261   OS::MemCopy(chars, vector.start(), len);
5262 }
5263
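// Decodes the UTF-8 data in |vector| and writes it to |chars| as exactly |len|
// UTF-16 code units, emitting surrogate pairs for supplementary characters.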
5264 static inline void WriteTwoByteData(Vector<const char> vector,
5265                                     uint16_t* chars,
5266                                     int len) {
5267   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5268   unsigned stream_length = vector.length();
5269   while (stream_length != 0) {
5270     unsigned consumed = 0;
5271     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5272     ASSERT(c != unibrow::Utf8::kBadChar);
5273     ASSERT(consumed <= stream_length);
5274     stream_length -= consumed;
5275     stream += consumed;
5276     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5277       len -= 2;
5278       if (len < 0) break;
5279       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5280       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5281     } else {
5282       len -= 1;
5283       if (len < 0) break;
5284       *chars++ = c;
5285     }
5286   }
5287   ASSERT(stream_length == 0);
5288   ASSERT(len == 0);
5289 }
5290
5291
5292 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5293   ASSERT(s->length() == len);
5294   String::WriteToFlat(s, chars, 0, len);
5295 }
5296
5297
5298 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5299   ASSERT(s->length() == len);
5300   String::WriteToFlat(s, chars, 0, len);
5301 }
5302
5303
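// Allocates a sequential internalized string of |chars| characters directly in
// old data space (or large object space) and fills it from |t|, which is
// either a Vector<const char> or an existing String* (see the explicit
// instantiations below).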
5304 template<bool is_one_byte, typename T>
5305 MaybeObject* Heap::AllocateInternalizedStringImpl(
5306     T t, int chars, uint32_t hash_field) {
5307   ASSERT(chars >= 0);
5308   // Compute map and object size.
5309   int size;
5310   Map* map;
5311
5312   if (is_one_byte) {
5313     if (chars > SeqOneByteString::kMaxLength) {
5314       return Failure::OutOfMemoryException(0x9);
5315     }
5316     map = ascii_internalized_string_map();
5317     size = SeqOneByteString::SizeFor(chars);
5318   } else {
5319     if (chars > SeqTwoByteString::kMaxLength) {
5320       return Failure::OutOfMemoryException(0xa);
5321     }
5322     map = internalized_string_map();
5323     size = SeqTwoByteString::SizeFor(chars);
5324   }
5325
5326   // Allocate string.
5327   Object* result;
5328   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5329                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5330                    : old_data_space_->AllocateRaw(size);
5331     if (!maybe_result->ToObject(&result)) return maybe_result;
5332   }
5333
5334   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5335   // Set length and hash fields of the allocated string.
5336   String* answer = String::cast(result);
5337   answer->set_length(chars);
5338   answer->set_hash_field(hash_field);
5339
5340   ASSERT_EQ(size, answer->Size());
5341
5342   if (is_one_byte) {
5343     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5344   } else {
5345     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5346   }
5347   return answer;
5348 }
5349
5350
5351 // Need explicit instantiations.
5352 template
5353 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5354 template
5355 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5356     String*, int, uint32_t);
5357 template
5358 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5359     Vector<const char>, int, uint32_t);
5360
5361
5362 MaybeObject* Heap::AllocateRawOneByteString(int length,
5363                                             PretenureFlag pretenure) {
5364   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5365     return Failure::OutOfMemoryException(0xb);
5366   }
5367   int size = SeqOneByteString::SizeFor(length);
5368   ASSERT(size <= SeqOneByteString::kMaxSize);
5369   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5370   AllocationSpace retry_space = OLD_DATA_SPACE;
5371
5372   if (size > Page::kMaxNonCodeHeapObjectSize) {
5373     // Allocate in large object space; the retry space will be ignored.
5374     space = LO_SPACE;
5375   }
5376
5377   Object* result;
5378   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5379     if (!maybe_result->ToObject(&result)) return maybe_result;
5380   }
5381
5382   // Partially initialize the object.
5383   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5384   String::cast(result)->set_length(length);
5385   String::cast(result)->set_hash_field(String::kEmptyHashField);
5386   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5387
5388   return result;
5389 }
5390
5391
5392 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5393                                             PretenureFlag pretenure) {
5394   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5395     return Failure::OutOfMemoryException(0xc);
5396   }
5397   int size = SeqTwoByteString::SizeFor(length);
5398   ASSERT(size <= SeqTwoByteString::kMaxSize);
5399   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5400   AllocationSpace retry_space = OLD_DATA_SPACE;
5401
5402   if (size > Page::kMaxNonCodeHeapObjectSize) {
5403     // Allocate in large object space; the retry space will be ignored.
5404     space = LO_SPACE;
5405   }
5406
5407   Object* result;
5408   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5409     if (!maybe_result->ToObject(&result)) return maybe_result;
5410   }
5411
5412   // Partially initialize the object.
5413   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5414   String::cast(result)->set_length(length);
5415   String::cast(result)->set_hash_field(String::kEmptyHashField);
5416   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5417   return result;
5418 }
5419
5420
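// Allocates a bare JSArray from the Array function's initial map, transitioned
// to |elements_kind| when the isolate has a cached map for that kind; callers
// are expected to install the length and elements afterwards.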
5421 MaybeObject* Heap::AllocateJSArray(
5422     ElementsKind elements_kind,
5423     PretenureFlag pretenure) {
5424   Context* native_context = isolate()->context()->native_context();
5425   JSFunction* array_function = native_context->array_function();
5426   Map* map = array_function->initial_map();
5427   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5428   if (transition_map != NULL) map = transition_map;
5429   return AllocateJSObjectFromMap(map, pretenure);
5430 }
5431
5432
5433 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5434     ElementsKind elements_kind,
5435     Handle<AllocationSite> allocation_site) {
5436   Context* native_context = isolate()->context()->native_context();
5437   JSFunction* array_function = native_context->array_function();
5438   Map* map = array_function->initial_map();
5439   Object* maybe_map_array = native_context->js_array_maps();
5440   if (!maybe_map_array->IsUndefined()) {
5441     Object* maybe_transitioned_map =
5442         FixedArray::cast(maybe_map_array)->get(elements_kind);
5443     if (!maybe_transitioned_map->IsUndefined()) {
5444       map = Map::cast(maybe_transitioned_map);
5445     }
5446   }
5447   return AllocateJSObjectFromMapWithAllocationSite(map, allocation_site);
5448 }
5449
5450
5451 MaybeObject* Heap::AllocateEmptyFixedArray() {
5452   int size = FixedArray::SizeFor(0);
5453   Object* result;
5454   { MaybeObject* maybe_result =
5455         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5456     if (!maybe_result->ToObject(&result)) return maybe_result;
5457   }
5458   // Initialize the object.
5459   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5460       fixed_array_map());
5461   reinterpret_cast<FixedArray*>(result)->set_length(0);
5462   return result;
5463 }
5464
5465
5466 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5467   return AllocateExternalArray(0, array_type, NULL, TENURED);
5468 }
5469
5470
5471 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5472   if (length < 0 || length > FixedArray::kMaxLength) {
5473     return Failure::OutOfMemoryException(0xd);
5474   }
5475   ASSERT(length > 0);
5476   // Use the general function if we're forced to always allocate.
5477   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5478   // Allocate the raw data for a fixed array.
5479   int size = FixedArray::SizeFor(length);
5480   return size <= Page::kMaxNonCodeHeapObjectSize
5481       ? new_space_.AllocateRaw(size)
5482       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5483 }
5484
5485
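// Copies |src| into a freshly allocated FixedArray and installs |map| on the
// copy. New-space copies use a raw block copy; old-space copies store the
// elements one by one so the write barrier can be applied.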
5486 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5487   int len = src->length();
5488   Object* obj;
5489   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5490     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5491   }
5492   if (InNewSpace(obj)) {
5493     HeapObject* dst = HeapObject::cast(obj);
5494     dst->set_map_no_write_barrier(map);
5495     CopyBlock(dst->address() + kPointerSize,
5496               src->address() + kPointerSize,
5497               FixedArray::SizeFor(len) - kPointerSize);
5498     return obj;
5499   }
5500   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5501   FixedArray* result = FixedArray::cast(obj);
5502   result->set_length(len);
5503
5504   // Copy the content
5505   DisallowHeapAllocation no_gc;
5506   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5507   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5508   return result;
5509 }
5510
5511
5512 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5513                                                Map* map) {
5514   int len = src->length();
5515   Object* obj;
5516   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5517     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5518   }
5519   HeapObject* dst = HeapObject::cast(obj);
5520   dst->set_map_no_write_barrier(map);
5521   CopyBlock(
5522       dst->address() + FixedDoubleArray::kLengthOffset,
5523       src->address() + FixedDoubleArray::kLengthOffset,
5524       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5525   return obj;
5526 }
5527
5528
5529 MaybeObject* Heap::AllocateFixedArray(int length) {
5530   ASSERT(length >= 0);
5531   if (length == 0) return empty_fixed_array();
5532   Object* result;
5533   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5534     if (!maybe_result->ToObject(&result)) return maybe_result;
5535   }
5536   // Initialize header.
5537   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5538   array->set_map_no_write_barrier(fixed_array_map());
5539   array->set_length(length);
5540   // Initialize body.
5541   ASSERT(!InNewSpace(undefined_value()));
5542   MemsetPointer(array->data_start(), undefined_value(), length);
5543   return result;
5544 }
5545
5546
5547 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5548   if (length < 0 || length > FixedArray::kMaxLength) {
5549     return Failure::OutOfMemoryException(0xe);
5550   }
5551   int size = FixedArray::SizeFor(length);
5552   AllocationSpace space =
5553       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5554   AllocationSpace retry_space = OLD_POINTER_SPACE;
5555
5556   if (size > Page::kMaxNonCodeHeapObjectSize) {
5557     // Allocate in large object space; the retry space will be ignored.
5558     space = LO_SPACE;
5559   }
5560
5561   return AllocateRaw(size, space, retry_space);
5562 }
5563
5564
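// Helper for the AllocateFixedArray* variants below: allocates a FixedArray of
// |length| elements and initializes every slot to |filler|, which must not be
// in new space because the initializing stores skip the write barrier.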
5565 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5566     Heap* heap,
5567     int length,
5568     PretenureFlag pretenure,
5569     Object* filler) {
5570   ASSERT(length >= 0);
5571   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5572   if (length == 0) return heap->empty_fixed_array();
5573
5574   ASSERT(!heap->InNewSpace(filler));
5575   Object* result;
5576   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5577     if (!maybe_result->ToObject(&result)) return maybe_result;
5578   }
5579
5580   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5581   FixedArray* array = FixedArray::cast(result);
5582   array->set_length(length);
5583   MemsetPointer(array->data_start(), filler, length);
5584   return array;
5585 }
5586
5587
5588 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5589   return AllocateFixedArrayWithFiller(this,
5590                                       length,
5591                                       pretenure,
5592                                       undefined_value());
5593 }
5594
5595
5596 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5597                                                PretenureFlag pretenure) {
5598   return AllocateFixedArrayWithFiller(this,
5599                                       length,
5600                                       pretenure,
5601                                       the_hole_value());
5602 }
5603
5604
5605 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5606   if (length == 0) return empty_fixed_array();
5607
5608   Object* obj;
5609   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5610     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5611   }
5612
5613   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5614       fixed_array_map());
5615   FixedArray::cast(obj)->set_length(length);
5616   return obj;
5617 }
5618
5619
5620 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5621   int size = FixedDoubleArray::SizeFor(0);
5622   Object* result;
5623   { MaybeObject* maybe_result =
5624         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5625     if (!maybe_result->ToObject(&result)) return maybe_result;
5626   }
5627   // Initialize the object.
5628   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5629       fixed_double_array_map());
5630   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5631   return result;
5632 }
5633
5634
5635 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5636     int length,
5637     PretenureFlag pretenure) {
5638   if (length == 0) return empty_fixed_array();
5639
5640   Object* elements_object;
5641   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5642   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5643   FixedDoubleArray* elements =
5644       reinterpret_cast<FixedDoubleArray*>(elements_object);
5645
5646   elements->set_map_no_write_barrier(fixed_double_array_map());
5647   elements->set_length(length);
5648   return elements;
5649 }
5650
5651
5652 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5653     int length,
5654     PretenureFlag pretenure) {
5655   if (length == 0) return empty_fixed_array();
5656
5657   Object* elements_object;
5658   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5659   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5660   FixedDoubleArray* elements =
5661       reinterpret_cast<FixedDoubleArray*>(elements_object);
5662
5663   for (int i = 0; i < length; ++i) {
5664     elements->set_the_hole(i);
5665   }
5666
5667   elements->set_map_no_write_barrier(fixed_double_array_map());
5668   elements->set_length(length);
5669   return elements;
5670 }
5671
5672
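// Allocates the raw storage for a FixedDoubleArray of |length| elements. On
// 32-bit hosts an extra pointer-sized slot is reserved so that
// EnsureDoubleAligned can realign the result to a double boundary.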
5673 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5674                                                PretenureFlag pretenure) {
5675   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5676     return Failure::OutOfMemoryException(0xf);
5677   }
5678   int size = FixedDoubleArray::SizeFor(length);
5679   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5680   AllocationSpace retry_space = OLD_DATA_SPACE;
5681
5682 #ifndef V8_HOST_ARCH_64_BIT
5683   size += kPointerSize;
5684 #endif
5685
5686   if (size > Page::kMaxNonCodeHeapObjectSize) {
5687     // Allocate in large object space, retry space will be ignored.
5688     space = LO_SPACE;
5689   }
5690
5691   HeapObject* object;
5692   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5693     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5694   }
5695
5696   return EnsureDoubleAligned(this, object, size);
5697 }
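
// A worked example of the padding above, as a sketch: on a 32-bit host
// kPointerSize is 4 but the double fields need 8-byte alignment, so one
// extra pointer-sized word is requested up front. If the raw allocation is
// not 8-byte aligned, EnsureDoubleAligned (defined earlier in this file)
// puts a one-word filler at the start and the array begins at
// address + kPointerSize; otherwise the spare word becomes a filler at the
// end. For instance, a raw result at an address ending in 0x0c is shifted
// to 0x10, while one ending in 0x08 keeps its address. Either way the
// doubles land on an 8-byte boundary without a second allocation.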
5698
5699
5700 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5701   Object* result;
5702   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5703     if (!maybe_result->ToObject(&result)) return maybe_result;
5704   }
5705   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5706       hash_table_map());
5707   ASSERT(result->IsHashTable());
5708   return result;
5709 }
5710
5711
5712 MaybeObject* Heap::AllocateSymbol() {
5713   // Statically ensure that it is safe to allocate symbols in paged spaces.
5714   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5715
5716   Object* result;
5717   MaybeObject* maybe =
5718       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5719   if (!maybe->ToObject(&result)) return maybe;
5720
5721   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5722
5723   // Generate a random hash value.
5724   int hash;
5725   int attempts = 0;
5726   do {
5727     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5728     attempts++;
5729   } while (hash == 0 && attempts < 30);
5730   if (hash == 0) hash = 1;  // never return 0
5731
5732   Symbol::cast(result)->set_hash_field(
5733       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5734   Symbol::cast(result)->set_name(undefined_value());
5735
5736   ASSERT(result->IsSymbol());
5737   return result;
5738 }
5739
5740
5741 MaybeObject* Heap::AllocateNativeContext() {
5742   Object* result;
5743   { MaybeObject* maybe_result =
5744         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5745     if (!maybe_result->ToObject(&result)) return maybe_result;
5746   }
5747   Context* context = reinterpret_cast<Context*>(result);
5748   context->set_map_no_write_barrier(native_context_map());
5749   context->set_js_array_maps(undefined_value());
5750   ASSERT(context->IsNativeContext());
5751   ASSERT(result->IsContext());
5752   return result;
5753 }
5754
5755
5756 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5757                                          ScopeInfo* scope_info) {
5758   Object* result;
5759   { MaybeObject* maybe_result =
5760         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5761     if (!maybe_result->ToObject(&result)) return maybe_result;
5762   }
5763   Context* context = reinterpret_cast<Context*>(result);
5764   context->set_map_no_write_barrier(global_context_map());
5765   context->set_closure(function);
5766   context->set_previous(function->context());
5767   context->set_extension(scope_info);
5768   context->set_global_object(function->context()->global_object());
5769   ASSERT(context->IsGlobalContext());
5770   ASSERT(result->IsContext());
5771   return context;
5772 }
5773
5774
5775 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5776   Object* result;
5777   { MaybeObject* maybe_result =
5778         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5779     if (!maybe_result->ToObject(&result)) return maybe_result;
5780   }
5781   Context* context = reinterpret_cast<Context*>(result);
5782   context->set_map_no_write_barrier(module_context_map());
5783   // Instance link will be set later.
5784   context->set_extension(Smi::FromInt(0));
5785   return context;
5786 }
5787
5788
5789 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5790   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5791   Object* result;
5792   { MaybeObject* maybe_result = AllocateFixedArray(length);
5793     if (!maybe_result->ToObject(&result)) return maybe_result;
5794   }
5795   Context* context = reinterpret_cast<Context*>(result);
5796   context->set_map_no_write_barrier(function_context_map());
5797   context->set_closure(function);
5798   context->set_previous(function->context());
5799   context->set_extension(Smi::FromInt(0));
5800   context->set_global_object(function->context()->global_object());
5801   return context;
5802 }
5803
5804
5805 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5806                                         Context* previous,
5807                                         String* name,
5808                                         Object* thrown_object) {
5809   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5810   Object* result;
5811   { MaybeObject* maybe_result =
5812         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5813     if (!maybe_result->ToObject(&result)) return maybe_result;
5814   }
5815   Context* context = reinterpret_cast<Context*>(result);
5816   context->set_map_no_write_barrier(catch_context_map());
5817   context->set_closure(function);
5818   context->set_previous(previous);
5819   context->set_extension(name);
5820   context->set_global_object(previous->global_object());
5821   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5822   return context;
5823 }
5824
5825
5826 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5827                                        Context* previous,
5828                                        JSReceiver* extension) {
5829   Object* result;
5830   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5831     if (!maybe_result->ToObject(&result)) return maybe_result;
5832   }
5833   Context* context = reinterpret_cast<Context*>(result);
5834   context->set_map_no_write_barrier(with_context_map());
5835   context->set_closure(function);
5836   context->set_previous(previous);
5837   context->set_extension(extension);
5838   context->set_global_object(previous->global_object());
5839   return context;
5840 }
5841
5842
5843 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5844                                         Context* previous,
5845                                         ScopeInfo* scope_info) {
5846   Object* result;
5847   { MaybeObject* maybe_result =
5848         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5849     if (!maybe_result->ToObject(&result)) return maybe_result;
5850   }
5851   Context* context = reinterpret_cast<Context*>(result);
5852   context->set_map_no_write_barrier(block_context_map());
5853   context->set_closure(function);
5854   context->set_previous(previous);
5855   context->set_extension(scope_info);
5856   context->set_global_object(previous->global_object());
5857   return context;
5858 }
5859
5860
5861 MaybeObject* Heap::AllocateScopeInfo(int length) {
5862   FixedArray* scope_info;
5863   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5864   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5865   scope_info->set_map_no_write_barrier(scope_info_map());
5866   return scope_info;
5867 }
5868
5869
5870 MaybeObject* Heap::AllocateExternal(void* value) {
5871   Foreign* foreign;
5872   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5873     if (!maybe_result->To(&foreign)) return maybe_result;
5874   }
5875   JSObject* external;
5876   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5877     if (!maybe_result->To(&external)) return maybe_result;
5878   }
5879   external->SetInternalField(0, foreign);
5880   return external;
5881 }
5882
5883
5884 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5885   Map* map;
5886   switch (type) {
5887 #define MAKE_CASE(NAME, Name, name) \
5888     case NAME##_TYPE: map = name##_map(); break;
5889 STRUCT_LIST(MAKE_CASE)
5890 #undef MAKE_CASE
5891     default:
5892       UNREACHABLE();
5893       return Failure::InternalError();
5894   }
5895   int size = map->instance_size();
5896   AllocationSpace space =
5897       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5898   Object* result;
5899   { MaybeObject* maybe_result = Allocate(map, space);
5900     if (!maybe_result->ToObject(&result)) return maybe_result;
5901   }
5902   Struct::cast(result)->InitializeBody(size);
5903   return result;
5904 }
5905
5906
5907 bool Heap::IsHeapIterable() {
5908   return (!old_pointer_space()->was_swept_conservatively() &&
5909           !old_data_space()->was_swept_conservatively());
5910 }
5911
5912
5913 void Heap::EnsureHeapIsIterable() {
5914   ASSERT(AllowHeapAllocation::IsAllowed());
5915   if (!IsHeapIterable()) {
5916     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5917   }
5918   ASSERT(IsHeapIterable());
5919 }
5920
5921
5922 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5923   incremental_marking()->Step(step_size,
5924                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5925
5926   if (incremental_marking()->IsComplete()) {
5927     bool uncommit = false;
5928     if (gc_count_at_last_idle_gc_ == gc_count_) {
5929       // No GC since the last full GC, the mutator is probably not active.
5930       isolate_->compilation_cache()->Clear();
5931       uncommit = true;
5932     }
5933     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5934     mark_sweeps_since_idle_round_started_++;
5935     gc_count_at_last_idle_gc_ = gc_count_;
5936     if (uncommit) {
5937       new_space_.Shrink();
5938       UncommitFromSpace();
5939     }
5940   }
5941 }
5942
5943
5944 bool Heap::IdleNotification(int hint) {
5945   // Hints greater than this value indicate that
5946   // the embedder is requesting a lot of GC work.
5947   const int kMaxHint = 1000;
5948   const int kMinHintForIncrementalMarking = 10;
5949   // Minimal hint that allows a full GC.
5950   const int kMinHintForFullGC = 100;
5951   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5952   // The size factor is in range [5..250]. The numbers here are chosen from
5953   // experiments. If you change them, make sure to test with
5954   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5955   intptr_t step_size =
5956       size_factor * IncrementalMarking::kAllocatedThreshold;
5957
5958   if (contexts_disposed_ > 0) {
5959     if (hint >= kMaxHint) {
5960       // The embedder is requesting a lot of GC work after context disposal,
5961       // so we age inline caches so that they don't keep objects from
5962       // the old context alive.
5963       AgeInlineCaches();
5964     }
5965     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5966     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5967         incremental_marking()->IsStopped()) {
5968       HistogramTimerScope scope(isolate_->counters()->gc_context());
5969       CollectAllGarbage(kReduceMemoryFootprintMask,
5970                         "idle notification: contexts disposed");
5971     } else {
5972       AdvanceIdleIncrementalMarking(step_size);
5973       contexts_disposed_ = 0;
5974     }
5975     // After context disposal there is likely a lot of garbage remaining, reset
5976     // the idle notification counters in order to trigger more incremental GCs
5977     // on subsequent idle notifications.
5978     StartIdleRound();
5979     return false;
5980   }
5981
5982   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5983     return IdleGlobalGC();
5984   }
5985
5986   // By doing small chunks of GC work in each IdleNotification,
5987   // we perform a round of incremental GCs and after that wait until
5988   // the mutator creates enough garbage to justify a new round.
5989   // An incremental GC progresses as follows:
5990   // 1. many incremental marking steps,
5991   // 2. one old space mark-sweep-compact,
5992   // 3. many lazy sweep steps.
5993   // Use mark-sweep-compact events to count incremental GCs in a round.
5994
5995   if (incremental_marking()->IsStopped()) {
5996     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5997         !IsSweepingComplete() &&
5998         !AdvanceSweepers(static_cast<int>(step_size))) {
5999       return false;
6000     }
6001   }
6002
6003   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6004     if (EnoughGarbageSinceLastIdleRound()) {
6005       StartIdleRound();
6006     } else {
6007       return true;
6008     }
6009   }
6010
6011   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
6012                               mark_sweeps_since_idle_round_started_;
6013
6014   if (incremental_marking()->IsStopped()) {
6015     // If there are no more than two GCs left in this idle round and we are
6016     // allowed to do a full GC, then make those GCs full in order to compact
6017     // the code space.
6018     // TODO(ulan): Once we enable code compaction for incremental marking,
6019     // we can get rid of this special case and always start incremental marking.
6020     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
6021       CollectAllGarbage(kReduceMemoryFootprintMask,
6022                         "idle notification: finalize idle round");
6023       mark_sweeps_since_idle_round_started_++;
6024     } else if (hint > kMinHintForIncrementalMarking) {
6025       incremental_marking()->Start();
6026     }
6027   }
6028   if (!incremental_marking()->IsStopped() &&
6029       hint > kMinHintForIncrementalMarking) {
6030     AdvanceIdleIncrementalMarking(step_size);
6031   }
6032
6033   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6034     FinishIdleRound();
6035     return true;
6036   }
6037
6038   return false;
6039 }
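
// Embedder-side sketch (hedged: in this release the public
// v8::V8::IdleNotification(int hint) forwards to the method above): keep
// sending modest hints while the application is idle until the heap reports
// that the current idle round has finished. ApplicationIsIdle() is a
// hypothetical embedder hook, not a V8 API.
//
//   bool finished = false;
//   while (ApplicationIsIdle() && !finished) {
//     finished = v8::V8::IdleNotification(100);
//   }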
6040
6041
6042 bool Heap::IdleGlobalGC() {
6043   static const int kIdlesBeforeScavenge = 4;
6044   static const int kIdlesBeforeMarkSweep = 7;
6045   static const int kIdlesBeforeMarkCompact = 8;
6046   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6047   static const unsigned int kGCsBetweenCleanup = 4;
6048
6049   if (!last_idle_notification_gc_count_init_) {
6050     last_idle_notification_gc_count_ = gc_count_;
6051     last_idle_notification_gc_count_init_ = true;
6052   }
6053
6054   bool uncommit = true;
6055   bool finished = false;
6056
6057   // Reset the number of idle notifications received when a number of
6058   // GCs have taken place. This allows another round of cleanup based
6059   // on idle notifications if enough work has been carried out to
6060   // provoke a number of garbage collections.
6061   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6062     number_idle_notifications_ =
6063         Min(number_idle_notifications_ + 1, kMaxIdleCount);
6064   } else {
6065     number_idle_notifications_ = 0;
6066     last_idle_notification_gc_count_ = gc_count_;
6067   }
6068
6069   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6070     CollectGarbage(NEW_SPACE, "idle notification");
6071     new_space_.Shrink();
6072     last_idle_notification_gc_count_ = gc_count_;
6073   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6074     // Before doing the mark-sweep collections we clear the
6075     // compilation cache to avoid hanging on to source code and
6076     // generated code for cached functions.
6077     isolate_->compilation_cache()->Clear();
6078
6079     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6080     new_space_.Shrink();
6081     last_idle_notification_gc_count_ = gc_count_;
6082
6083   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6084     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6085     new_space_.Shrink();
6086     last_idle_notification_gc_count_ = gc_count_;
6087     number_idle_notifications_ = 0;
6088     finished = true;
6089   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6090     // If we have received more than kIdlesBeforeMarkCompact idle
6091     // notifications we do not perform any cleanup because we don't
6092     // expect to gain much by doing so.
6093     finished = true;
6094   }
6095
6096   if (uncommit) UncommitFromSpace();
6097
6098   return finished;
6099 }
6100
6101
6102 #ifdef DEBUG
6103
6104 void Heap::Print() {
6105   if (!HasBeenSetUp()) return;
6106   isolate()->PrintStack(stdout);
6107   AllSpaces spaces(this);
6108   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6109     space->Print();
6110   }
6111 }
6112
6113
6114 void Heap::ReportCodeStatistics(const char* title) {
6115   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6116   PagedSpace::ResetCodeStatistics();
6117   // We do not look for code in new space, map space, or old space.  If code
6118   // somehow ends up in those spaces, we would miss it here.
6119   code_space_->CollectCodeStatistics();
6120   lo_space_->CollectCodeStatistics();
6121   PagedSpace::ReportCodeStatistics();
6122 }
6123
6124
6125 // This function expects that NewSpace's allocated objects histogram is
6126 // populated (via a call to CollectStatistics or else as a side effect of a
6127 // just-completed scavenge collection).
6128 void Heap::ReportHeapStatistics(const char* title) {
6129   USE(title);
6130   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6131          title, gc_count_);
6132   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6133          old_generation_allocation_limit_);
6134
6135   PrintF("\n");
6136   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6137   isolate_->global_handles()->PrintStats();
6138   PrintF("\n");
6139
6140   PrintF("Heap statistics : ");
6141   isolate_->memory_allocator()->ReportStatistics();
6142   PrintF("To space : ");
6143   new_space_.ReportStatistics();
6144   PrintF("Old pointer space : ");
6145   old_pointer_space_->ReportStatistics();
6146   PrintF("Old data space : ");
6147   old_data_space_->ReportStatistics();
6148   PrintF("Code space : ");
6149   code_space_->ReportStatistics();
6150   PrintF("Map space : ");
6151   map_space_->ReportStatistics();
6152   PrintF("Cell space : ");
6153   cell_space_->ReportStatistics();
6154   PrintF("PropertyCell space : ");
6155   property_cell_space_->ReportStatistics();
6156   PrintF("Large object space : ");
6157   lo_space_->ReportStatistics();
6158   PrintF(">>>>>> ========================================= >>>>>>\n");
6159 }
6160
6161 #endif  // DEBUG
6162
6163 bool Heap::Contains(HeapObject* value) {
6164   return Contains(value->address());
6165 }
6166
6167
6168 bool Heap::Contains(Address addr) {
6169   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6170   return HasBeenSetUp() &&
6171     (new_space_.ToSpaceContains(addr) ||
6172      old_pointer_space_->Contains(addr) ||
6173      old_data_space_->Contains(addr) ||
6174      code_space_->Contains(addr) ||
6175      map_space_->Contains(addr) ||
6176      cell_space_->Contains(addr) ||
6177      property_cell_space_->Contains(addr) ||
6178      lo_space_->SlowContains(addr));
6179 }
6180
6181
6182 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6183   return InSpace(value->address(), space);
6184 }
6185
6186
6187 bool Heap::InSpace(Address addr, AllocationSpace space) {
6188   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6189   if (!HasBeenSetUp()) return false;
6190
6191   switch (space) {
6192     case NEW_SPACE:
6193       return new_space_.ToSpaceContains(addr);
6194     case OLD_POINTER_SPACE:
6195       return old_pointer_space_->Contains(addr);
6196     case OLD_DATA_SPACE:
6197       return old_data_space_->Contains(addr);
6198     case CODE_SPACE:
6199       return code_space_->Contains(addr);
6200     case MAP_SPACE:
6201       return map_space_->Contains(addr);
6202     case CELL_SPACE:
6203       return cell_space_->Contains(addr);
6204     case PROPERTY_CELL_SPACE:
6205       return property_cell_space_->Contains(addr);
6206     case LO_SPACE:
6207       return lo_space_->SlowContains(addr);
6208   }
6209
6210   return false;
6211 }
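
// Usage sketch for the containment predicates above (assuming a Heap* heap
// and a HeapObject* obj in scope):
//
//   ASSERT(heap->Contains(obj));                    // somewhere in this heap
//   bool in_code = heap->InSpace(obj, CODE_SPACE);  // in one specific space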
6212
6213
6214 #ifdef VERIFY_HEAP
6215 void Heap::Verify() {
6216   CHECK(HasBeenSetUp());
6217
6218   store_buffer()->Verify();
6219
6220   VerifyPointersVisitor visitor;
6221   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6222
6223   new_space_.Verify();
6224
6225   old_pointer_space_->Verify(&visitor);
6226   map_space_->Verify(&visitor);
6227
6228   VerifyPointersVisitor no_dirty_regions_visitor;
6229   old_data_space_->Verify(&no_dirty_regions_visitor);
6230   code_space_->Verify(&no_dirty_regions_visitor);
6231   cell_space_->Verify(&no_dirty_regions_visitor);
6232   property_cell_space_->Verify(&no_dirty_regions_visitor);
6233
6234   lo_space_->Verify();
6235 }
6236 #endif
6237
6238
6239 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6240   Object* result = NULL;
6241   Object* new_table;
6242   { MaybeObject* maybe_new_table =
6243         string_table()->LookupUtf8String(string, &result);
6244     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6245   }
6246   // Can't use set_string_table because StringTable::cast knows that
6247   // StringTable is a singleton and checks for identity.
6248   roots_[kStringTableRootIndex] = new_table;
6249   ASSERT(result != NULL);
6250   return result;
6251 }
6252
6253
6254 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6255   Object* result = NULL;
6256   Object* new_table;
6257   { MaybeObject* maybe_new_table =
6258         string_table()->LookupOneByteString(string, &result);
6259     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6260   }
6261   // Can't use set_string_table because StringTable::cast knows that
6262   // StringTable is a singleton and checks for identity.
6263   roots_[kStringTableRootIndex] = new_table;
6264   ASSERT(result != NULL);
6265   return result;
6266 }
6267
6268
6269 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6270                                      int from,
6271                                      int length) {
6272   Object* result = NULL;
6273   Object* new_table;
6274   { MaybeObject* maybe_new_table =
6275         string_table()->LookupSubStringOneByteString(string,
6276                                                    from,
6277                                                    length,
6278                                                    &result);
6279     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6280   }
6281   // Can't use set_string_table because StringTable::cast knows that
6282   // StringTable is a singleton and checks for identity.
6283   roots_[kStringTableRootIndex] = new_table;
6284   ASSERT(result != NULL);
6285   return result;
6286 }
6287
6288
6289 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6290   Object* result = NULL;
6291   Object* new_table;
6292   { MaybeObject* maybe_new_table =
6293         string_table()->LookupTwoByteString(string, &result);
6294     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6295   }
6296   // Can't use set_string_table because StringTable::cast knows that
6297   // StringTable is a singleton and checks for identity.
6298   roots_[kStringTableRootIndex] = new_table;
6299   ASSERT(result != NULL);
6300   return result;
6301 }
6302
6303
6304 MaybeObject* Heap::InternalizeString(String* string) {
6305   if (string->IsInternalizedString()) return string;
6306   Object* result = NULL;
6307   Object* new_table;
6308   { MaybeObject* maybe_new_table =
6309         string_table()->LookupString(string, &result);
6310     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6311   }
6312   // Can't use set_string_table because StringTable::cast knows that
6313   // StringTable is a singleton and checks for identity.
6314   roots_[kStringTableRootIndex] = new_table;
6315   ASSERT(result != NULL);
6316   return result;
6317 }
6318
6319
6320 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6321   if (string->IsInternalizedString()) {
6322     *result = string;
6323     return true;
6324   }
6325   return string_table()->LookupStringIfExists(string, result);
6326 }
6327
6328
6329 void Heap::ZapFromSpace() {
6330   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6331                           new_space_.FromSpaceEnd());
6332   while (it.has_next()) {
6333     NewSpacePage* page = it.next();
6334     for (Address cursor = page->area_start(), limit = page->area_end();
6335          cursor < limit;
6336          cursor += kPointerSize) {
6337       Memory::Address_at(cursor) = kFromSpaceZapValue;
6338     }
6339   }
6340 }
6341
6342
6343 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6344                                              Address end,
6345                                              ObjectSlotCallback callback) {
6346   Address slot_address = start;
6347
6348   // We are not collecting slots on new space objects during mutation,
6349   // thus we have to scan for pointers to evacuation candidates when we
6350   // promote objects. But we should not record any slots in non-black
6351   // objects. A grey object's slots would be rescanned anyway.
6352   // A white object might not survive until the end of the collection,
6353   // so recording its slots would violate the invariant.
6354   bool record_slots = false;
6355   if (incremental_marking()->IsCompacting()) {
6356     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6357     record_slots = Marking::IsBlack(mark_bit);
6358   }
6359
6360   while (slot_address < end) {
6361     Object** slot = reinterpret_cast<Object**>(slot_address);
6362     Object* object = *slot;
6363     // If the store buffer becomes overfull we mark pages as being exempt from
6364     // the store buffer.  These pages are scanned to find pointers that point
6365     // to the new space.  In that case we may hit newly promoted objects and
6366     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6367     if (object->IsHeapObject()) {
6368       if (Heap::InFromSpace(object)) {
6369         callback(reinterpret_cast<HeapObject**>(slot),
6370                  HeapObject::cast(object));
6371         Object* new_object = *slot;
6372         if (InNewSpace(new_object)) {
6373           SLOW_ASSERT(Heap::InToSpace(new_object));
6374           SLOW_ASSERT(new_object->IsHeapObject());
6375           store_buffer_.EnterDirectlyIntoStoreBuffer(
6376               reinterpret_cast<Address>(slot));
6377         }
6378         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6379       } else if (record_slots &&
6380                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6381         mark_compact_collector()->RecordSlot(slot, slot, object);
6382       }
6383     }
6384     slot_address += kPointerSize;
6385   }
6386 }
6387
6388
6389 #ifdef DEBUG
6390 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6391
6392
6393 bool IsAMapPointerAddress(Object** addr) {
6394   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6395   int mod = a % Map::kSize;
6396   return mod >= Map::kPointerFieldsBeginOffset &&
6397          mod < Map::kPointerFieldsEndOffset;
6398 }
6399
6400
6401 bool EverythingsAPointer(Object** addr) {
6402   return true;
6403 }
6404
6405
6406 static void CheckStoreBuffer(Heap* heap,
6407                              Object** current,
6408                              Object** limit,
6409                              Object**** store_buffer_position,
6410                              Object*** store_buffer_top,
6411                              CheckStoreBufferFilter filter,
6412                              Address special_garbage_start,
6413                              Address special_garbage_end) {
6414   Map* free_space_map = heap->free_space_map();
6415   for ( ; current < limit; current++) {
6416     Object* o = *current;
6417     Address current_address = reinterpret_cast<Address>(current);
6418     // Skip free space.
6419     if (o == free_space_map) {
6420       Address current_address = reinterpret_cast<Address>(current);
6421       FreeSpace* free_space =
6422           FreeSpace::cast(HeapObject::FromAddress(current_address));
6423       int skip = free_space->Size();
6424       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6425       ASSERT(skip > 0);
6426       current_address += skip - kPointerSize;
6427       current = reinterpret_cast<Object**>(current_address);
6428       continue;
6429     }
6430     // Skip the current linear allocation space between top and limit which is
6431     // unmarked with the free space map, but can contain junk.
6432     if (current_address == special_garbage_start &&
6433         special_garbage_end != special_garbage_start) {
6434       current_address = special_garbage_end - kPointerSize;
6435       current = reinterpret_cast<Object**>(current_address);
6436       continue;
6437     }
6438     if (!(*filter)(current)) continue;
6439     ASSERT(current_address < special_garbage_start ||
6440            current_address >= special_garbage_end);
6441     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6442     // We have to check that the pointer does not point into new space
6443     // without trying to cast it to a heap object since the hash field of
6444     // a string can contain values like 1 and 3 which are tagged null
6445     // pointers.
6446     if (!heap->InNewSpace(o)) continue;
6447     while (**store_buffer_position < current &&
6448            *store_buffer_position < store_buffer_top) {
6449       (*store_buffer_position)++;
6450     }
6451     if (**store_buffer_position != current ||
6452         *store_buffer_position == store_buffer_top) {
6453       Object** obj_start = current;
6454       while (!(*obj_start)->IsMap()) obj_start--;
6455       UNREACHABLE();
6456     }
6457   }
6458 }
6459
6460
6461 // Check that the store buffer contains all intergenerational pointers by
6462 // scanning a page and ensuring that all pointers to young space are in the
6463 // store buffer.
6464 void Heap::OldPointerSpaceCheckStoreBuffer() {
6465   OldSpace* space = old_pointer_space();
6466   PageIterator pages(space);
6467
6468   store_buffer()->SortUniq();
6469
6470   while (pages.has_next()) {
6471     Page* page = pages.next();
6472     Object** current = reinterpret_cast<Object**>(page->area_start());
6473
6474     Address end = page->area_end();
6475
6476     Object*** store_buffer_position = store_buffer()->Start();
6477     Object*** store_buffer_top = store_buffer()->Top();
6478
6479     Object** limit = reinterpret_cast<Object**>(end);
6480     CheckStoreBuffer(this,
6481                      current,
6482                      limit,
6483                      &store_buffer_position,
6484                      store_buffer_top,
6485                      &EverythingsAPointer,
6486                      space->top(),
6487                      space->limit());
6488   }
6489 }
6490
6491
6492 void Heap::MapSpaceCheckStoreBuffer() {
6493   MapSpace* space = map_space();
6494   PageIterator pages(space);
6495
6496   store_buffer()->SortUniq();
6497
6498   while (pages.has_next()) {
6499     Page* page = pages.next();
6500     Object** current = reinterpret_cast<Object**>(page->area_start());
6501
6502     Address end = page->area_end();
6503
6504     Object*** store_buffer_position = store_buffer()->Start();
6505     Object*** store_buffer_top = store_buffer()->Top();
6506
6507     Object** limit = reinterpret_cast<Object**>(end);
6508     CheckStoreBuffer(this,
6509                      current,
6510                      limit,
6511                      &store_buffer_position,
6512                      store_buffer_top,
6513                      &IsAMapPointerAddress,
6514                      space->top(),
6515                      space->limit());
6516   }
6517 }
6518
6519
6520 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6521   LargeObjectIterator it(lo_space());
6522   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6523     // We only have code, sequential strings, or fixed arrays in large
6524     // object space, and only fixed arrays can possibly contain pointers to
6525     // the young generation.
6526     if (object->IsFixedArray()) {
6527       Object*** store_buffer_position = store_buffer()->Start();
6528       Object*** store_buffer_top = store_buffer()->Top();
6529       Object** current = reinterpret_cast<Object**>(object->address());
6530       Object** limit =
6531           reinterpret_cast<Object**>(object->address() + object->Size());
6532       CheckStoreBuffer(this,
6533                        current,
6534                        limit,
6535                        &store_buffer_position,
6536                        store_buffer_top,
6537                        &EverythingsAPointer,
6538                        NULL,
6539                        NULL);
6540     }
6541   }
6542 }
6543 #endif
6544
6545
6546 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6547   IterateStrongRoots(v, mode);
6548   IterateWeakRoots(v, mode);
6549 }
6550
6551
6552 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6553   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6554   v->Synchronize(VisitorSynchronization::kStringTable);
6555   if (mode != VISIT_ALL_IN_SCAVENGE &&
6556       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6557     // Scavenge collections have special processing for this.
6558     external_string_table_.Iterate(v);
6559   }
6560   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6561 }
6562
6563
6564 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6565   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6566   v->Synchronize(VisitorSynchronization::kStrongRootList);
6567
6568   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6569   v->Synchronize(VisitorSynchronization::kInternalizedString);
6570
6571   isolate_->bootstrapper()->Iterate(v);
6572   v->Synchronize(VisitorSynchronization::kBootstrapper);
6573   isolate_->Iterate(v);
6574   v->Synchronize(VisitorSynchronization::kTop);
6575   Relocatable::Iterate(v);
6576   v->Synchronize(VisitorSynchronization::kRelocatable);
6577
6578 #ifdef ENABLE_DEBUGGER_SUPPORT
6579   isolate_->debug()->Iterate(v);
6580   if (isolate_->deoptimizer_data() != NULL) {
6581     isolate_->deoptimizer_data()->Iterate(v);
6582   }
6583 #endif
6584   v->Synchronize(VisitorSynchronization::kDebug);
6585   isolate_->compilation_cache()->Iterate(v);
6586   v->Synchronize(VisitorSynchronization::kCompilationCache);
6587
6588   // Iterate over local handles in handle scopes.
6589   isolate_->handle_scope_implementer()->Iterate(v);
6590   isolate_->IterateDeferredHandles(v);
6591   v->Synchronize(VisitorSynchronization::kHandleScope);
6592
6593   // Iterate over the builtin code objects and code stubs in the
6594   // heap. Note that it is not necessary to iterate over code objects
6595   // on scavenge collections.
6596   if (mode != VISIT_ALL_IN_SCAVENGE) {
6597     isolate_->builtins()->IterateBuiltins(v);
6598   }
6599   v->Synchronize(VisitorSynchronization::kBuiltins);
6600
6601   // Iterate over global handles.
6602   switch (mode) {
6603     case VISIT_ONLY_STRONG:
6604       isolate_->global_handles()->IterateStrongRoots(v);
6605       break;
6606     case VISIT_ALL_IN_SCAVENGE:
6607       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6608       break;
6609     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6610     case VISIT_ALL:
6611       isolate_->global_handles()->IterateAllRoots(v);
6612       break;
6613   }
6614   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6615
6616   // Iterate over pointers being held by inactive threads.
6617   isolate_->thread_manager()->Iterate(v);
6618   v->Synchronize(VisitorSynchronization::kThreadManager);
6619
6620   // Iterate over the pointers the Serialization/Deserialization code is
6621   // holding.
6622   // During garbage collection this keeps the partial snapshot cache alive.
6623   // During deserialization of the startup snapshot this creates the partial
6624   // snapshot cache and deserializes the objects it refers to.  During
6625   // serialization this does nothing, since the partial snapshot cache is
6626   // empty.  However the next thing we do is create the partial snapshot,
6627   // filling up the partial snapshot cache with objects it needs as we go.
6628   SerializerDeserializer::Iterate(v);
6629   // We don't do a v->Synchronize call here, because in debug mode that will
6630   // output a flag to the snapshot.  However at this point the serializer and
6631   // deserializer are deliberately a little unsynchronized (see above) so the
6632   // checking of the sync flag in the snapshot would fail.
6633 }
6634
6635
6636 // TODO(1236194): Since the heap size is configurable on the command line
6637 // and through the API, we should gracefully handle the case that the heap
6638 // size is not big enough to fit all the initial objects.
6639 bool Heap::ConfigureHeap(int max_semispace_size,
6640                          intptr_t max_old_gen_size,
6641                          intptr_t max_executable_size) {
6642   if (HasBeenSetUp()) return false;
6643
6644   if (FLAG_stress_compaction) {
6645     // This will cause more frequent GCs when stressing.
6646     max_semispace_size_ = Page::kPageSize;
6647   }
6648
6649   if (max_semispace_size > 0) {
6650     if (max_semispace_size < Page::kPageSize) {
6651       max_semispace_size = Page::kPageSize;
6652       if (FLAG_trace_gc) {
6653         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6654                  Page::kPageSize >> 10);
6655       }
6656     }
6657     max_semispace_size_ = max_semispace_size;
6658   }
6659
6660   if (Snapshot::IsEnabled()) {
6661     // If we are using a snapshot we always reserve the default amount
6662     // of memory for each semispace because code in the snapshot has
6663     // write-barrier code that relies on the size and alignment of new
6664     // space.  We therefore cannot use a larger max semispace size
6665     // than the default reserved semispace size.
6666     if (max_semispace_size_ > reserved_semispace_size_) {
6667       max_semispace_size_ = reserved_semispace_size_;
6668       if (FLAG_trace_gc) {
6669         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6670                  reserved_semispace_size_ >> 10);
6671       }
6672     }
6673   } else {
6674     // If we are not using snapshots we reserve space for the actual
6675     // max semispace size.
6676     reserved_semispace_size_ = max_semispace_size_;
6677   }
6678
6679   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6680   if (max_executable_size > 0) {
6681     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6682   }
6683
6684   // The max executable size must be less than or equal to the max old
6685   // generation size.
6686   if (max_executable_size_ > max_old_generation_size_) {
6687     max_executable_size_ = max_old_generation_size_;
6688   }
6689
6690   // The new space size must be a power of two to support single-bit testing
6691   // for containment.
6692   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6693   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6694   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6695
6696   // The external allocation limit should be below 256 MB on all architectures
6697   // to avoid unnecessary low memory notifications, as that is the threshold
6698   // for some embedders.
6699   external_allocation_limit_ = 12 * max_semispace_size_;
6700   ASSERT(external_allocation_limit_ <= 256 * MB);
6701
6702   // The old generation is paged and needs at least one page for each space.
6703   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6704   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6705                                                        Page::kPageSize),
6706                                  RoundUp(max_old_generation_size_,
6707                                          Page::kPageSize));
6708
6709   configured_ = true;
6710   return true;
6711 }
6712
6713
6714 bool Heap::ConfigureHeapDefault() {
6715   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6716                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6717                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6718 }
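
// Sketch of a non-default configuration. ConfigureHeap takes byte counts and
// must run before Heap::SetUp(), since it refuses to reconfigure a live
// heap; the numbers below are illustrative, not recommendations:
//
//   heap->ConfigureHeap(8 * MB,      // max semispace size
//                       700 * MB,    // max old generation size
//                       256 * MB);   // max executable size
//
// The semispace sizes are rounded up to powers of two and the old generation
// size to a multiple of Page::kPageSize, so the effective limits may end up
// slightly larger than requested.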
6719
6720
6721 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6722   *stats->start_marker = HeapStats::kStartMarker;
6723   *stats->end_marker = HeapStats::kEndMarker;
6724   *stats->new_space_size = new_space_.SizeAsInt();
6725   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6726   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6727   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6728   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6729   *stats->old_data_space_capacity = old_data_space_->Capacity();
6730   *stats->code_space_size = code_space_->SizeOfObjects();
6731   *stats->code_space_capacity = code_space_->Capacity();
6732   *stats->map_space_size = map_space_->SizeOfObjects();
6733   *stats->map_space_capacity = map_space_->Capacity();
6734   *stats->cell_space_size = cell_space_->SizeOfObjects();
6735   *stats->cell_space_capacity = cell_space_->Capacity();
6736   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6737   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6738   *stats->lo_space_size = lo_space_->Size();
6739   isolate_->global_handles()->RecordStats(stats);
6740   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6741   *stats->memory_allocator_capacity =
6742       isolate()->memory_allocator()->Size() +
6743       isolate()->memory_allocator()->Available();
6744   *stats->os_error = OS::GetLastError();
6745
6746   if (take_snapshot) {
6747     HeapIterator iterator(this);
6748     for (HeapObject* obj = iterator.next();
6749          obj != NULL;
6750          obj = iterator.next()) {
6751       InstanceType type = obj->map()->instance_type();
6752       ASSERT(0 <= type && type <= LAST_TYPE);
6753       stats->objects_per_type[type]++;
6754       stats->size_per_type[type] += obj->Size();
6755     }
6756   }
6757 }
6758
6759
6760 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6761   return old_pointer_space_->SizeOfObjects()
6762       + old_data_space_->SizeOfObjects()
6763       + code_space_->SizeOfObjects()
6764       + map_space_->SizeOfObjects()
6765       + cell_space_->SizeOfObjects()
6766       + property_cell_space_->SizeOfObjects()
6767       + lo_space_->SizeOfObjects();
6768 }
6769
6770
6771 intptr_t Heap::PromotedExternalMemorySize() {
6772   if (amount_of_external_allocated_memory_
6773       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6774   return amount_of_external_allocated_memory_
6775       - amount_of_external_allocated_memory_at_last_global_gc_;
6776 }
6777
6778
6779 V8_DECLARE_ONCE(initialize_gc_once);
6780
6781 static void InitializeGCOnce() {
6782   InitializeScavengingVisitorsTables();
6783   NewSpaceScavenger::Initialize();
6784   MarkCompactCollector::Initialize();
6785 }
6786
6787
6788 bool Heap::SetUp() {
6789 #ifdef DEBUG
6790   allocation_timeout_ = FLAG_gc_interval;
6791 #endif
6792
6793   // Initialize heap spaces and initial maps and objects. Whenever something
6794   // goes wrong, just return false. The caller should check the results and
6795   // call Heap::TearDown() to release allocated memory.
6796   //
6797   // If the heap is not yet configured (e.g. through the API), configure it.
6798   // Configuration is based on the flags new-space-size (really the semispace
6799   // size) and old-space-size if set or the initial values of semispace_size_
6800   // and old_generation_size_ otherwise.
6801   if (!configured_) {
6802     if (!ConfigureHeapDefault()) return false;
6803   }
6804
6805   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6806
6807   MarkMapPointersAsEncoded(false);
6808
6809   // Set up memory allocator.
6810   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6811       return false;
6812
6813   // Set up new space.
6814   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6815     return false;
6816   }
6817
6818   // Initialize old pointer space.
6819   old_pointer_space_ =
6820       new OldSpace(this,
6821                    max_old_generation_size_,
6822                    OLD_POINTER_SPACE,
6823                    NOT_EXECUTABLE);
6824   if (old_pointer_space_ == NULL) return false;
6825   if (!old_pointer_space_->SetUp()) return false;
6826
6827   // Initialize old data space.
6828   old_data_space_ =
6829       new OldSpace(this,
6830                    max_old_generation_size_,
6831                    OLD_DATA_SPACE,
6832                    NOT_EXECUTABLE);
6833   if (old_data_space_ == NULL) return false;
6834   if (!old_data_space_->SetUp()) return false;
6835
6836   // Initialize the code space, set its maximum capacity to the old
6837   // generation size. It needs executable memory.
6838   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6839   // virtual address space, so that they can call each other with near calls.
6840   if (code_range_size_ > 0) {
6841     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6842       return false;
6843     }
6844   }
6845
6846   code_space_ =
6847       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6848   if (code_space_ == NULL) return false;
6849   if (!code_space_->SetUp()) return false;
6850
6851   // Initialize map space.
6852   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6853   if (map_space_ == NULL) return false;
6854   if (!map_space_->SetUp()) return false;
6855
6856   // Initialize simple cell space.
6857   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6858   if (cell_space_ == NULL) return false;
6859   if (!cell_space_->SetUp()) return false;
6860
6861   // Initialize global property cell space.
6862   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6863                                                PROPERTY_CELL_SPACE);
6864   if (property_cell_space_ == NULL) return false;
6865   if (!property_cell_space_->SetUp()) return false;
6866
6867   // The large object space may contain code or data.  We set the memory
6868   // to be non-executable here for safety, but this means we need to enable it
6869   // explicitly when allocating large code objects.
6870   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6871   if (lo_space_ == NULL) return false;
6872   if (!lo_space_->SetUp()) return false;
6873
6874   // Set up the seed that is used to randomize the string hash function.
6875   ASSERT(hash_seed() == 0);
6876   if (FLAG_randomize_hashes) {
6877     if (FLAG_hash_seed == 0) {
6878       set_hash_seed(
6879           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6880     } else {
6881       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6882     }
6883   }
6884
6885   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6886   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6887
6888   store_buffer()->SetUp();
6889
6890   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6891 #ifdef DEBUG
6892   relocation_mutex_locked_by_optimizer_thread_ = false;
6893 #endif  // DEBUG
6894
6895   return true;
6896 }
6897
6898
6899 bool Heap::CreateHeapObjects() {
6900   // Create initial maps.
6901   if (!CreateInitialMaps()) return false;
6902   if (!CreateApiObjects()) return false;
6903
6904   // Create initial objects
6905   if (!CreateInitialObjects()) return false;
6906
6907   native_contexts_list_ = undefined_value();
6908   array_buffers_list_ = undefined_value();
6909   allocation_sites_list_ = undefined_value();
6910   return true;
6911 }
6912
6913
6914 void Heap::SetStackLimits() {
6915   ASSERT(isolate_ != NULL);
6916   ASSERT(isolate_ == isolate());
6917   // On 64-bit machines, pointers are generally out of range of Smis.  We write
6918   // something that looks like an out of range Smi to the GC.
6919
6920   // Set up the special root array entries containing the stack limits.
6921   // These are actually addresses, but the tag makes the GC ignore it.
6922   roots_[kStackLimitRootIndex] =
6923       reinterpret_cast<Object*>(
6924           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6925   roots_[kRealStackLimitRootIndex] =
6926       reinterpret_cast<Object*>(
6927           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6928 }
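
// Worked example of the masking above, using the usual tag values
// kSmiTag == 0 and kSmiTagMask == 1: an even limit address such as
// 0x7fff0004 maps to (0x7fff0004 & ~1) | 0 == 0x7fff0004, while an odd
// address 0x7fff0005 maps to 0x7fff0004. Clearing the low bit gives the word
// a valid Smi tag, so the GC treats the root entry as an immediate and never
// tries to follow it as a pointer.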
6929
6930
6931 void Heap::TearDown() {
6932 #ifdef VERIFY_HEAP
6933   if (FLAG_verify_heap) {
6934     Verify();
6935   }
6936 #endif
6937
6938   if (FLAG_print_cumulative_gc_stat) {
6939     PrintF("\n");
6940     PrintF("gc_count=%d ", gc_count_);
6941     PrintF("mark_sweep_count=%d ", ms_count_);
6942     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6943     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6944     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6945     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6946            get_max_alive_after_gc());
6947     PrintF("total_marking_time=%.1f ", marking_time());
6948     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6949     PrintF("\n\n");
6950   }
6951
6952   TearDownArrayBuffers();
6953
6954   isolate_->global_handles()->TearDown();
6955
6956   external_string_table_.TearDown();
6957
6958   new_space_.TearDown();
6959
6960   if (old_pointer_space_ != NULL) {
6961     old_pointer_space_->TearDown();
6962     delete old_pointer_space_;
6963     old_pointer_space_ = NULL;
6964   }
6965
6966   if (old_data_space_ != NULL) {
6967     old_data_space_->TearDown();
6968     delete old_data_space_;
6969     old_data_space_ = NULL;
6970   }
6971
6972   if (code_space_ != NULL) {
6973     code_space_->TearDown();
6974     delete code_space_;
6975     code_space_ = NULL;
6976   }
6977
6978   if (map_space_ != NULL) {
6979     map_space_->TearDown();
6980     delete map_space_;
6981     map_space_ = NULL;
6982   }
6983
6984   if (cell_space_ != NULL) {
6985     cell_space_->TearDown();
6986     delete cell_space_;
6987     cell_space_ = NULL;
6988   }
6989
6990   if (property_cell_space_ != NULL) {
6991     property_cell_space_->TearDown();
6992     delete property_cell_space_;
6993     property_cell_space_ = NULL;
6994   }
6995
6996   if (lo_space_ != NULL) {
6997     lo_space_->TearDown();
6998     delete lo_space_;
6999     lo_space_ = NULL;
7000   }
7001
7002   store_buffer()->TearDown();
7003   incremental_marking()->TearDown();
7004
7005   isolate_->memory_allocator()->TearDown();
7006
7007   delete relocation_mutex_;
7008 }
7009
7010
7011 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
7012   ASSERT(callback != NULL);
7013   GCPrologueCallbackPair pair(callback, gc_type);
7014   ASSERT(!gc_prologue_callbacks_.Contains(pair));
7015   return gc_prologue_callbacks_.Add(pair);
7016 }
7017
7018
7019 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
7020   ASSERT(callback != NULL);
7021   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
7022     if (gc_prologue_callbacks_[i].callback == callback) {
7023       gc_prologue_callbacks_.Remove(i);
7024       return;
7025     }
7026   }
7027   UNREACHABLE();
7028 }
7029
7030
7031 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
7032   ASSERT(callback != NULL);
7033   GCEpilogueCallbackPair pair(callback, gc_type);
7034   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7035   return gc_epilogue_callbacks_.Add(pair);
7036 }
7037
7038
7039 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7040   ASSERT(callback != NULL);
7041   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7042     if (gc_epilogue_callbacks_[i].callback == callback) {
7043       gc_epilogue_callbacks_.Remove(i);
7044       return;
7045     }
7046   }
7047   UNREACHABLE();
7048 }
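
// Hedged embedder-side sketch of registering one of these callbacks through
// the public API of this era (v8::V8::AddGCPrologueCallback is assumed to
// end up in Heap::AddGCPrologueCallback above):
//
//   static void OnGCStart(v8::GCType type, v8::GCCallbackFlags flags) {
//     // e.g. record a timestamp or pause background work
//   }
//
//   v8::V8::AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);
//   ...
//   v8::V8::RemoveGCPrologueCallback(OnGCStart);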
7049
7050
7051 #ifdef DEBUG
7052
7053 class PrintHandleVisitor: public ObjectVisitor {
7054  public:
7055   void VisitPointers(Object** start, Object** end) {
7056     for (Object** p = start; p < end; p++)
7057       PrintF("  handle %p to %p\n",
7058              reinterpret_cast<void*>(p),
7059              reinterpret_cast<void*>(*p));
7060   }
7061 };
7062
7063
7064 void Heap::PrintHandles() {
7065   PrintF("Handles:\n");
7066   PrintHandleVisitor v;
7067   isolate_->handle_scope_implementer()->Iterate(&v);
7068 }
7069
7070 #endif
7071
7072
7073 Space* AllSpaces::next() {
7074   switch (counter_++) {
7075     case NEW_SPACE:
7076       return heap_->new_space();
7077     case OLD_POINTER_SPACE:
7078       return heap_->old_pointer_space();
7079     case OLD_DATA_SPACE:
7080       return heap_->old_data_space();
7081     case CODE_SPACE:
7082       return heap_->code_space();
7083     case MAP_SPACE:
7084       return heap_->map_space();
7085     case CELL_SPACE:
7086       return heap_->cell_space();
7087     case PROPERTY_CELL_SPACE:
7088       return heap_->property_cell_space();
7089     case LO_SPACE:
7090       return heap_->lo_space();
7091     default:
7092       return NULL;
7093   }
7094 }
7095
7096
7097 PagedSpace* PagedSpaces::next() {
7098   switch (counter_++) {
7099     case OLD_POINTER_SPACE:
7100       return heap_->old_pointer_space();
7101     case OLD_DATA_SPACE:
7102       return heap_->old_data_space();
7103     case CODE_SPACE:
7104       return heap_->code_space();
7105     case MAP_SPACE:
7106       return heap_->map_space();
7107     case CELL_SPACE:
7108       return heap_->cell_space();
7109     case PROPERTY_CELL_SPACE:
7110       return heap_->property_cell_space();
7111     default:
7112       return NULL;
7113   }
7114 }
7115
7116
7117
7118 OldSpace* OldSpaces::next() {
7119   switch (counter_++) {
7120     case OLD_POINTER_SPACE:
7121       return heap_->old_pointer_space();
7122     case OLD_DATA_SPACE:
7123       return heap_->old_data_space();
7124     case CODE_SPACE:
7125       return heap_->code_space();
7126     default:
7127       return NULL;
7128   }
7129 }
7130
7131
7132 SpaceIterator::SpaceIterator(Heap* heap)
7133     : heap_(heap),
7134       current_space_(FIRST_SPACE),
7135       iterator_(NULL),
7136       size_func_(NULL) {
7137 }
7138
7139
7140 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7141     : heap_(heap),
7142       current_space_(FIRST_SPACE),
7143       iterator_(NULL),
7144       size_func_(size_func) {
7145 }
7146
7147
7148 SpaceIterator::~SpaceIterator() {
7149   // Delete active iterator if any.
7150   delete iterator_;
7151 }
7152
7153
7154 bool SpaceIterator::has_next() {
7155   // Iterate until no more spaces.
7156   return current_space_ != LAST_SPACE;
7157 }
7158
7159
7160 ObjectIterator* SpaceIterator::next() {
7161   if (iterator_ != NULL) {
7162     delete iterator_;
7163     iterator_ = NULL;
7164     // Move to the next space
7165     current_space_++;
7166     if (current_space_ > LAST_SPACE) {
7167       return NULL;
7168     }
7169   }
7170
7171   // Return iterator for the new current space.
7172   return CreateIterator();
7173 }
7174
7175
7176 // Create an iterator for the space to iterate.
7177 ObjectIterator* SpaceIterator::CreateIterator() {
7178   ASSERT(iterator_ == NULL);
7179
7180   switch (current_space_) {
7181     case NEW_SPACE:
7182       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7183       break;
7184     case OLD_POINTER_SPACE:
7185       iterator_ =
7186           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7187       break;
7188     case OLD_DATA_SPACE:
7189       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7190       break;
7191     case CODE_SPACE:
7192       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7193       break;
7194     case MAP_SPACE:
7195       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7196       break;
7197     case CELL_SPACE:
7198       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7199       break;
7200     case PROPERTY_CELL_SPACE:
7201       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7202                                          size_func_);
7203       break;
7204     case LO_SPACE:
7205       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7206       break;
7207   }
7208
7209   // Return the newly allocated iterator.
7210   ASSERT(iterator_ != NULL);
7211   return iterator_;
7212 }
7213
7214
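// Interface used by HeapIterator to decide which heap objects to skip.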
7215 class HeapObjectsFilter {
7216  public:
7217   virtual ~HeapObjectsFilter() {}
7218   virtual bool SkipObject(HeapObject* object) = 0;
7219 };
7220
7221
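// Filters out objects that are unreachable from the roots: the constructor
// marks everything reachable, SkipObject() skips unmarked objects, and the
// destructor clears the mark bits again.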
7222 class UnreachableObjectsFilter : public HeapObjectsFilter {
7223  public:
7224   UnreachableObjectsFilter() {
7225     MarkReachableObjects();
7226   }
7227
7228   ~UnreachableObjectsFilter() {
7229     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7230   }
7231
7232   bool SkipObject(HeapObject* object) {
7233     MarkBit mark_bit = Marking::MarkBitFrom(object);
7234     return !mark_bit.Get();
7235   }
7236
7237  private:
7238   class MarkingVisitor : public ObjectVisitor {
7239    public:
7240     MarkingVisitor() : marking_stack_(10) {}
7241
7242     void VisitPointers(Object** start, Object** end) {
7243       for (Object** p = start; p < end; p++) {
7244         if (!(*p)->IsHeapObject()) continue;
7245         HeapObject* obj = HeapObject::cast(*p);
7246         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7247         if (!mark_bit.Get()) {
7248           mark_bit.Set();
7249           marking_stack_.Add(obj);
7250         }
7251       }
7252     }
7253
7254     void TransitiveClosure() {
7255       while (!marking_stack_.is_empty()) {
7256         HeapObject* obj = marking_stack_.RemoveLast();
7257         obj->Iterate(this);
7258       }
7259     }
7260
7261    private:
7262     List<HeapObject*> marking_stack_;
7263   };
7264
7265   void MarkReachableObjects() {
7266     Heap* heap = Isolate::Current()->heap();
7267     MarkingVisitor visitor;
7268     heap->IterateRoots(&visitor, VISIT_ALL);
7269     visitor.TransitiveClosure();
7270   }
7271
7272   DisallowHeapAllocation no_allocation_;
7273 };
7274
7275
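// HeapIterator walks every object in every space, optionally skipping
// unreachable objects. Typical usage (a minimal sketch; Process() is just a
// placeholder for caller code):
//
//   HeapIterator iterator(heap);
//   for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next())
//     Process(obj);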
7276 HeapIterator::HeapIterator(Heap* heap)
7277     : heap_(heap),
7278       filtering_(HeapIterator::kNoFiltering),
7279       filter_(NULL) {
7280   Init();
7281 }
7282
7283
7284 HeapIterator::HeapIterator(Heap* heap,
7285                            HeapIterator::HeapObjectsFiltering filtering)
7286     : heap_(heap),
7287       filtering_(filtering),
7288       filter_(NULL) {
7289   Init();
7290 }
7291
7292
7293 HeapIterator::~HeapIterator() {
7294   Shutdown();
7295 }
7296
7297
7298 void HeapIterator::Init() {
7299   // Start the iteration.
7300   space_iterator_ = new SpaceIterator(heap_);
7301   switch (filtering_) {
7302     case kFilterUnreachable:
7303       filter_ = new UnreachableObjectsFilter;
7304       break;
7305     default:
7306       break;
7307   }
7308   object_iterator_ = space_iterator_->next();
7309 }
7310
7311
7312 void HeapIterator::Shutdown() {
7313 #ifdef DEBUG
7314   // Assert that in filtering mode we have iterated through all
7315   // objects. Otherwise, the heap will be left in an inconsistent state.
7316   if (filtering_ != kNoFiltering) {
7317     ASSERT(object_iterator_ == NULL);
7318   }
7319 #endif
7320   // Make sure the last iterator is deallocated.
7321   delete space_iterator_;
7322   space_iterator_ = NULL;
7323   object_iterator_ = NULL;
7324   delete filter_;
7325   filter_ = NULL;
7326 }
7327
7328
7329 HeapObject* HeapIterator::next() {
7330   if (filter_ == NULL) return NextObject();
7331
7332   HeapObject* obj = NextObject();
7333   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7334   return obj;
7335 }
7336
7337
7338 HeapObject* HeapIterator::NextObject() {
7339   // No iterator means we are done.
7340   if (object_iterator_ == NULL) return NULL;
7341
7342   if (HeapObject* obj = object_iterator_->next_object()) {
7343     // If the current iterator has more objects we are fine.
7344     return obj;
7345   } else {
7346     // Go through the spaces looking for one that has objects.
7347     while (space_iterator_->has_next()) {
7348       object_iterator_ = space_iterator_->next();
7349       if (HeapObject* obj = object_iterator_->next_object()) {
7350         return obj;
7351       }
7352     }
7353   }
7354   // Done with the last space.
7355   object_iterator_ = NULL;
7356   return NULL;
7357 }
7358
7359
7360 void HeapIterator::reset() {
7361   // Restart the iterator.
7362   Shutdown();
7363   Init();
7364 }
7365
7366
7367 #ifdef DEBUG
7368
7369 Object* const PathTracer::kAnyGlobalObject = NULL;
7370
7371 class PathTracer::MarkVisitor: public ObjectVisitor {
7372  public:
7373   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7374   void VisitPointers(Object** start, Object** end) {
7375     // Scan all HeapObject pointers in [start, end)
7376     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7377       if ((*p)->IsHeapObject())
7378         tracer_->MarkRecursively(p, this);
7379     }
7380   }
7381
7382  private:
7383   PathTracer* tracer_;
7384 };
7385
7386
7387 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7388  public:
7389   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7390   void VisitPointers(Object** start, Object** end) {
7391     // Scan all HeapObject pointers in [start, end)
7392     for (Object** p = start; p < end; p++) {
7393       if ((*p)->IsHeapObject())
7394         tracer_->UnmarkRecursively(p, this);
7395     }
7396   }
7397
7398  private:
7399   PathTracer* tracer_;
7400 };
7401
7402
7403 void PathTracer::VisitPointers(Object** start, Object** end) {
7404   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7405   // Visit all HeapObject pointers in [start, end)
7406   for (Object** p = start; !done && (p < end); p++) {
7407     if ((*p)->IsHeapObject()) {
7408       TracePathFrom(p);
7409       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7410     }
7411   }
7412 }
7413
7414
7415 void PathTracer::Reset() {
7416   found_target_ = false;
7417   object_stack_.Clear();
7418 }
7419
7420
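// Traces from a single root slot: the marking pass records the path to the
// target on object_stack_, the unmarking pass restores the map pointers, and
// ProcessResults() prints the path if the target was found.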
7421 void PathTracer::TracePathFrom(Object** root) {
7422   ASSERT((search_target_ == kAnyGlobalObject) ||
7423          search_target_->IsHeapObject());
7424   found_target_in_trace_ = false;
7425   Reset();
7426
7427   MarkVisitor mark_visitor(this);
7428   MarkRecursively(root, &mark_visitor);
7429
7430   UnmarkVisitor unmark_visitor(this);
7431   UnmarkRecursively(root, &unmark_visitor);
7432
7433   ProcessResults();
7434 }
7435
7436
7437 static bool SafeIsNativeContext(HeapObject* obj) {
7438   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7439 }
7440
7441
7442 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7443   if (!(*p)->IsHeapObject()) return;
7444
7445   HeapObject* obj = HeapObject::cast(*p);
7446
7447   Object* map = obj->map();
7448
7449   if (!map->IsHeapObject()) return;  // visited before
7450
7451   if (found_target_in_trace_) return;  // stop if target found
7452   object_stack_.Add(obj);
7453   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7454       (obj == search_target_)) {
7455     found_target_in_trace_ = true;
7456     found_target_ = true;
7457     return;
7458   }
7459
7460   bool is_native_context = SafeIsNativeContext(obj);
7461
7462   // not visited yet
7463   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7464
7465   Address map_addr = map_p->address();
7466
7467   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7468
7469   // Scan the object body.
7470   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7471     // This is specialized to scan Contexts properly.
7472     Object** start = reinterpret_cast<Object**>(obj->address() +
7473                                                 Context::kHeaderSize);
7474     Object** end = reinterpret_cast<Object**>(obj->address() +
7475         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7476     mark_visitor->VisitPointers(start, end);
7477   } else {
7478     obj->IterateBody(map_p->instance_type(),
7479                      obj->SizeFromMap(map_p),
7480                      mark_visitor);
7481   }
7482
7483   // Scan the map after the body because the body is a lot more interesting
7484   // when doing leak detection.
7485   MarkRecursively(&map, mark_visitor);
7486
7487   if (!found_target_in_trace_)  // don't pop if found the target
7488     object_stack_.RemoveLast();
7489 }
7490
7491
7492 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7493   if (!(*p)->IsHeapObject()) return;
7494
7495   HeapObject* obj = HeapObject::cast(*p);
7496
7497   Object* map = obj->map();
7498
7499   if (map->IsHeapObject()) return;  // unmarked already
7500
7501   Address map_addr = reinterpret_cast<Address>(map);
7502
7503   map_addr -= kMarkTag;
7504
7505   ASSERT_TAG_ALIGNED(map_addr);
7506
7507   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7508
7509   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7510
7511   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7512
7513   obj->IterateBody(Map::cast(map_p)->instance_type(),
7514                    obj->SizeFromMap(Map::cast(map_p)),
7515                    unmark_visitor);
7516 }
7517
7518
7519 void PathTracer::ProcessResults() {
7520   if (found_target_) {
7521     PrintF("=====================================\n");
7522     PrintF("====        Path to object       ====\n");
7523     PrintF("=====================================\n\n");
7524
7525     ASSERT(!object_stack_.is_empty());
7526     for (int i = 0; i < object_stack_.length(); i++) {
7527       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7528       Object* obj = object_stack_[i];
7529       obj->Print();
7530     }
7531     PrintF("=====================================\n");
7532   }
7533 }
7534
7535
7536 // Triggers a depth-first traversal of reachable objects from one
7537 // given root object and finds a path to a specific heap object and
7538 // prints it.
7539 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7540   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7541   tracer.VisitPointer(&root);
7542 }
7543
7544
7545 // Triggers a depth-first traversal of reachable objects from roots
7546 // and finds a path to a specific heap object and prints it.
7547 void Heap::TracePathToObject(Object* target) {
7548   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7549   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7550 }
7551
7552
7553 // Triggers a depth-first traversal of reachable objects from roots
7554 // and finds a path to any global object and prints it. Useful for
7555 // determining the source for leaks of global objects.
7556 void Heap::TracePathToGlobal() {
7557   PathTracer tracer(PathTracer::kAnyGlobalObject,
7558                     PathTracer::FIND_ALL,
7559                     VISIT_ALL);
7560   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7561 }
7562 #endif
7563
7564
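// Sums the free-list and wasted bytes of the old pointer, old data and code
// spaces.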
7565 static intptr_t CountTotalHolesSize(Heap* heap) {
7566   intptr_t holes_size = 0;
7567   OldSpaces spaces(heap);
7568   for (OldSpace* space = spaces.next();
7569        space != NULL;
7570        space = spaces.next()) {
7571     holes_size += space->Waste() + space->Available();
7572   }
7573   return holes_size;
7574 }
7575
7576
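// Captures heap and incremental-marking statistics at the start of a GC;
// the matching end-of-GC statistics are reported from the destructor.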
7577 GCTracer::GCTracer(Heap* heap,
7578                    const char* gc_reason,
7579                    const char* collector_reason)
7580     : start_time_(0.0),
7581       start_object_size_(0),
7582       start_memory_size_(0),
7583       gc_count_(0),
7584       full_gc_count_(0),
7585       allocated_since_last_gc_(0),
7586       spent_in_mutator_(0),
7587       promoted_objects_size_(0),
7588       nodes_died_in_new_space_(0),
7589       nodes_copied_in_new_space_(0),
7590       nodes_promoted_(0),
7591       heap_(heap),
7592       gc_reason_(gc_reason),
7593       collector_reason_(collector_reason) {
7594   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7595   start_time_ = OS::TimeCurrentMillis();
7596   start_object_size_ = heap_->SizeOfObjects();
7597   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7598
7599   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7600     scopes_[i] = 0;
7601   }
7602
7603   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7604
7605   allocated_since_last_gc_ =
7606       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7607
7608   if (heap_->last_gc_end_timestamp_ > 0) {
7609     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7610   }
7611
7612   steps_count_ = heap_->incremental_marking()->steps_count();
7613   steps_took_ = heap_->incremental_marking()->steps_took();
7614   longest_step_ = heap_->incremental_marking()->longest_step();
7615   steps_count_since_last_gc_ =
7616       heap_->incremental_marking()->steps_count_since_last_gc();
7617   steps_took_since_last_gc_ =
7618       heap_->incremental_marking()->steps_took_since_last_gc();
7619 }
7620
7621
7622 GCTracer::~GCTracer() {
7623   // Printf ONE line iff flag is set.
7624   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7625
7626   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7627
7628   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7629   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7630
7631   double time = heap_->last_gc_end_timestamp_ - start_time_;
7632
7633   // Update cumulative GC statistics if required.
7634   if (FLAG_print_cumulative_gc_stat) {
7635     heap_->total_gc_time_ms_ += time;
7636     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7637     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7638                                      heap_->alive_after_last_gc_);
7639     if (!first_gc) {
7640       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7641                                    spent_in_mutator_);
7642     }
7643   } else if (FLAG_trace_gc_verbose) {
7644     heap_->total_gc_time_ms_ += time;
7645   }
7646
7647   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7648
7649   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7650
7651   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7652   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7653
7654   if (!FLAG_trace_gc_nvp) {
7655     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7656
7657     double end_memory_size_mb =
7658         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7659
7660     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7661            CollectorString(),
7662            static_cast<double>(start_object_size_) / MB,
7663            static_cast<double>(start_memory_size_) / MB,
7664            SizeOfHeapObjects(),
7665            end_memory_size_mb);
7666
7667     if (external_time > 0) PrintF("%d / ", external_time);
7668     PrintF("%.1f ms", time);
7669     if (steps_count_ > 0) {
7670       if (collector_ == SCAVENGER) {
7671         PrintF(" (+ %.1f ms in %d steps since last GC)",
7672                steps_took_since_last_gc_,
7673                steps_count_since_last_gc_);
7674       } else {
7675         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7676                    "biggest step %.1f ms)",
7677                steps_took_,
7678                steps_count_,
7679                longest_step_);
7680       }
7681     }
7682
7683     if (gc_reason_ != NULL) {
7684       PrintF(" [%s]", gc_reason_);
7685     }
7686
7687     if (collector_reason_ != NULL) {
7688       PrintF(" [%s]", collector_reason_);
7689     }
7690
7691     PrintF(".\n");
7692   } else {
7693     PrintF("pause=%.1f ", time);
7694     PrintF("mutator=%.1f ", spent_in_mutator_);
7695     PrintF("gc=");
7696     switch (collector_) {
7697       case SCAVENGER:
7698         PrintF("s");
7699         break;
7700       case MARK_COMPACTOR:
7701         PrintF("ms");
7702         break;
7703       default:
7704         UNREACHABLE();
7705     }
7706     PrintF(" ");
7707
7708     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7709     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7710     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7711     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7712     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7713     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7714     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7715     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7716     PrintF("compaction_ptrs=%.1f ",
7717         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7718     PrintF("intracompaction_ptrs=%.1f ",
7719         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7720     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7721     PrintF("weakcollection_process=%.1f ",
7722         scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7723     PrintF("weakcollection_clear=%.1f ",
7724         scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7725
7726     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7727     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7728     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7729            in_free_list_or_wasted_before_gc_);
7730     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7731
7732     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7733     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7734     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7735     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7736     PrintF("nodes_promoted=%d ", nodes_promoted_);
7737
7738     if (collector_ == SCAVENGER) {
7739       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7740       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7741     } else {
7742       PrintF("stepscount=%d ", steps_count_);
7743       PrintF("stepstook=%.1f ", steps_took_);
7744       PrintF("longeststep=%.1f ", longest_step_);
7745     }
7746
7747     PrintF("\n");
7748   }
7749
7750   heap_->PrintShortHeapStatistics();
7751 }
7752
7753
7754 const char* GCTracer::CollectorString() {
7755   switch (collector_) {
7756     case SCAVENGER:
7757       return "Scavenge";
7758     case MARK_COMPACTOR:
7759       return "Mark-sweep";
7760   }
7761   return "Unknown GC";
7762 }
7763
7764
7765 int KeyedLookupCache::Hash(Map* map, Name* name) {
7766   // Uses only lower 32 bits if pointers are larger.
7767   uintptr_t addr_hash =
7768       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7769   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7770 }
7771
7772
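// Probes the kEntriesPerBucket entries of the bucket chosen by Hash() and
// returns the cached field offset, or kNotFound on a miss.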
7773 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7774   int index = (Hash(map, name) & kHashMask);
7775   for (int i = 0; i < kEntriesPerBucket; i++) {
7776     Key& key = keys_[index + i];
7777     if ((key.map == map) && key.name->Equals(name)) {
7778       return field_offsets_[index + i];
7779     }
7780   }
7781   return kNotFound;
7782 }
7783
7784
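// Caches the field offset for the (map, name) pair; a name that is not
// unique is internalized first, and when the bucket has no free slot the
// existing entries are shifted down to make room at position zero.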
7785 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7786   if (!name->IsUniqueName()) {
7787     String* internalized_string;
7788     if (!HEAP->InternalizeStringIfExists(
7789             String::cast(name), &internalized_string)) {
7790       return;
7791     }
7792     name = internalized_string;
7793   }
7794   // This cache is cleared only between mark compact passes, so we expect the
7795   // cache to only contain old space names.
7796   ASSERT(!HEAP->InNewSpace(name));
7797
7798   int index = (Hash(map, name) & kHashMask);
7799   // After a GC there will be free slots, so we use them in order (this may
7800   // help to get the most frequently used one in position 0).
7801   for (int i = 0; i < kEntriesPerBucket; i++) {
7802     Key& key = keys_[index + i];
7803     Object* free_entry_indicator = NULL;
7804     if (key.map == free_entry_indicator) {
7805       key.map = map;
7806       key.name = name;
7807       field_offsets_[index + i] = field_offset;
7808       return;
7809     }
7810   }
7811   // No free entry found in this bucket, so we move them all down one and
7812   // put the new entry at position zero.
7813   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7814     Key& key = keys_[index + i];
7815     Key& key2 = keys_[index + i - 1];
7816     key = key2;
7817     field_offsets_[index + i] = field_offsets_[index + i - 1];
7818   }
7819
7820   // Write the new first entry.
7821   Key& key = keys_[index];
7822   key.map = map;
7823   key.name = name;
7824   field_offsets_[index] = field_offset;
7825 }
7826
7827
7828 void KeyedLookupCache::Clear() {
7829   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7830 }
7831
7832
7833 void DescriptorLookupCache::Clear() {
7834   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7835 }
7836
7837
7838 #ifdef DEBUG
7839 void Heap::GarbageCollectionGreedyCheck() {
7840   ASSERT(FLAG_gc_greedy);
7841   if (isolate_->bootstrapper()->IsActive()) return;
7842   if (disallow_allocation_failure()) return;
7843   CollectGarbage(NEW_SPACE);
7844 }
7845 #endif
7846
7847
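// Fills every entry with an input bit pattern that the FPU never produces,
// so the first lookup for any key misses.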
7848 TranscendentalCache::SubCache::SubCache(Type t)
7849   : type_(t),
7850     isolate_(Isolate::Current()) {
7851   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7852   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7853   for (int i = 0; i < kCacheSize; i++) {
7854     elements_[i].in[0] = in0;
7855     elements_[i].in[1] = in1;
7856     elements_[i].output = NULL;
7857   }
7858 }
7859
7860
7861 void TranscendentalCache::Clear() {
7862   for (int i = 0; i < kNumberOfCaches; i++) {
7863     if (caches_[i] != NULL) {
7864       delete caches_[i];
7865       caches_[i] = NULL;
7866     }
7867   }
7868 }
7869
7870
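// Drops entries that were cleared (overwritten with the hole value) during
// GC and moves strings that have left new space into the old-space list.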
7871 void ExternalStringTable::CleanUp() {
7872   int last = 0;
7873   for (int i = 0; i < new_space_strings_.length(); ++i) {
7874     if (new_space_strings_[i] == heap_->the_hole_value()) {
7875       continue;
7876     }
7877     if (heap_->InNewSpace(new_space_strings_[i])) {
7878       new_space_strings_[last++] = new_space_strings_[i];
7879     } else {
7880       old_space_strings_.Add(new_space_strings_[i]);
7881     }
7882   }
7883   new_space_strings_.Rewind(last);
7884   new_space_strings_.Trim();
7885
7886   last = 0;
7887   for (int i = 0; i < old_space_strings_.length(); ++i) {
7888     if (old_space_strings_[i] == heap_->the_hole_value()) {
7889       continue;
7890     }
7891     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7892     old_space_strings_[last++] = old_space_strings_[i];
7893   }
7894   old_space_strings_.Rewind(last);
7895   old_space_strings_.Trim();
7896 #ifdef VERIFY_HEAP
7897   if (FLAG_verify_heap) {
7898     Verify();
7899   }
7900 #endif
7901 }
7902
7903
7904 void ExternalStringTable::TearDown() {
7905   new_space_strings_.Free();
7906   old_space_strings_.Free();
7907 }
7908
7909
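// Pushes a chunk onto the singly-linked list that FreeQueuedChunks() will
// release later.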
7910 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7911   chunk->set_next_chunk(chunks_queued_for_free_);
7912   chunks_queued_for_free_ = chunk;
7913 }
7914
7915
7916 void Heap::FreeQueuedChunks() {
7917   if (chunks_queued_for_free_ == NULL) return;
7918   MemoryChunk* next;
7919   MemoryChunk* chunk;
7920   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7921     next = chunk->next_chunk();
7922     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7923
7924     if (chunk->owner()->identity() == LO_SPACE) {
7925       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
7926       // If FromAnyPointerAddress encounters a slot that belongs to a large
7927       // chunk queued for deletion, it will fail to find the chunk because
7928       // it tries to search the list of pages owned by the large object
7929       // space, and the queued chunks have been detached from that list.
7930       // To work around this we split the large chunk into normal
7931       // kPageSize-aligned pieces and initialize the size, owner and flags
7932       // fields of every piece. If FromAnyPointerAddress encounters a
7933       // slot in one of these pieces, it treats it as a slot on a normal Page.
7934       Address chunk_end = chunk->address() + chunk->size();
7935       MemoryChunk* inner = MemoryChunk::FromAddress(
7936           chunk->address() + Page::kPageSize);
7937       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
7938       while (inner <= inner_last) {
7939         // The size of a large chunk is always a multiple of
7940         // OS::AllocateAlignment(), so there is always
7941         // enough space for a fake MemoryChunk header.
7942         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
7943         // Guard against overflow.
7944         if (area_end < inner->address()) area_end = chunk_end;
7945         inner->SetArea(inner->address(), area_end);
7946         inner->set_size(Page::kPageSize);
7947         inner->set_owner(lo_space());
7948         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
7949         inner = MemoryChunk::FromAddress(
7950             inner->address() + Page::kPageSize);
7951       }
7952     }
7953   }
7954   isolate_->heap()->store_buffer()->Compact();
7955   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
7956   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7957     next = chunk->next_chunk();
7958     isolate_->memory_allocator()->Free(chunk);
7959   }
7960   chunks_queued_for_free_ = NULL;
7961 }
7962
7963
7964 void Heap::RememberUnmappedPage(Address page, bool compacted) {
7965   uintptr_t p = reinterpret_cast<uintptr_t>(page);
7966   // Tag the page pointer to make it findable in the dump file.
7967   if (compacted) {
7968     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
7969   } else {
7970     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
7971   }
7972   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
7973       reinterpret_cast<Address>(p);
7974   remembered_unmapped_pages_index_++;
7975   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
7976 }
7977
7978
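// Resets the per-type object count and size accumulators; when
// clear_last_time_stats is set it also resets the snapshots used by
// CheckpointObjectStats().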
7979 void Heap::ClearObjectStats(bool clear_last_time_stats) {
7980   memset(object_counts_, 0, sizeof(object_counts_));
7981   memset(object_sizes_, 0, sizeof(object_sizes_));
7982   if (clear_last_time_stats) {
7983     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
7984     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
7985   }
7986 }
7987
7988
7989 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
7990
7991
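// Reports the change in per-type object counts and sizes since the last
// checkpoint to the isolate's counters, then starts a new accounting period.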
7992 void Heap::CheckpointObjectStats() {
7993   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
7994   Counters* counters = isolate()->counters();
7995 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
7996   counters->count_of_##name()->Increment(                                      \
7997       static_cast<int>(object_counts_[name]));                                 \
7998   counters->count_of_##name()->Decrement(                                      \
7999       static_cast<int>(object_counts_last_time_[name]));                       \
8000   counters->size_of_##name()->Increment(                                       \
8001       static_cast<int>(object_sizes_[name]));                                  \
8002   counters->size_of_##name()->Decrement(                                       \
8003       static_cast<int>(object_sizes_last_time_[name]));
8004   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8005 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8006   int index;
8007 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8008   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
8009   counters->count_of_CODE_TYPE_##name()->Increment(       \
8010       static_cast<int>(object_counts_[index]));           \
8011   counters->count_of_CODE_TYPE_##name()->Decrement(       \
8012       static_cast<int>(object_counts_last_time_[index])); \
8013   counters->size_of_CODE_TYPE_##name()->Increment(        \
8014       static_cast<int>(object_sizes_[index]));            \
8015   counters->size_of_CODE_TYPE_##name()->Decrement(        \
8016       static_cast<int>(object_sizes_last_time_[index]));
8017   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8018 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8019 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8020   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8021   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8022       static_cast<int>(object_counts_[index]));           \
8023   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8024       static_cast<int>(object_counts_last_time_[index])); \
8025   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8026       static_cast<int>(object_sizes_[index]));            \
8027   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8028       static_cast<int>(object_sizes_last_time_[index]));
8029   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8030 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8031
8032   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8033   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8034   ClearObjectStats();
8035 }
8036
8037
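// Takes the relocation mutex when parallel recompilation is enabled; in
// debug builds it also records whether the optimizer thread holds the lock.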
8038 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8039   if (FLAG_parallel_recompilation) {
8040     heap_->relocation_mutex_->Lock();
8041 #ifdef DEBUG
8042     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8043         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8044 #endif  // DEBUG
8045   }
8046 }
8047
8048 } }  // namespace v8::internal