v8: upgrade to v3.20.2
[platform/upstream/nodejs.git] deps/v8/src/heap.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #include "accessors.h"
31 #include "api.h"
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "compilation-cache.h"
35 #include "cpu-profiler.h"
36 #include "debug.h"
37 #include "deoptimizer.h"
38 #include "global-handles.h"
39 #include "heap-profiler.h"
40 #include "incremental-marking.h"
41 #include "mark-compact.h"
42 #include "natives.h"
43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
45 #include "once.h"
46 #include "runtime-profiler.h"
47 #include "scopeinfo.h"
48 #include "snapshot.h"
49 #include "store-buffer.h"
50 #include "v8threads.h"
51 #include "v8utils.h"
52 #include "vm-state-inl.h"
53 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
54 #include "regexp-macro-assembler.h"
55 #include "arm/regexp-macro-assembler-arm.h"
56 #endif
57 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
58 #include "regexp-macro-assembler.h"
59 #include "mips/regexp-macro-assembler-mips.h"
60 #endif
61
62 namespace v8 {
63 namespace internal {
64
65
66 Heap::Heap()
67     : isolate_(NULL),
68 // semispace_size_ should be a power of 2 and old_generation_size_ should be
69 // a multiple of Page::kPageSize.
70 #if V8_TARGET_ARCH_X64
71 #define LUMP_OF_MEMORY (2 * MB)
72       code_range_size_(512*MB),
73 #else
74 #define LUMP_OF_MEMORY MB
75       code_range_size_(0),
76 #endif
77 #if defined(ANDROID)
78       reserved_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
79       max_semispace_size_(4 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
80       initial_semispace_size_(Page::kPageSize),
81       max_old_generation_size_(192*MB),
82       max_executable_size_(max_old_generation_size_),
83 #else
84       reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
85       max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
86       initial_semispace_size_(Page::kPageSize),
87       max_old_generation_size_(700ul * LUMP_OF_MEMORY),
88       max_executable_size_(256l * LUMP_OF_MEMORY),
89 #endif
90
91 // Variables set based on semispace_size_ and old_generation_size_ in
92 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_).
93 // The reservation will be 4 * reserved_semispace_size_ to ensure that the
94 // young generation can be aligned to its size.
95       survived_since_last_expansion_(0),
96       sweep_generation_(0),
97       always_allocate_scope_depth_(0),
98       linear_allocation_scope_depth_(0),
99       contexts_disposed_(0),
100       global_ic_age_(0),
101       flush_monomorphic_ics_(false),
102       scan_on_scavenge_pages_(0),
103       new_space_(this),
104       old_pointer_space_(NULL),
105       old_data_space_(NULL),
106       code_space_(NULL),
107       map_space_(NULL),
108       cell_space_(NULL),
109       property_cell_space_(NULL),
110       lo_space_(NULL),
111       gc_state_(NOT_IN_GC),
112       gc_post_processing_depth_(0),
113       ms_count_(0),
114       gc_count_(0),
115       remembered_unmapped_pages_index_(0),
116       unflattened_strings_length_(0),
117 #ifdef DEBUG
118       allocation_timeout_(0),
119       disallow_allocation_failure_(false),
120 #endif  // DEBUG
121       new_space_high_promotion_mode_active_(false),
122       old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
123       size_of_old_gen_at_last_old_space_gc_(0),
124       external_allocation_limit_(0),
125       amount_of_external_allocated_memory_(0),
126       amount_of_external_allocated_memory_at_last_global_gc_(0),
127       old_gen_exhausted_(false),
128       store_buffer_rebuilder_(store_buffer()),
129       hidden_string_(NULL),
130       global_gc_prologue_callback_(NULL),
131       global_gc_epilogue_callback_(NULL),
132       gc_safe_size_of_old_object_(NULL),
133       total_regexp_code_generated_(0),
134       tracer_(NULL),
135       young_survivors_after_last_gc_(0),
136       high_survival_rate_period_length_(0),
137       low_survival_rate_period_length_(0),
138       survival_rate_(0),
139       previous_survival_rate_trend_(Heap::STABLE),
140       survival_rate_trend_(Heap::STABLE),
141       max_gc_pause_(0.0),
142       total_gc_time_ms_(0.0),
143       max_alive_after_gc_(0),
144       min_in_mutator_(kMaxInt),
145       alive_after_last_gc_(0),
146       last_gc_end_timestamp_(0.0),
147       marking_time_(0.0),
148       sweeping_time_(0.0),
149       store_buffer_(this),
150       marking_(this),
151       incremental_marking_(this),
152       number_idle_notifications_(0),
153       last_idle_notification_gc_count_(0),
154       last_idle_notification_gc_count_init_(false),
155       mark_sweeps_since_idle_round_started_(0),
156       gc_count_at_last_idle_gc_(0),
157       scavenges_since_last_idle_round_(kIdleScavengeThreshold),
158       gcs_since_last_deopt_(0),
159 #ifdef VERIFY_HEAP
160       no_weak_embedded_maps_verification_scope_depth_(0),
161 #endif
162       promotion_queue_(this),
163       configured_(false),
164       chunks_queued_for_free_(NULL),
165       relocation_mutex_(NULL) {
166   // Allow build-time customization of the max semispace size. Building
167   // V8 with snapshots and a non-default max semispace size is much
168   // easier if you can define it as part of the build environment.
169 #if defined(V8_MAX_SEMISPACE_SIZE)
170   max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
171 #endif
172
173   intptr_t max_virtual = OS::MaxVirtualMemory();
174
175   if (max_virtual > 0) {
176     if (code_range_size_ > 0) {
177       // Reserve no more than 1/8 of the memory for the code range.
178       code_range_size_ = Min(code_range_size_, max_virtual >> 3);
179     }
180   }
181
182   memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
183   native_contexts_list_ = NULL;
184   array_buffers_list_ = Smi::FromInt(0);
185   mark_compact_collector_.heap_ = this;
186   external_string_table_.heap_ = this;
187   // Put a dummy entry in the remembered pages so we can find the list in
188   // the minidump even if there are no real unmapped pages.
189   RememberUnmappedPage(NULL, false);
190
191   ClearObjectStats(true);
192 }
193
194
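// Total capacity of the new space and all paged spaces.  The large object
// space is not included here (it is counted in CommittedMemory() below).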
195 intptr_t Heap::Capacity() {
196   if (!HasBeenSetUp()) return 0;
197
198   return new_space_.Capacity() +
199       old_pointer_space_->Capacity() +
200       old_data_space_->Capacity() +
201       code_space_->Capacity() +
202       map_space_->Capacity() +
203       cell_space_->Capacity() +
204       property_cell_space_->Capacity();
205 }
206
207
208 intptr_t Heap::CommittedMemory() {
209   if (!HasBeenSetUp()) return 0;
210
211   return new_space_.CommittedMemory() +
212       old_pointer_space_->CommittedMemory() +
213       old_data_space_->CommittedMemory() +
214       code_space_->CommittedMemory() +
215       map_space_->CommittedMemory() +
216       cell_space_->CommittedMemory() +
217       property_cell_space_->CommittedMemory() +
218       lo_space_->Size();
219 }
220
221
222 size_t Heap::CommittedPhysicalMemory() {
223   if (!HasBeenSetUp()) return 0;
224
225   return new_space_.CommittedPhysicalMemory() +
226       old_pointer_space_->CommittedPhysicalMemory() +
227       old_data_space_->CommittedPhysicalMemory() +
228       code_space_->CommittedPhysicalMemory() +
229       map_space_->CommittedPhysicalMemory() +
230       cell_space_->CommittedPhysicalMemory() +
231       property_cell_space_->CommittedPhysicalMemory() +
232       lo_space_->CommittedPhysicalMemory();
233 }
234
235
236 intptr_t Heap::CommittedMemoryExecutable() {
237   if (!HasBeenSetUp()) return 0;
238
239   return isolate()->memory_allocator()->SizeExecutable();
240 }
241
242
243 intptr_t Heap::Available() {
244   if (!HasBeenSetUp()) return 0;
245
246   return new_space_.Available() +
247       old_pointer_space_->Available() +
248       old_data_space_->Available() +
249       code_space_->Available() +
250       map_space_->Available() +
251       cell_space_->Available() +
252       property_cell_space_->Available();
253 }
254
255
256 bool Heap::HasBeenSetUp() {
257   return old_pointer_space_ != NULL &&
258          old_data_space_ != NULL &&
259          code_space_ != NULL &&
260          map_space_ != NULL &&
261          cell_space_ != NULL &&
262          property_cell_space_ != NULL &&
263          lo_space_ != NULL;
264 }
265
266
267 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
268   if (IntrusiveMarking::IsMarked(object)) {
269     return IntrusiveMarking::SizeOfMarkedObject(object);
270   }
271   return object->SizeFromMap(object->map());
272 }
273
274
275 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
276                                               const char** reason) {
277   // Is global GC requested?
278   if (space != NEW_SPACE) {
279     isolate_->counters()->gc_compactor_caused_by_request()->Increment();
280     *reason = "GC in old space requested";
281     return MARK_COMPACTOR;
282   }
283
284   if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
285     *reason = "GC in old space forced by flags";
286     return MARK_COMPACTOR;
287   }
288
289   // Is enough data promoted to justify a global GC?
290   if (OldGenerationAllocationLimitReached()) {
291     isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
292     *reason = "promotion limit reached";
293     return MARK_COMPACTOR;
294   }
295
296   // Have allocation in OLD and LO failed?
297   if (old_gen_exhausted_) {
298     isolate_->counters()->
299         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
300     *reason = "old generations exhausted";
301     return MARK_COMPACTOR;
302   }
303
304   // Is there enough space left in OLD to guarantee that a scavenge can
305   // succeed?
306   //
307   // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
308   // for object promotion. It counts only the bytes that the memory
309   // allocator has not yet allocated from the OS and assigned to any space,
310   // and does not count available bytes already in the old space or code
311   // space.  Undercounting is safe---we may get an unrequested full GC when
312   // a scavenge would have succeeded.
313   if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
314     isolate_->counters()->
315         gc_compactor_caused_by_oldspace_exhaustion()->Increment();
316     *reason = "scavenge might not succeed";
317     return MARK_COMPACTOR;
318   }
319
320   // Default
321   *reason = NULL;
322   return SCAVENGER;
323 }
324
325
326 // TODO(1238405): Combine the infrastructure for --heap-stats and
327 // --log-gc to avoid the complicated preprocessor and flag testing.
328 void Heap::ReportStatisticsBeforeGC() {
329   // Heap::ReportHeapStatistics will also log NewSpace statistics when
330   // --log-gc is set.  The following logic is used to avoid double
331   // logging.
332 #ifdef DEBUG
333   if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
334   if (FLAG_heap_stats) {
335     ReportHeapStatistics("Before GC");
336   } else if (FLAG_log_gc) {
337     new_space_.ReportStatistics();
338   }
339   if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
340 #else
341   if (FLAG_log_gc) {
342     new_space_.CollectStatistics();
343     new_space_.ReportStatistics();
344     new_space_.ClearHistograms();
345   }
346 #endif  // DEBUG
347 }
348
349
350 void Heap::PrintShortHeapStatistics() {
351   if (!FLAG_trace_gc_verbose) return;
352   PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
353                ", available: %6" V8_PTR_PREFIX "d KB\n",
354            isolate_->memory_allocator()->Size() / KB,
355            isolate_->memory_allocator()->Available() / KB);
356   PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
357                ", available: %6" V8_PTR_PREFIX "d KB"
358                ", committed: %6" V8_PTR_PREFIX "d KB\n",
359            new_space_.Size() / KB,
360            new_space_.Available() / KB,
361            new_space_.CommittedMemory() / KB);
362   PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
363                ", available: %6" V8_PTR_PREFIX "d KB"
364                ", committed: %6" V8_PTR_PREFIX "d KB\n",
365            old_pointer_space_->SizeOfObjects() / KB,
366            old_pointer_space_->Available() / KB,
367            old_pointer_space_->CommittedMemory() / KB);
368   PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
369                ", available: %6" V8_PTR_PREFIX "d KB"
370                ", committed: %6" V8_PTR_PREFIX "d KB\n",
371            old_data_space_->SizeOfObjects() / KB,
372            old_data_space_->Available() / KB,
373            old_data_space_->CommittedMemory() / KB);
374   PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
375                ", available: %6" V8_PTR_PREFIX "d KB"
376                ", committed: %6" V8_PTR_PREFIX "d KB\n",
377            code_space_->SizeOfObjects() / KB,
378            code_space_->Available() / KB,
379            code_space_->CommittedMemory() / KB);
380   PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
381                ", available: %6" V8_PTR_PREFIX "d KB"
382                ", committed: %6" V8_PTR_PREFIX "d KB\n",
383            map_space_->SizeOfObjects() / KB,
384            map_space_->Available() / KB,
385            map_space_->CommittedMemory() / KB);
386   PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
387                ", available: %6" V8_PTR_PREFIX "d KB"
388                ", committed: %6" V8_PTR_PREFIX "d KB\n",
389            cell_space_->SizeOfObjects() / KB,
390            cell_space_->Available() / KB,
391            cell_space_->CommittedMemory() / KB);
392   PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
393                ", available: %6" V8_PTR_PREFIX "d KB"
394                ", committed: %6" V8_PTR_PREFIX "d KB\n",
395            property_cell_space_->SizeOfObjects() / KB,
396            property_cell_space_->Available() / KB,
397            property_cell_space_->CommittedMemory() / KB);
398   PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
399                ", available: %6" V8_PTR_PREFIX "d KB"
400                ", committed: %6" V8_PTR_PREFIX "d KB\n",
401            lo_space_->SizeOfObjects() / KB,
402            lo_space_->Available() / KB,
403            lo_space_->CommittedMemory() / KB);
404   PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
405                ", available: %6" V8_PTR_PREFIX "d KB"
406                ", committed: %6" V8_PTR_PREFIX "d KB\n",
407            this->SizeOfObjects() / KB,
408            this->Available() / KB,
409            this->CommittedMemory() / KB);
410   PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
411            amount_of_external_allocated_memory_ / KB);
412   PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
413 }
414
415
416 // TODO(1238405): Combine the infrastructure for --heap-stats and
417 // --log-gc to avoid the complicated preprocessor and flag testing.
418 void Heap::ReportStatisticsAfterGC() {
419   // As in the pre-GC case, we use some complicated logic to ensure that
420   // NewSpace statistics are logged exactly once when --log-gc is turned on.
421 #if defined(DEBUG)
422   if (FLAG_heap_stats) {
423     new_space_.CollectStatistics();
424     ReportHeapStatistics("After GC");
425   } else if (FLAG_log_gc) {
426     new_space_.ReportStatistics();
427   }
428 #else
429   if (FLAG_log_gc) new_space_.ReportStatistics();
430 #endif  // DEBUG
431 }
432
433
434 void Heap::GarbageCollectionPrologue() {
435   {  AllowHeapAllocation for_the_first_part_of_prologue;
436     isolate_->transcendental_cache()->Clear();
437     ClearJSFunctionResultCaches();
438     gc_count_++;
439     unflattened_strings_length_ = 0;
440
441     if (FLAG_flush_code && FLAG_flush_code_incrementally) {
442       mark_compact_collector()->EnableCodeFlushing(true);
443     }
444
445 #ifdef VERIFY_HEAP
446     if (FLAG_verify_heap) {
447       Verify();
448     }
449 #endif
450   }
451
452 #ifdef DEBUG
453   ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
454
455   if (FLAG_gc_verbose) Print();
456
457   ReportStatisticsBeforeGC();
458 #endif  // DEBUG
459
460   store_buffer()->GCPrologue();
461 }
462
463
464 intptr_t Heap::SizeOfObjects() {
465   intptr_t total = 0;
466   AllSpaces spaces(this);
467   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
468     total += space->SizeOfObjects();
469   }
470   return total;
471 }
472
473
474 void Heap::RepairFreeListsAfterBoot() {
475   PagedSpaces spaces(this);
476   for (PagedSpace* space = spaces.next();
477        space != NULL;
478        space = spaces.next()) {
479     space->RepairFreeListsAfterBoot();
480   }
481 }
482
483
484 void Heap::GarbageCollectionEpilogue() {
485   store_buffer()->GCEpilogue();
486
487   // In release mode, we only zap the from space under heap verification.
488   if (Heap::ShouldZapGarbage()) {
489     ZapFromSpace();
490   }
491
492 #ifdef VERIFY_HEAP
493   if (FLAG_verify_heap) {
494     Verify();
495   }
496 #endif
497
498   AllowHeapAllocation for_the_rest_of_the_epilogue;
499
500 #ifdef DEBUG
501   if (FLAG_print_global_handles) isolate_->global_handles()->Print();
502   if (FLAG_print_handles) PrintHandles();
503   if (FLAG_gc_verbose) Print();
504   if (FLAG_code_stats) ReportCodeStatistics("After GC");
505 #endif
506   if (FLAG_deopt_every_n_garbage_collections > 0) {
507     if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
508       Deoptimizer::DeoptimizeAll(isolate());
509       gcs_since_last_deopt_ = 0;
510     }
511   }
512
513   isolate_->counters()->alive_after_last_gc()->Set(
514       static_cast<int>(SizeOfObjects()));
515
516   isolate_->counters()->string_table_capacity()->Set(
517       string_table()->Capacity());
518   isolate_->counters()->number_of_symbols()->Set(
519       string_table()->NumberOfElements());
520
521   if (CommittedMemory() > 0) {
522     isolate_->counters()->external_fragmentation_total()->AddSample(
523         static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
524
525     isolate_->counters()->heap_fraction_map_space()->AddSample(
526         static_cast<int>(
527             (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
528     isolate_->counters()->heap_fraction_cell_space()->AddSample(
529         static_cast<int>(
530             (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
531     isolate_->counters()->heap_fraction_property_cell_space()->
532         AddSample(static_cast<int>(
533             (property_cell_space()->CommittedMemory() * 100.0) /
534             CommittedMemory()));
535
536     isolate_->counters()->heap_sample_total_committed()->AddSample(
537         static_cast<int>(CommittedMemory() / KB));
538     isolate_->counters()->heap_sample_total_used()->AddSample(
539         static_cast<int>(SizeOfObjects() / KB));
540     isolate_->counters()->heap_sample_map_space_committed()->AddSample(
541         static_cast<int>(map_space()->CommittedMemory() / KB));
542     isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
543         static_cast<int>(cell_space()->CommittedMemory() / KB));
544     isolate_->counters()->
545         heap_sample_property_cell_space_committed()->
546             AddSample(static_cast<int>(
547                 property_cell_space()->CommittedMemory() / KB));
548   }
549
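// The macros below record per-space usage counters and, for spaces with
// committed memory, an external fragmentation sample
// (100 - used / committed, in percent).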
550 #define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
551   isolate_->counters()->space##_bytes_available()->Set(                        \
552       static_cast<int>(space()->Available()));                                 \
553   isolate_->counters()->space##_bytes_committed()->Set(                        \
554       static_cast<int>(space()->CommittedMemory()));                           \
555   isolate_->counters()->space##_bytes_used()->Set(                             \
556       static_cast<int>(space()->SizeOfObjects()));
557 #define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
558   if (space()->CommittedMemory() > 0) {                                        \
559     isolate_->counters()->external_fragmentation_##space()->AddSample(         \
560         static_cast<int>(100 -                                                 \
561             (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
562   }
563 #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
564   UPDATE_COUNTERS_FOR_SPACE(space)                                             \
565   UPDATE_FRAGMENTATION_FOR_SPACE(space)
566
567   UPDATE_COUNTERS_FOR_SPACE(new_space)
568   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
569   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
570   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
571   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
572   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
573   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
574   UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
575 #undef UPDATE_COUNTERS_FOR_SPACE
576 #undef UPDATE_FRAGMENTATION_FOR_SPACE
577 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
578
579 #if defined(DEBUG)
580   ReportStatisticsAfterGC();
581 #endif  // DEBUG
582 #ifdef ENABLE_DEBUGGER_SUPPORT
583   isolate_->debug()->AfterGarbageCollection();
584 #endif  // ENABLE_DEBUGGER_SUPPORT
585
586   error_object_list_.DeferredFormatStackTrace(isolate());
587 }
588
589
590 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
591   // Since we are ignoring the return value, the exact choice of space does
592   // not matter, so long as we do not specify NEW_SPACE, which would not
593   // cause a full GC.
594   mark_compact_collector_.SetFlags(flags);
595   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
596   mark_compact_collector_.SetFlags(kNoGCFlags);
597 }
598
599
600 void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
601   // Since we are ignoring the return value, the exact choice of space does
602   // not matter, so long as we do not specify NEW_SPACE, which would not
603   // cause a full GC.
604   // A major GC would invoke weak handle callbacks on weakly reachable
605   // handles, but it won't collect weakly reachable objects until the next
606   // major GC.  Therefore, if we collect aggressively and a weak handle
607   // callback has been invoked, we rerun the major GC to release objects
608   // which have become garbage.
609   // Note: as weak callbacks can execute arbitrary code, we cannot
610   // hope that eventually there will be no weak callback invocations.
611   // Therefore we stop recollecting after several attempts.
612   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
613                                      kReduceMemoryFootprintMask);
614   isolate_->compilation_cache()->Clear();
615   const int kMaxNumberOfAttempts = 7;
616   for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
617     if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL)) {
618       break;
619     }
620   }
621   mark_compact_collector()->SetFlags(kNoGCFlags);
622   new_space_.Shrink();
623   UncommitFromSpace();
624   incremental_marking()->UncommitMarkingDeque();
625 }
626
627
628 bool Heap::CollectGarbage(AllocationSpace space,
629                           GarbageCollector collector,
630                           const char* gc_reason,
631                           const char* collector_reason) {
632   // The VM is in the GC state until exiting this function.
633   VMState<GC> state(isolate_);
634
635 #ifdef DEBUG
636   // Reset the allocation timeout to the GC interval, but make sure to
637   // allow at least a few allocations after a collection. The reason
638   // for this is that we have a lot of allocation sequences and we
639   // assume that a garbage collection will allow the subsequent
640   // allocation attempts to go through.
641   allocation_timeout_ = Max(6, FLAG_gc_interval);
642 #endif
643
644   if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
645     if (FLAG_trace_incremental_marking) {
646       PrintF("[IncrementalMarking] Scavenge during marking.\n");
647     }
648   }
649
650   if (collector == MARK_COMPACTOR &&
651       !mark_compact_collector()->abort_incremental_marking() &&
652       !incremental_marking()->IsStopped() &&
653       !incremental_marking()->should_hurry() &&
654       FLAG_incremental_marking_steps) {
655     // Make progress in incremental marking.
656     const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
657     incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
658                                 IncrementalMarking::NO_GC_VIA_STACK_GUARD);
659     if (!incremental_marking()->IsComplete()) {
660       if (FLAG_trace_incremental_marking) {
661         PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
662       }
663       collector = SCAVENGER;
664       collector_reason = "incremental marking delaying mark-sweep";
665     }
666   }
667
668   bool next_gc_likely_to_collect_more = false;
669
670   { GCTracer tracer(this, gc_reason, collector_reason);
671     ASSERT(AllowHeapAllocation::IsAllowed());
672     DisallowHeapAllocation no_allocation_during_gc;
673     GarbageCollectionPrologue();
674     // The GC count was incremented in the prologue.  Tell the tracer about
675     // it.
676     tracer.set_gc_count(gc_count_);
677
678     // Tell the tracer which collector we've selected.
679     tracer.set_collector(collector);
680
681     {
682       HistogramTimerScope histogram_timer_scope(
683           (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
684                                    : isolate_->counters()->gc_compactor());
685       next_gc_likely_to_collect_more =
686           PerformGarbageCollection(collector, &tracer);
687     }
688
689     GarbageCollectionEpilogue();
690   }
691
692   // Start incremental marking for the next cycle. The heap snapshot
693   // generator needs incremental marking to stay off after it aborted.
694   if (!mark_compact_collector()->abort_incremental_marking() &&
695       incremental_marking()->IsStopped() &&
696       incremental_marking()->WorthActivating() &&
697       NextGCIsLikelyToBeFull()) {
698     incremental_marking()->Start();
699   }
700
701   return next_gc_likely_to_collect_more;
702 }
703
704
705 void Heap::PerformScavenge() {
706   GCTracer tracer(this, NULL, NULL);
707   if (incremental_marking()->IsStopped()) {
708     PerformGarbageCollection(SCAVENGER, &tracer);
709   } else {
710     PerformGarbageCollection(MARK_COMPACTOR, &tracer);
711   }
712 }
713
714
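// Moves |len| elements of |array| from |src_index| to |dst_index| and emits
// the corresponding write barrier records for any new space values that are
// now referenced from an old space array.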
715 void Heap::MoveElements(FixedArray* array,
716                         int dst_index,
717                         int src_index,
718                         int len) {
719   if (len == 0) return;
720
721   ASSERT(array->map() != HEAP->fixed_cow_array_map());
722   Object** dst_objects = array->data_start() + dst_index;
723   OS::MemMove(dst_objects,
724               array->data_start() + src_index,
725               len * kPointerSize);
726   if (!InNewSpace(array)) {
727     for (int i = 0; i < len; i++) {
728       // TODO(hpayer): check store buffer for entries
729       if (InNewSpace(dst_objects[i])) {
730         RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
731       }
732     }
733   }
734   incremental_marking()->RecordWrites(array);
735 }
736
737
738 #ifdef VERIFY_HEAP
739 // Helper class for verifying the string table.
740 class StringTableVerifier : public ObjectVisitor {
741  public:
742   void VisitPointers(Object** start, Object** end) {
743     // Visit all HeapObject pointers in [start, end).
744     for (Object** p = start; p < end; p++) {
745       if ((*p)->IsHeapObject()) {
746         // Check that the string is actually internalized.
747         CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
748               (*p)->IsInternalizedString());
749       }
750     }
751   }
752 };
753
754
755 static void VerifyStringTable() {
756   StringTableVerifier verifier;
757   HEAP->string_table()->IterateElements(&verifier);
758 }
759 #endif  // VERIFY_HEAP
760
761
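// Aborts incremental marking if it is in progress, collects garbage in the
// given space and then restores the default GC flags.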
762 static bool AbortIncrementalMarkingAndCollectGarbage(
763     Heap* heap,
764     AllocationSpace space,
765     const char* gc_reason = NULL) {
766   heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
767   bool result = heap->CollectGarbage(space, gc_reason);
768   heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
769   return result;
770 }
771
772
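// Reserves sizes[space] bytes in each space for the deserializer, retrying
// with garbage collections until every reservation succeeds or the retry
// limit is reached.  Each reserved chunk is marked with a free list node and
// its address is returned in locations_out.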
773 void Heap::ReserveSpace(
774     int *sizes,
775     Address *locations_out) {
776   bool gc_performed = true;
777   int counter = 0;
778   static const int kThreshold = 20;
779   while (gc_performed && counter++ < kThreshold) {
780     gc_performed = false;
781     ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
782     for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
783       if (sizes[space] != 0) {
784         MaybeObject* allocation;
785         if (space == NEW_SPACE) {
786           allocation = new_space()->AllocateRaw(sizes[space]);
787         } else {
788           allocation = paged_space(space)->AllocateRaw(sizes[space]);
789         }
790         FreeListNode* node;
791         if (!allocation->To<FreeListNode>(&node)) {
792           if (space == NEW_SPACE) {
793             Heap::CollectGarbage(NEW_SPACE,
794                                  "failed to reserve space in the new space");
795           } else {
796             AbortIncrementalMarkingAndCollectGarbage(
797                 this,
798                 static_cast<AllocationSpace>(space),
799                 "failed to reserve space in paged space");
800           }
801           gc_performed = true;
802           break;
803         } else {
804           // Mark with a free list node, in case we have a GC before
805           // deserializing.
806           node->set_size(this, sizes[space]);
807           locations_out[space] = node->address();
808         }
809       }
810     }
811   }
812
813   if (gc_performed) {
814     // Failed to reserve the space after several attempts.
815     V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
816   }
817 }
818
819
820 void Heap::EnsureFromSpaceIsCommitted() {
821   if (new_space_.CommitFromSpaceIfNeeded()) return;
822
823   // Committing memory to from space failed.
824   // Memory is exhausted and we will die.
825   V8::FatalProcessOutOfMemory("Committing semi space failed.");
826 }
827
828
829 void Heap::ClearJSFunctionResultCaches() {
830   if (isolate_->bootstrapper()->IsActive()) return;
831
832   Object* context = native_contexts_list_;
833   while (!context->IsUndefined()) {
834     // Get the caches for this context. GC can happen when the context
835     // is not fully initialized, so the caches can be undefined.
836     Object* caches_or_undefined =
837         Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
838     if (!caches_or_undefined->IsUndefined()) {
839       FixedArray* caches = FixedArray::cast(caches_or_undefined);
840       // Clear the caches:
841       int length = caches->length();
842       for (int i = 0; i < length; i++) {
843         JSFunctionResultCache::cast(caches->get(i))->Clear();
844       }
845     }
846     // Get the next context:
847     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
848   }
849 }
850
851
852 void Heap::ClearNormalizedMapCaches() {
853   if (isolate_->bootstrapper()->IsActive() &&
854       !incremental_marking()->IsMarking()) {
855     return;
856   }
857
858   Object* context = native_contexts_list_;
859   while (!context->IsUndefined()) {
860     // GC can happen when the context is not fully initialized,
861     // so the cache can be undefined.
862     Object* cache =
863         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
864     if (!cache->IsUndefined()) {
865       NormalizedMapCache::cast(cache)->Clear();
866     }
867     context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
868   }
869 }
870
871
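// Recomputes the young generation survival rate (the percentage of the new
// space that survived the last collection) and classifies the trend as
// INCREASING, DECREASING or STABLE relative to the previous rate.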
872 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
873   double survival_rate =
874       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
875       start_new_space_size;
876
877   if (survival_rate > kYoungSurvivalRateHighThreshold) {
878     high_survival_rate_period_length_++;
879   } else {
880     high_survival_rate_period_length_ = 0;
881   }
882
883   if (survival_rate < kYoungSurvivalRateLowThreshold) {
884     low_survival_rate_period_length_++;
885   } else {
886     low_survival_rate_period_length_ = 0;
887   }
888
889   double survival_rate_diff = survival_rate_ - survival_rate;
890
891   if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
892     set_survival_rate_trend(DECREASING);
893   } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
894     set_survival_rate_trend(INCREASING);
895   } else {
896     set_survival_rate_trend(STABLE);
897   }
898
899   survival_rate_ = survival_rate;
900 }
901
902 bool Heap::PerformGarbageCollection(GarbageCollector collector,
903                                     GCTracer* tracer) {
904   bool next_gc_likely_to_collect_more = false;
905
906   if (collector != SCAVENGER) {
907     PROFILE(isolate_, CodeMovingGCEvent());
908   }
909
910 #ifdef VERIFY_HEAP
911   if (FLAG_verify_heap) {
912     VerifyStringTable();
913   }
914 #endif
915
916   GCType gc_type =
917       collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
918
919   {
920     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
921     VMState<EXTERNAL> state(isolate_);
922     CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
923   }
924
925   EnsureFromSpaceIsCommitted();
926
927   int start_new_space_size = Heap::new_space()->SizeAsInt();
928
929   if (IsHighSurvivalRate()) {
930     // We speed up the incremental marker if it is running so that it
931     // does not fall behind the rate of promotion, which would cause a
932     // constantly growing old space.
933     incremental_marking()->NotifyOfHighPromotionRate();
934   }
935
936   if (collector == MARK_COMPACTOR) {
937     // Perform mark-sweep with optional compaction.
938     MarkCompact(tracer);
939     sweep_generation_++;
940
941     UpdateSurvivalRateTrend(start_new_space_size);
942
943     size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
944
945     old_generation_allocation_limit_ =
946         OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
947
948     old_gen_exhausted_ = false;
949   } else {
950     tracer_ = tracer;
951     Scavenge();
952     tracer_ = NULL;
953
954     UpdateSurvivalRateTrend(start_new_space_size);
955   }
956
957   if (!new_space_high_promotion_mode_active_ &&
958       new_space_.Capacity() == new_space_.MaximumCapacity() &&
959       IsStableOrIncreasingSurvivalTrend() &&
960       IsHighSurvivalRate()) {
961     // Stable high survival rates even though the young generation is at
962     // maximum capacity indicate that most objects will be promoted.
963     // To decrease scavenger pauses and final mark-sweep pauses, we
964     // have to limit the maximal capacity of the young generation.
965     SetNewSpaceHighPromotionModeActive(true);
966     if (FLAG_trace_gc) {
967       PrintPID("Limited new space size due to high promotion rate: %d MB\n",
968                new_space_.InitialCapacity() / MB);
969     }
970     // Support for global pre-tenuring uses the high promotion mode as a
971     // heuristic indicator of whether to pretenure or not, so we trigger
972     // deoptimization here to take advantage of pre-tenuring as soon as
973     // possible.
974     if (FLAG_pretenuring) {
975       isolate_->stack_guard()->FullDeopt();
976     }
977   } else if (new_space_high_promotion_mode_active_ &&
978       IsStableOrDecreasingSurvivalTrend() &&
979       IsLowSurvivalRate()) {
980     // Decreasing low survival rates might indicate that the above high
981     // promotion mode is over and we should allow the young generation
982     // to grow again.
983     SetNewSpaceHighPromotionModeActive(false);
984     if (FLAG_trace_gc) {
985       PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
986                new_space_.MaximumCapacity() / MB);
987     }
988     // Trigger deoptimization here to turn off pre-tenuring as soon as
989     // possible.
990     if (FLAG_pretenuring) {
991       isolate_->stack_guard()->FullDeopt();
992     }
993   }
994
995   if (new_space_high_promotion_mode_active_ &&
996       new_space_.Capacity() > new_space_.InitialCapacity()) {
997     new_space_.Shrink();
998   }
999
1000   isolate_->counters()->objs_since_last_young()->Set(0);
1001
1002   // Callbacks that fire after this point might trigger nested GCs and
1003   // restart incremental marking, so the assertion can't be moved down.
1004   ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1005
1006   gc_post_processing_depth_++;
1007   { AllowHeapAllocation allow_allocation;
1008     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1009     next_gc_likely_to_collect_more =
1010         isolate_->global_handles()->PostGarbageCollectionProcessing(
1011             collector, tracer);
1012   }
1013   gc_post_processing_depth_--;
1014
1015   // Update relocatables.
1016   Relocatable::PostGarbageCollectionProcessing();
1017
1018   if (collector == MARK_COMPACTOR) {
1019     // Register the amount of external allocated memory.
1020     amount_of_external_allocated_memory_at_last_global_gc_ =
1021         amount_of_external_allocated_memory_;
1022   }
1023
1024   {
1025     GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1026     VMState<EXTERNAL> state(isolate_);
1027     CallGCEpilogueCallbacks(gc_type);
1028   }
1029
1030 #ifdef VERIFY_HEAP
1031   if (FLAG_verify_heap) {
1032     VerifyStringTable();
1033   }
1034 #endif
1035
1036   return next_gc_likely_to_collect_more;
1037 }
1038
1039
1040 void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
1041   if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) {
1042     global_gc_prologue_callback_();
1043   }
1044   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
1045     if (gc_type & gc_prologue_callbacks_[i].gc_type) {
1046       gc_prologue_callbacks_[i].callback(gc_type, flags);
1047     }
1048   }
1049 }
1050
1051
1052 void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
1053   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
1054     if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
1055       gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
1056     }
1057   }
1058   if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) {
1059     global_gc_epilogue_callback_();
1060   }
1061 }
1062
1063
1064 void Heap::MarkCompact(GCTracer* tracer) {
1065   gc_state_ = MARK_COMPACT;
1066   LOG(isolate_, ResourceEvent("markcompact", "begin"));
1067
1068   mark_compact_collector_.Prepare(tracer);
1069
1070   ms_count_++;
1071   tracer->set_full_gc_count(ms_count_);
1072
1073   MarkCompactPrologue();
1074
1075   mark_compact_collector_.CollectGarbage();
1076
1077   LOG(isolate_, ResourceEvent("markcompact", "end"));
1078
1079   gc_state_ = NOT_IN_GC;
1080
1081   isolate_->counters()->objs_since_last_full()->Set(0);
1082
1083   contexts_disposed_ = 0;
1084
1085   flush_monomorphic_ics_ = false;
1086 }
1087
1088
1089 void Heap::MarkCompactPrologue() {
1090   // At any old GC clear the keyed lookup cache to enable collection of unused
1091   // maps.
1092   isolate_->keyed_lookup_cache()->Clear();
1093   isolate_->context_slot_cache()->Clear();
1094   isolate_->descriptor_lookup_cache()->Clear();
1095   RegExpResultsCache::Clear(string_split_cache());
1096   RegExpResultsCache::Clear(regexp_multiple_cache());
1097
1098   isolate_->compilation_cache()->MarkCompactPrologue();
1099
1100   CompletelyClearInstanceofCache();
1101
1102   FlushNumberStringCache();
1103   if (FLAG_cleanup_code_caches_at_gc) {
1104     polymorphic_code_cache()->set_cache(undefined_value());
1105   }
1106
1107   ClearNormalizedMapCaches();
1108 }
1109
1110
1111 Object* Heap::FindCodeObject(Address a) {
1112   return isolate()->inner_pointer_to_code_cache()->
1113       GcSafeFindCodeForInnerPointer(a);
1114 }
1115
1116
1117 // Helper class for copying HeapObjects
1118 class ScavengeVisitor: public ObjectVisitor {
1119  public:
1120   explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
1121
1122   void VisitPointer(Object** p) { ScavengePointer(p); }
1123
1124   void VisitPointers(Object** start, Object** end) {
1125     // Copy all HeapObject pointers in [start, end)
1126     for (Object** p = start; p < end; p++) ScavengePointer(p);
1127   }
1128
1129  private:
1130   void ScavengePointer(Object** p) {
1131     Object* object = *p;
1132     if (!heap_->InNewSpace(object)) return;
1133     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1134                          reinterpret_cast<HeapObject*>(object));
1135   }
1136
1137   Heap* heap_;
1138 };
1139
1140
1141 #ifdef VERIFY_HEAP
1142 // Visitor class to verify pointers in code or data space do not point into
1143 // new space.
1144 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
1145  public:
1146   void VisitPointers(Object** start, Object**end) {
1147     for (Object** current = start; current < end; current++) {
1148       if ((*current)->IsHeapObject()) {
1149         CHECK(!HEAP->InNewSpace(HeapObject::cast(*current)));
1150       }
1151     }
1152   }
1153 };
1154
1155
1156 static void VerifyNonPointerSpacePointers() {
1157   // Verify that there are no pointers to new space in spaces where we
1158   // do not expect them.
1159   VerifyNonPointerSpacePointersVisitor v;
1160   HeapObjectIterator code_it(HEAP->code_space());
1161   for (HeapObject* object = code_it.Next();
1162        object != NULL; object = code_it.Next())
1163     object->Iterate(&v);
1164
1165   // The old data space is normally swept conservatively, in which case the
1166   // iterator does not work, so we normally skip the next bit.
1167   if (!HEAP->old_data_space()->was_swept_conservatively()) {
1168     HeapObjectIterator data_it(HEAP->old_data_space());
1169     for (HeapObject* object = data_it.Next();
1170          object != NULL; object = data_it.Next())
1171       object->Iterate(&v);
1172   }
1173 }
1174 #endif  // VERIFY_HEAP
1175
1176
1177 void Heap::CheckNewSpaceExpansionCriteria() {
1178   if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
1179       survived_since_last_expansion_ > new_space_.Capacity() &&
1180       !new_space_high_promotion_mode_active_) {
1181     // Grow the size of new space if there is room to grow, enough data
1182     // has survived scavenge since the last expansion and we are not in
1183     // high promotion mode.
1184     new_space_.Grow();
1185     survived_since_last_expansion_ = 0;
1186   }
1187 }
1188
1189
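// A new space object is unscavenged if it has not been copied yet, i.e. its
// map word does not contain a forwarding address.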
1190 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
1191   return heap->InNewSpace(*p) &&
1192       !HeapObject::cast(*p)->map_word().IsForwardingAddress();
1193 }
1194
1195
1196 void Heap::ScavengeStoreBufferCallback(
1197     Heap* heap,
1198     MemoryChunk* page,
1199     StoreBufferEvent event) {
1200   heap->store_buffer_rebuilder_.Callback(page, event);
1201 }
1202
1203
1204 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
1205   if (event == kStoreBufferStartScanningPagesEvent) {
1206     start_of_current_page_ = NULL;
1207     current_page_ = NULL;
1208   } else if (event == kStoreBufferScanningPageEvent) {
1209     if (current_page_ != NULL) {
1210       // If this page already overflowed the store buffer during this iteration.
1211       if (current_page_->scan_on_scavenge()) {
1212         // Then we should wipe out the entries that have been added for it.
1213         store_buffer_->SetTop(start_of_current_page_);
1214       } else if (store_buffer_->Top() - start_of_current_page_ >=
1215                  (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
1216         // Did we find too many pointers in the previous page?  The heuristic is
1217         // that no page can take more than 1/5 of the remaining slots in the store
1218         // buffer.
1219         current_page_->set_scan_on_scavenge(true);
1220         store_buffer_->SetTop(start_of_current_page_);
1221       } else {
1222         // In this case the page we scanned took a reasonable number of slots in
1223         // the store buffer.  It has now been rehabilitated and is no longer
1224         // marked scan_on_scavenge.
1225         ASSERT(!current_page_->scan_on_scavenge());
1226       }
1227     }
1228     start_of_current_page_ = store_buffer_->Top();
1229     current_page_ = page;
1230   } else if (event == kStoreBufferFullEvent) {
1231     // The current page overflowed the store buffer again.  Wipe out its entries
1232     // in the store buffer and mark it scan-on-scavenge again.  This may happen
1233     // several times while scanning.
1234     if (current_page_ == NULL) {
1235       // Store Buffer overflowed while scanning promoted objects.  These are not
1236       // in any particular page, though they are likely to be clustered by the
1237       // allocation routines.
1238       store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
1239     } else {
1240       // Store Buffer overflowed while scanning a particular old space page for
1241       // pointers to new space.
1242       ASSERT(current_page_ == page);
1243       ASSERT(page != NULL);
1244       current_page_->set_scan_on_scavenge(true);
1245       ASSERT(start_of_current_page_ != store_buffer_->Top());
1246       store_buffer_->SetTop(start_of_current_page_);
1247     }
1248   } else {
1249     UNREACHABLE();
1250   }
1251 }
1252
1253
1254 void PromotionQueue::Initialize() {
1255   // Assumes that a NewSpacePage exactly fits a number of promotion queue
1256   // entries (where each is a pair of intptr_t). This allows us to simplify
1257   // the test for when to switch pages.
1258   ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
1259          == 0);
1260   limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
1261   front_ = rear_ =
1262       reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
1263   emergency_stack_ = NULL;
1264   guard_ = false;
1265 }
1266
1267
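// Moves the promotion queue entries that are stored on the page containing
// the queue head onto a heap-allocated emergency stack.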
1268 void PromotionQueue::RelocateQueueHead() {
1269   ASSERT(emergency_stack_ == NULL);
1270
1271   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1272   intptr_t* head_start = rear_;
1273   intptr_t* head_end =
1274       Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
1275
1276   int entries_count =
1277       static_cast<int>(head_end - head_start) / kEntrySizeInWords;
1278
1279   emergency_stack_ = new List<Entry>(2 * entries_count);
1280
1281   while (head_start != head_end) {
1282     int size = static_cast<int>(*(head_start++));
1283     HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
1284     emergency_stack_->Add(Entry(obj, size));
1285   }
1286   rear_ = head_end;
1287 }
1288
1289
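// Weak object retainer used during scavenges: objects outside from space are
// kept as-is, evacuated objects are replaced by their forwarding address and
// unreachable from-space objects are dropped (NULL).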
1290 class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
1291  public:
1292   explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
1293
1294   virtual Object* RetainAs(Object* object) {
1295     if (!heap_->InFromSpace(object)) {
1296       return object;
1297     }
1298
1299     MapWord map_word = HeapObject::cast(object)->map_word();
1300     if (map_word.IsForwardingAddress()) {
1301       return map_word.ToForwardingAddress();
1302     }
1303     return NULL;
1304   }
1305
1306  private:
1307   Heap* heap_;
1308 };
1309
1310
1311 void Heap::Scavenge() {
1312   RelocationLock relocation_lock(this);
1313
1314 #ifdef VERIFY_HEAP
1315   if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
1316 #endif
1317
1318   gc_state_ = SCAVENGE;
1319
1320   // Implements Cheney's copying algorithm
1321   LOG(isolate_, ResourceEvent("scavenge", "begin"));
1322
1323   // Clear descriptor cache.
1324   isolate_->descriptor_lookup_cache()->Clear();
1325
1326   // Used for updating survived_since_last_expansion_ at function end.
1327   intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1328
1329   CheckNewSpaceExpansionCriteria();
1330
1331   SelectScavengingVisitorsTable();
1332
1333   incremental_marking()->PrepareForScavenge();
1334
1335   paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1336   paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1337
1338   // Flip the semispaces.  After flipping, to space is empty, from space has
1339   // live objects.
1340   new_space_.Flip();
1341   new_space_.ResetAllocationInfo();
1342
1343   // We need to sweep newly copied objects which can be either in the
1344   // to space or promoted to the old generation.  For to-space
1345   // objects, we treat the bottom of the to space as a queue.  Newly
1346   // copied and unswept objects lie between a 'front' mark and the
1347   // allocation pointer.
1348   //
1349   // Promoted objects can go into various old-generation spaces, and
1350   // can be allocated internally in the spaces (from the free list).
1351   // We treat the top of the to space as a queue of addresses of
1352   // promoted objects.  The addresses of newly promoted and unswept
1353   // objects lie between a 'front' mark and a 'rear' mark that is
1354   // updated as a side effect of promoting an object.
1355   //
1356   // There is guaranteed to be enough room at the top of the to space
1357   // for the addresses of promoted objects: every object promoted
1358   // frees up its size in bytes from the top of the new space, and
1359   // objects are at least one pointer in size.
1360   Address new_space_front = new_space_.ToSpaceStart();
1361   promotion_queue_.Initialize();
1362
1363 #ifdef DEBUG
1364   store_buffer()->Clean();
1365 #endif
1366
1367   ScavengeVisitor scavenge_visitor(this);
1368   // Copy roots.
1369   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1370
1371   // Copy objects reachable from the old generation.
1372   {
1373     StoreBufferRebuildScope scope(this,
1374                                   store_buffer(),
1375                                   &ScavengeStoreBufferCallback);
1376     store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1377   }
1378
1379   // Copy objects reachable from simple cells by scavenging cell values
1380   // directly.
1381   HeapObjectIterator cell_iterator(cell_space_);
1382   for (HeapObject* heap_object = cell_iterator.Next();
1383        heap_object != NULL;
1384        heap_object = cell_iterator.Next()) {
1385     if (heap_object->IsCell()) {
1386       Cell* cell = Cell::cast(heap_object);
1387       Address value_address = cell->ValueAddress();
1388       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1389     }
1390   }
1391
1392   // Copy objects reachable from global property cells by scavenging global
1393   // property cell values directly.
1394   HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1395   for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1396        heap_object != NULL;
1397        heap_object = js_global_property_cell_iterator.Next()) {
1398     if (heap_object->IsPropertyCell()) {
1399       PropertyCell* cell = PropertyCell::cast(heap_object);
1400       Address value_address = cell->ValueAddress();
1401       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1402       Address type_address = cell->TypeAddress();
1403       scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1404     }
1405   }
1406
1407   // Copy objects reachable from the code flushing candidates list.
1408   MarkCompactCollector* collector = mark_compact_collector();
1409   if (collector->is_code_flushing_enabled()) {
1410     collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1411   }
1412
1413   // Scavenge the object reachable from the native contexts list directly.
1414   scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1415
1416   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1417
1418   while (isolate()->global_handles()->IterateObjectGroups(
1419       &scavenge_visitor, &IsUnscavengedHeapObject)) {
1420     new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1421   }
1422   isolate()->global_handles()->RemoveObjectGroups();
1423   isolate()->global_handles()->RemoveImplicitRefGroups();
1424
1425   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1426       &IsUnscavengedHeapObject);
1427   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1428       &scavenge_visitor);
1429   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1430
1431   UpdateNewSpaceReferencesInExternalStringTable(
1432       &UpdateNewSpaceReferenceInExternalStringTableEntry);
1433
1434   error_object_list_.UpdateReferencesInNewSpace(this);
1435
1436   promotion_queue_.Destroy();
1437
1438   if (!FLAG_watch_ic_patching) {
1439     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1440   }
1441   incremental_marking()->UpdateMarkingDequeAfterScavenge();
1442
1443   ScavengeWeakObjectRetainer weak_object_retainer(this);
1444   ProcessWeakReferences(&weak_object_retainer);
1445
1446   ASSERT(new_space_front == new_space_.top());
1447
1448   // Set age mark.
1449   new_space_.set_age_mark(new_space_.top());
1450
1451   new_space_.LowerInlineAllocationLimit(
1452       new_space_.inline_allocation_limit_step());
1453
1454   // Update how much has survived scavenge.
1455   IncrementYoungSurvivorsCounter(static_cast<int>(
1456       (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1457
1458   LOG(isolate_, ResourceEvent("scavenge", "end"));
1459
1460   gc_state_ = NOT_IN_GC;
1461
1462   scavenges_since_last_idle_round_++;
1463 }
1464
1465
1466 String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1467                                                                 Object** p) {
1468   MapWord first_word = HeapObject::cast(*p)->map_word();
1469
1470   if (!first_word.IsForwardingAddress()) {
1471     // Unreachable external string can be finalized.
1472     heap->FinalizeExternalString(String::cast(*p));
1473     return NULL;
1474   }
1475
1476   // String is still reachable.
1477   return String::cast(first_word.ToForwardingAddress());
1478 }
1479
1480
1481 void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1482     ExternalStringTableUpdaterCallback updater_func) {
1483 #ifdef VERIFY_HEAP
1484   if (FLAG_verify_heap) {
1485     external_string_table_.Verify();
1486   }
1487 #endif
1488
1489   if (external_string_table_.new_space_strings_.is_empty()) return;
1490
1491   Object** start = &external_string_table_.new_space_strings_[0];
1492   Object** end = start + external_string_table_.new_space_strings_.length();
1493   Object** last = start;
1494
1495   for (Object** p = start; p < end; ++p) {
1496     ASSERT(InFromSpace(*p));
1497     String* target = updater_func(this, p);
1498
1499     if (target == NULL) continue;
1500
1501     ASSERT(target->IsExternalString());
1502
1503     if (InNewSpace(target)) {
1504       // String is still in new space.  Update the table entry.
1505       *last = target;
1506       ++last;
1507     } else {
1508       // String got promoted.  Move it to the old string list.
1509       external_string_table_.AddOldString(target);
1510     }
1511   }
1512
1513   ASSERT(last <= end);
1514   external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1515 }
1516
1517
1518 void Heap::UpdateReferencesInExternalStringTable(
1519     ExternalStringTableUpdaterCallback updater_func) {
1520
1521   // Update old space string references.
1522   if (external_string_table_.old_space_strings_.length() > 0) {
1523     Object** start = &external_string_table_.old_space_strings_[0];
1524     Object** end = start + external_string_table_.old_space_strings_.length();
1525     for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1526   }
1527
1528   UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1529 }
1530
1531
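// Traits for the weak list traversal below.  Each specialization describes
// how to read and write the weak "next" link of its element type, where that
// link lives (WeakNextOffset), and how to visit live and phantom (dead)
// elements encountered during the walk.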
1532 template <class T>
1533 struct WeakListVisitor;
1534
1535
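// Walks a singly linked weak list of T, asking the retainer which elements
// to keep.  Dead elements are unlinked and reported via VisitPhantomObject;
// survivors are relinked, and their "next" slots are recorded for the
// compacting collector when record_slots is set.  Returns the new list head.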
1536 template <class T>
1537 static Object* VisitWeakList(Heap* heap,
1538                              Object* list,
1539                              WeakObjectRetainer* retainer,
1540                              bool record_slots) {
1541   Object* undefined = heap->undefined_value();
1542   Object* head = undefined;
1543   T* tail = NULL;
1544   MarkCompactCollector* collector = heap->mark_compact_collector();
1545   while (list != undefined) {
1546     // Check whether to keep the candidate in the list.
1547     T* candidate = reinterpret_cast<T*>(list);
1548     Object* retained = retainer->RetainAs(list);
1549     if (retained != NULL) {
1550       if (head == undefined) {
1551         // First element in the list.
1552         head = retained;
1553       } else {
1554         // Subsequent elements in the list.
1555         ASSERT(tail != NULL);
1556         WeakListVisitor<T>::SetWeakNext(tail, retained);
1557         if (record_slots) {
1558           Object** next_slot =
1559             HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
1560           collector->RecordSlot(next_slot, next_slot, retained);
1561         }
1562       }
1563       // Retained object is new tail.
1564       ASSERT(!retained->IsUndefined());
1565       candidate = reinterpret_cast<T*>(retained);
1566       tail = candidate;
1567
1568
1569       // tail is a live object, visit it.
1570       WeakListVisitor<T>::VisitLiveObject(
1571           heap, tail, retainer, record_slots);
1572     } else {
1573       WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
1574     }
1575
1576     // Move to next element in the list.
1577     list = WeakListVisitor<T>::WeakNext(candidate);
1578   }
1579
1580   // Terminate the list if it contains one or more elements.
1581   if (tail != NULL) {
1582     WeakListVisitor<T>::SetWeakNext(tail, undefined);
1583   }
1584   return head;
1585 }
1586
1587
1588 template<>
1589 struct WeakListVisitor<JSFunction> {
1590   static void SetWeakNext(JSFunction* function, Object* next) {
1591     function->set_next_function_link(next);
1592   }
1593
1594   static Object* WeakNext(JSFunction* function) {
1595     return function->next_function_link();
1596   }
1597
1598   static int WeakNextOffset() {
1599     return JSFunction::kNextFunctionLinkOffset;
1600   }
1601
1602   static void VisitLiveObject(Heap*, JSFunction*,
1603                               WeakObjectRetainer*, bool) {
1604   }
1605
1606   static void VisitPhantomObject(Heap*, JSFunction*) {
1607   }
1608 };
1609
1610
1611 template<>
1612 struct WeakListVisitor<Context> {
1613   static void SetWeakNext(Context* context, Object* next) {
1614     context->set(Context::NEXT_CONTEXT_LINK,
1615                  next,
1616                  UPDATE_WRITE_BARRIER);
1617   }
1618
1619   static Object* WeakNext(Context* context) {
1620     return context->get(Context::NEXT_CONTEXT_LINK);
1621   }
1622
1623   static void VisitLiveObject(Heap* heap,
1624                               Context* context,
1625                               WeakObjectRetainer* retainer,
1626                               bool record_slots) {
1627     // Process the weak list of optimized functions for the context.
1628     Object* function_list_head =
1629         VisitWeakList<JSFunction>(
1630             heap,
1631             context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1632             retainer,
1633             record_slots);
1634     context->set(Context::OPTIMIZED_FUNCTIONS_LIST,
1635                  function_list_head,
1636                  UPDATE_WRITE_BARRIER);
1637     if (record_slots) {
1638       Object** optimized_functions =
1639           HeapObject::RawField(
1640               context, FixedArray::SizeFor(Context::OPTIMIZED_FUNCTIONS_LIST));
1641       heap->mark_compact_collector()->RecordSlot(
1642           optimized_functions, optimized_functions, function_list_head);
1643     }
1644   }
1645
1646   static void VisitPhantomObject(Heap*, Context*) {
1647   }
1648
1649   static int WeakNextOffset() {
1650     return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
1651   }
1652 };
1653
1654
1655 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1656   // We don't record weak slots during marking or scavenges.
1657   // Instead we do it once when we complete the mark-compact cycle.
1658   // Note that the write barrier has no effect if we are already in the middle
1659   // of a compacting mark-sweep cycle, so we have to record slots manually.
1660   bool record_slots =
1661       gc_state() == MARK_COMPACT &&
1662       mark_compact_collector()->is_compacting();
1663   ProcessArrayBuffers(retainer, record_slots);
1664   ProcessNativeContexts(retainer, record_slots);
1665 }
1666
1667 void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
1668                                  bool record_slots) {
1669   Object* head =
1670       VisitWeakList<Context>(
1671           this, native_contexts_list(), retainer, record_slots);
1672   // Update the head of the list of contexts.
1673   native_contexts_list_ = head;
1674 }
1675
1676
1677 template<>
1678 struct WeakListVisitor<JSArrayBufferView> {
1679   static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
1680     obj->set_weak_next(next);
1681   }
1682
1683   static Object* WeakNext(JSArrayBufferView* obj) {
1684     return obj->weak_next();
1685   }
1686
1687   static void VisitLiveObject(Heap*,
1688                               JSArrayBufferView* obj,
1689                               WeakObjectRetainer* retainer,
1690                               bool record_slots) {}
1691
1692   static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
1693
1694   static int WeakNextOffset() {
1695     return JSArrayBufferView::kWeakNextOffset;
1696   }
1697 };
1698
1699
1700 template<>
1701 struct WeakListVisitor<JSArrayBuffer> {
1702   static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
1703     obj->set_weak_next(next);
1704   }
1705
1706   static Object* WeakNext(JSArrayBuffer* obj) {
1707     return obj->weak_next();
1708   }
1709
1710   static void VisitLiveObject(Heap* heap,
1711                               JSArrayBuffer* array_buffer,
1712                               WeakObjectRetainer* retainer,
1713                               bool record_slots) {
1714     Object* typed_array_obj =
1715         VisitWeakList<JSArrayBufferView>(
1716             heap,
1717             array_buffer->weak_first_view(),
1718             retainer, record_slots);
1719     array_buffer->set_weak_first_view(typed_array_obj);
1720     if (typed_array_obj != heap->undefined_value() && record_slots) {
1721       Object** slot = HeapObject::RawField(
1722           array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
1723       heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
1724     }
1725   }
1726
1727   static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
1728     Runtime::FreeArrayBuffer(heap->isolate(), phantom);
1729   }
1730
1731   static int WeakNextOffset() {
1732     return JSArrayBuffer::kWeakNextOffset;
1733   }
1734 };
1735
1736
1737 void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
1738                                bool record_slots) {
1739   Object* array_buffer_obj =
1740       VisitWeakList<JSArrayBuffer>(this,
1741                                    array_buffers_list(),
1742                                    retainer, record_slots);
1743   set_array_buffers_list(array_buffer_obj);
1744 }
1745
1746
1747 void Heap::TearDownArrayBuffers() {
1748   Object* undefined = undefined_value();
1749   for (Object* o = array_buffers_list(); o != undefined;) {
1750     JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
1751     Runtime::FreeArrayBuffer(isolate(), buffer);
1752     o = buffer->weak_next();
1753   }
1754   array_buffers_list_ = undefined;
1755 }
1756
1757
1758 void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
1759   DisallowHeapAllocation no_allocation;
1760
1761   // Both the external string table and the string table may contain
1762   // external strings, but neither lists them exhaustively, nor is the
1763   // intersection set empty.  Therefore we iterate over the external string
1764   // table first, ignoring internalized strings, and then over the
1765   // internalized string table.
1766
1767   class ExternalStringTableVisitorAdapter : public ObjectVisitor {
1768    public:
1769     explicit ExternalStringTableVisitorAdapter(
1770         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1771     virtual void VisitPointers(Object** start, Object** end) {
1772       for (Object** p = start; p < end; p++) {
1773         // Visit non-internalized external strings,
1774         // since internalized strings are listed in the string table.
1775         if (!(*p)->IsInternalizedString()) {
1776           ASSERT((*p)->IsExternalString());
1777           visitor_->VisitExternalString(Utils::ToLocal(
1778               Handle<String>(String::cast(*p))));
1779         }
1780       }
1781     }
1782    private:
1783     v8::ExternalResourceVisitor* visitor_;
1784   } external_string_table_visitor(visitor);
1785
1786   external_string_table_.Iterate(&external_string_table_visitor);
1787
1788   class StringTableVisitorAdapter : public ObjectVisitor {
1789    public:
1790     explicit StringTableVisitorAdapter(
1791         v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
1792     virtual void VisitPointers(Object** start, Object** end) {
1793       for (Object** p = start; p < end; p++) {
1794         if ((*p)->IsExternalString()) {
1795           ASSERT((*p)->IsInternalizedString());
1796           visitor_->VisitExternalString(Utils::ToLocal(
1797               Handle<String>(String::cast(*p))));
1798         }
1799       }
1800     }
1801    private:
1802     v8::ExternalResourceVisitor* visitor_;
1803   } string_table_visitor(visitor);
1804
1805   string_table()->IterateElements(&string_table_visitor);
1806 }
1807
1808
1809 class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
1810  public:
1811   static inline void VisitPointer(Heap* heap, Object** p) {
1812     Object* object = *p;
1813     if (!heap->InNewSpace(object)) return;
1814     Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1815                          reinterpret_cast<HeapObject*>(object));
1816   }
1817 };
1818
1819
1820 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1821                          Address new_space_front) {
1822   do {
1823     SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1824     // The addresses new_space_front and new_space_.top() define a
1825     // queue of unprocessed copied objects.  Process them until the
1826     // queue is empty.
1827     while (new_space_front != new_space_.top()) {
1828       if (!NewSpacePage::IsAtEnd(new_space_front)) {
1829         HeapObject* object = HeapObject::FromAddress(new_space_front);
1830         new_space_front +=
1831           NewSpaceScavenger::IterateBody(object->map(), object);
1832       } else {
1833         new_space_front =
1834             NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1835       }
1836     }
1837
1838     // Promote and process all the to-be-promoted objects.
1839     {
1840       StoreBufferRebuildScope scope(this,
1841                                     store_buffer(),
1842                                     &ScavengeStoreBufferCallback);
1843       while (!promotion_queue()->is_empty()) {
1844         HeapObject* target;
1845         int size;
1846         promotion_queue()->remove(&target, &size);
1847
1848         // A promoted object might already be partially visited
1849         // during old space pointer iteration. Thus we search specifically
1850         // for pointers to the from semispace instead of looking for
1851         // pointers to new space.
1852         ASSERT(!target->IsMap());
1853         IterateAndMarkPointersToFromSpace(target->address(),
1854                                           target->address() + size,
1855                                           &ScavengeObject);
1856       }
1857     }
1858
1859     // Take another spin if there are now unswept objects in new space
1860     // (there are currently no more unswept promoted objects).
1861   } while (new_space_front != new_space_.top());
1862
1863   return new_space_front;
1864 }
1865
1866
1867 STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
1868
1869
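// Callers allocate one extra pointer-sized word for objects that need double
// alignment.  Depending on where the raw allocation landed, the filler word
// is placed either before the object (shifting it up by kPointerSize) or
// after it, so that the returned object is double aligned.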
1870 INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
1871                                               HeapObject* object,
1872                                               int size));
1873
1874 static HeapObject* EnsureDoubleAligned(Heap* heap,
1875                                        HeapObject* object,
1876                                        int size) {
1877   if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
1878     heap->CreateFillerObjectAt(object->address(), kPointerSize);
1879     return HeapObject::FromAddress(object->address() + kPointerSize);
1880   } else {
1881     heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
1882                                kPointerSize);
1883     return object;
1884   }
1885 }
1886
1887
1888 enum LoggingAndProfiling {
1889   LOGGING_AND_PROFILING_ENABLED,
1890   LOGGING_AND_PROFILING_DISABLED
1891 };
1892
1893
1894 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1895
1896
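// Scavenging is driven by a dispatch table of evacuation callbacks selected
// by visitor id.  The template parameters pick a table variant:
// marks_handling controls whether mark bits are transferred to the new
// location (needed while incremental marking is in progress), and
// logging_and_profiling_mode controls whether object moves are reported to
// the logger and profilers.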
1897 template<MarksHandling marks_handling,
1898          LoggingAndProfiling logging_and_profiling_mode>
1899 class ScavengingVisitor : public StaticVisitorBase {
1900  public:
1901   static void Initialize() {
1902     table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
1903     table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1904     table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1905     table_.Register(kVisitByteArray, &EvacuateByteArray);
1906     table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1907     table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1908
1909     table_.Register(kVisitNativeContext,
1910                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1911                         template VisitSpecialized<Context::kSize>);
1912
1913     table_.Register(kVisitConsString,
1914                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1915                         template VisitSpecialized<ConsString::kSize>);
1916
1917     table_.Register(kVisitSlicedString,
1918                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1919                         template VisitSpecialized<SlicedString::kSize>);
1920
1921     table_.Register(kVisitSymbol,
1922                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1923                         template VisitSpecialized<Symbol::kSize>);
1924
1925     table_.Register(kVisitSharedFunctionInfo,
1926                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1927                         template VisitSpecialized<SharedFunctionInfo::kSize>);
1928
1929     table_.Register(kVisitJSWeakMap,
1930                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1931                     Visit);
1932
1933     table_.Register(kVisitJSArrayBuffer,
1934                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1935                     Visit);
1936
1937     table_.Register(kVisitJSTypedArray,
1938                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1939                     Visit);
1940
1941     table_.Register(kVisitJSDataView,
1942                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1943                     Visit);
1944
1945     table_.Register(kVisitJSRegExp,
1946                     &ObjectEvacuationStrategy<POINTER_OBJECT>::
1947                     Visit);
1948
1949     if (marks_handling == IGNORE_MARKS) {
1950       table_.Register(kVisitJSFunction,
1951                       &ObjectEvacuationStrategy<POINTER_OBJECT>::
1952                           template VisitSpecialized<JSFunction::kSize>);
1953     } else {
1954       table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1955     }
1956
1957     table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1958                                    kVisitDataObject,
1959                                    kVisitDataObjectGeneric>();
1960
1961     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1962                                    kVisitJSObject,
1963                                    kVisitJSObjectGeneric>();
1964
1965     table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1966                                    kVisitStruct,
1967                                    kVisitStructGeneric>();
1968   }
1969
1970   static VisitorDispatchTable<ScavengingCallback>* GetTable() {
1971     return &table_;
1972   }
1973
1974  private:
1975   enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };
1976   enum SizeRestriction { SMALL, UNKNOWN_SIZE };
1977
1978   static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
1979     bool should_record = false;
1980 #ifdef DEBUG
1981     should_record = FLAG_heap_stats;
1982 #endif
1983     should_record = should_record || FLAG_log_gc;
1984     if (should_record) {
1985       if (heap->new_space()->Contains(obj)) {
1986         heap->new_space()->RecordAllocation(obj);
1987       } else {
1988         heap->new_space()->RecordPromotion(obj);
1989       }
1990     }
1991   }
1992
1993   // Helper function used during evacuation to copy a source object to an
1994   // allocated target object and update the forwarding pointer in the
1995   // source object.
1996   INLINE(static void MigrateObject(Heap* heap,
1997                                    HeapObject* source,
1998                                    HeapObject* target,
1999                                    int size)) {
2000     // Copy the content of source to target.
2001     heap->CopyBlock(target->address(), source->address(), size);
2002
2003     // Set the forwarding address.
2004     source->set_map_word(MapWord::FromForwardingAddress(target));
2005
2006     if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
2007       // Update NewSpace stats if necessary.
2008       RecordCopiedObject(heap, target);
2009       HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
2010       Isolate* isolate = heap->isolate();
2011       if (isolate->logger()->is_logging_code_events() ||
2012           isolate->cpu_profiler()->is_profiling()) {
2013         if (target->IsSharedFunctionInfo()) {
2014           PROFILE(isolate, SharedFunctionInfoMoveEvent(
2015               source->address(), target->address()));
2016         }
2017       }
2018     }
2019
2020     if (marks_handling == TRANSFER_MARKS) {
2021       if (Marking::TransferColor(source, target)) {
2022         MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
2023       }
2024     }
2025   }
2026
2027
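  // Core evacuation routine.  Objects that the heap decides to promote
  // (ShouldBePromoted) are moved to old data space, old pointer space or the
  // large object space, depending on their contents and size; promoted
  // pointer-carrying objects are queued so their fields can be rescanned for
  // from-space pointers.  If promotion is not chosen or fails, the object is
  // copied within new space instead.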
2028   template<ObjectContents object_contents,
2029            SizeRestriction size_restriction,
2030            int alignment>
2031   static inline void EvacuateObject(Map* map,
2032                                     HeapObject** slot,
2033                                     HeapObject* object,
2034                                     int object_size) {
2035     SLOW_ASSERT((size_restriction != SMALL) ||
2036                 (object_size <= Page::kMaxNonCodeHeapObjectSize));
2037     SLOW_ASSERT(object->Size() == object_size);
2038
2039     int allocation_size = object_size;
2040     if (alignment != kObjectAlignment) {
2041       ASSERT(alignment == kDoubleAlignment);
2042       allocation_size += kPointerSize;
2043     }
2044
2045     Heap* heap = map->GetHeap();
2046     if (heap->ShouldBePromoted(object->address(), object_size)) {
2047       MaybeObject* maybe_result;
2048
2049       if ((size_restriction != SMALL) &&
2050           (allocation_size > Page::kMaxNonCodeHeapObjectSize)) {
2051         maybe_result = heap->lo_space()->AllocateRaw(allocation_size,
2052                                                      NOT_EXECUTABLE);
2053       } else {
2054         if (object_contents == DATA_OBJECT) {
2055           maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
2056         } else {
2057           maybe_result =
2058               heap->old_pointer_space()->AllocateRaw(allocation_size);
2059         }
2060       }
2061
2062       Object* result = NULL;  // Initialization to please compiler.
2063       if (maybe_result->ToObject(&result)) {
2064         HeapObject* target = HeapObject::cast(result);
2065
2066         if (alignment != kObjectAlignment) {
2067           target = EnsureDoubleAligned(heap, target, allocation_size);
2068         }
2069
2070         // Order is important: slot might be inside of the target if target
2071         // was allocated over a dead object and slot comes from the store
2072         // buffer.
2073         *slot = target;
2074         MigrateObject(heap, object, target, object_size);
2075
2076         if (object_contents == POINTER_OBJECT) {
2077           if (map->instance_type() == JS_FUNCTION_TYPE) {
2078             heap->promotion_queue()->insert(
2079                 target, JSFunction::kNonWeakFieldsEndOffset);
2080           } else {
2081             heap->promotion_queue()->insert(target, object_size);
2082           }
2083         }
2084
2085         heap->tracer()->increment_promoted_objects_size(object_size);
2086         return;
2087       }
2088     }
2089     MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
2090     heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
2091     Object* result = allocation->ToObjectUnchecked();
2092     HeapObject* target = HeapObject::cast(result);
2093
2094     if (alignment != kObjectAlignment) {
2095       target = EnsureDoubleAligned(heap, target, allocation_size);
2096     }
2097
2098     // Order is important: slot might be inside of the target if target
2099     // was allocated over a dead object and slot comes from the store
2100     // buffer.
2101     *slot = target;
2102     MigrateObject(heap, object, target, object_size);
2103     return;
2104   }
2105
2106
2107   static inline void EvacuateJSFunction(Map* map,
2108                                         HeapObject** slot,
2109                                         HeapObject* object) {
2110     ObjectEvacuationStrategy<POINTER_OBJECT>::
2111         template VisitSpecialized<JSFunction::kSize>(map, slot, object);
2112
2113     HeapObject* target = *slot;
2114     MarkBit mark_bit = Marking::MarkBitFrom(target);
2115     if (Marking::IsBlack(mark_bit)) {
2116       // This object is black and it might not be rescanned by the marker.
2117       // We should explicitly record the code entry slot for compaction because
2118       // promotion queue processing (IterateAndMarkPointersToFromSpace) will
2119       // miss it as it is not HeapObject-tagged.
2120       Address code_entry_slot =
2121           target->address() + JSFunction::kCodeEntryOffset;
2122       Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
2123       map->GetHeap()->mark_compact_collector()->
2124           RecordCodeEntrySlot(code_entry_slot, code);
2125     }
2126   }
2127
2128
2129   static inline void EvacuateFixedArray(Map* map,
2130                                         HeapObject** slot,
2131                                         HeapObject* object) {
2132     int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
2133     EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(map,
2134                                                  slot,
2135                                                  object,
2136                                                  object_size);
2137   }
2138
2139
2140   static inline void EvacuateFixedDoubleArray(Map* map,
2141                                               HeapObject** slot,
2142                                               HeapObject* object) {
2143     int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
2144     int object_size = FixedDoubleArray::SizeFor(length);
2145     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kDoubleAlignment>(
2146         map,
2147         slot,
2148         object,
2149         object_size);
2150   }
2151
2152
2153   static inline void EvacuateByteArray(Map* map,
2154                                        HeapObject** slot,
2155                                        HeapObject* object) {
2156     int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
2157     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2158         map, slot, object, object_size);
2159   }
2160
2161
2162   static inline void EvacuateSeqOneByteString(Map* map,
2163                                             HeapObject** slot,
2164                                             HeapObject* object) {
2165     int object_size = SeqOneByteString::cast(object)->
2166         SeqOneByteStringSize(map->instance_type());
2167     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2168         map, slot, object, object_size);
2169   }
2170
2171
2172   static inline void EvacuateSeqTwoByteString(Map* map,
2173                                               HeapObject** slot,
2174                                               HeapObject* object) {
2175     int object_size = SeqTwoByteString::cast(object)->
2176         SeqTwoByteStringSize(map->instance_type());
2177     EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
2178         map, slot, object, object_size);
2179   }
2180
2181
2182   static inline bool IsShortcutCandidate(int type) {
2183     return ((type & kShortcutTypeMask) == kShortcutTypeTag);
2184   }
2185
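  // Cons strings whose second part is the empty string can be
  // short-circuited: the slot is redirected to the first part and the cons
  // wrapper only gets a forwarding map word, so the wrapper is never copied.
  // The shortcut is disabled when marks have to be transferred (and,
  // separately, when compacting; see SelectScavengingVisitorsTable).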
2186   static inline void EvacuateShortcutCandidate(Map* map,
2187                                                HeapObject** slot,
2188                                                HeapObject* object) {
2189     ASSERT(IsShortcutCandidate(map->instance_type()));
2190
2191     Heap* heap = map->GetHeap();
2192
2193     if (marks_handling == IGNORE_MARKS &&
2194         ConsString::cast(object)->unchecked_second() ==
2195         heap->empty_string()) {
2196       HeapObject* first =
2197           HeapObject::cast(ConsString::cast(object)->unchecked_first());
2198
2199       *slot = first;
2200
2201       if (!heap->InNewSpace(first)) {
2202         object->set_map_word(MapWord::FromForwardingAddress(first));
2203         return;
2204       }
2205
2206       MapWord first_word = first->map_word();
2207       if (first_word.IsForwardingAddress()) {
2208         HeapObject* target = first_word.ToForwardingAddress();
2209
2210         *slot = target;
2211         object->set_map_word(MapWord::FromForwardingAddress(target));
2212         return;
2213       }
2214
2215       heap->DoScavengeObject(first->map(), slot, first);
2216       object->set_map_word(MapWord::FromForwardingAddress(*slot));
2217       return;
2218     }
2219
2220     int object_size = ConsString::kSize;
2221     EvacuateObject<POINTER_OBJECT, SMALL, kObjectAlignment>(
2222         map, slot, object, object_size);
2223   }
2224
2225   template<ObjectContents object_contents>
2226   class ObjectEvacuationStrategy {
2227    public:
2228     template<int object_size>
2229     static inline void VisitSpecialized(Map* map,
2230                                         HeapObject** slot,
2231                                         HeapObject* object) {
2232       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2233           map, slot, object, object_size);
2234     }
2235
2236     static inline void Visit(Map* map,
2237                              HeapObject** slot,
2238                              HeapObject* object) {
2239       int object_size = map->instance_size();
2240       EvacuateObject<object_contents, SMALL, kObjectAlignment>(
2241           map, slot, object, object_size);
2242     }
2243   };
2244
2245   static VisitorDispatchTable<ScavengingCallback> table_;
2246 };
2247
2248
2249 template<MarksHandling marks_handling,
2250          LoggingAndProfiling logging_and_profiling_mode>
2251 VisitorDispatchTable<ScavengingCallback>
2252     ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
2253
2254
2255 static void InitializeScavengingVisitorsTables() {
2256   ScavengingVisitor<TRANSFER_MARKS,
2257                     LOGGING_AND_PROFILING_DISABLED>::Initialize();
2258   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
2259   ScavengingVisitor<TRANSFER_MARKS,
2260                     LOGGING_AND_PROFILING_ENABLED>::Initialize();
2261   ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
2262 }
2263
2264
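// Picks one of the four ScavengingVisitor dispatch tables: marks are
// transferred only while incremental marking is active, and the logging and
// profiling variant is used only when a logger or profiler needs to observe
// object moves.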
2265 void Heap::SelectScavengingVisitorsTable() {
2266   bool logging_and_profiling =
2267       isolate()->logger()->is_logging() ||
2268       isolate()->cpu_profiler()->is_profiling() ||
2269       (isolate()->heap_profiler() != NULL &&
2270        isolate()->heap_profiler()->is_profiling());
2271
2272   if (!incremental_marking()->IsMarking()) {
2273     if (!logging_and_profiling) {
2274       scavenging_visitors_table_.CopyFrom(
2275           ScavengingVisitor<IGNORE_MARKS,
2276                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2277     } else {
2278       scavenging_visitors_table_.CopyFrom(
2279           ScavengingVisitor<IGNORE_MARKS,
2280                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2281     }
2282   } else {
2283     if (!logging_and_profiling) {
2284       scavenging_visitors_table_.CopyFrom(
2285           ScavengingVisitor<TRANSFER_MARKS,
2286                             LOGGING_AND_PROFILING_DISABLED>::GetTable());
2287     } else {
2288       scavenging_visitors_table_.CopyFrom(
2289           ScavengingVisitor<TRANSFER_MARKS,
2290                             LOGGING_AND_PROFILING_ENABLED>::GetTable());
2291     }
2292
2293     if (incremental_marking()->IsCompacting()) {
2294       // When compacting, forbid short-circuiting of cons-strings.
2295       // Scavenging code relies on the fact that a new space object
2296       // can't be evacuated into an evacuation candidate, but
2297       // short-circuiting violates this assumption.
2298       scavenging_visitors_table_.Register(
2299           StaticVisitorBase::kVisitShortcutCandidate,
2300           scavenging_visitors_table_.GetVisitorById(
2301               StaticVisitorBase::kVisitConsString));
2302     }
2303   }
2304 }
2305
2306
2307 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
2308   SLOW_ASSERT(HEAP->InFromSpace(object));
2309   MapWord first_word = object->map_word();
2310   SLOW_ASSERT(!first_word.IsForwardingAddress());
2311   Map* map = first_word.ToMap();
2312   map->GetHeap()->DoScavengeObject(map, p, object);
2313 }
2314
2315
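// Allocates a map before the root set is fully available during
// bootstrapping.  Fields that require other roots (e.g. prototype,
// constructor, code cache, descriptors) are left unset here and patched
// later in CreateInitialMaps.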
2316 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
2317                                       int instance_size) {
2318   Object* result;
2319   MaybeObject* maybe_result = AllocateRawMap();
2320   if (!maybe_result->ToObject(&result)) return maybe_result;
2321
2322   // Map::cast cannot be used due to uninitialized map field.
2323   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
2324   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
2325   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
2326   reinterpret_cast<Map*>(result)->set_visitor_id(
2327         StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2328   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
2329   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
2330   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
2331   reinterpret_cast<Map*>(result)->set_bit_field(0);
2332   reinterpret_cast<Map*>(result)->set_bit_field2(0);
2333   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2334                    Map::OwnsDescriptors::encode(true);
2335   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
2336   return result;
2337 }
2338
2339
2340 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
2341                                int instance_size,
2342                                ElementsKind elements_kind) {
2343   Object* result;
2344   MaybeObject* maybe_result = AllocateRawMap();
2345   if (!maybe_result->To(&result)) return maybe_result;
2346
2347   Map* map = reinterpret_cast<Map*>(result);
2348   map->set_map_no_write_barrier(meta_map());
2349   map->set_instance_type(instance_type);
2350   map->set_visitor_id(
2351       StaticVisitorBase::GetVisitorId(instance_type, instance_size));
2352   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
2353   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
2354   map->set_instance_size(instance_size);
2355   map->set_inobject_properties(0);
2356   map->set_pre_allocated_property_fields(0);
2357   map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2358   map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2359                           SKIP_WRITE_BARRIER);
2360   map->init_back_pointer(undefined_value());
2361   map->set_unused_property_fields(0);
2362   map->set_instance_descriptors(empty_descriptor_array());
2363   map->set_bit_field(0);
2364   map->set_bit_field2(1 << Map::kIsExtensible);
2365   int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
2366                    Map::OwnsDescriptors::encode(true);
2367   map->set_bit_field3(bit_field3);
2368   map->set_elements_kind(elements_kind);
2369
2370   return map;
2371 }
2372
2373
2374 MaybeObject* Heap::AllocateCodeCache() {
2375   CodeCache* code_cache;
2376   { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
2377     if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
2378   }
2379   code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
2380   code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
2381   return code_cache;
2382 }
2383
2384
2385 MaybeObject* Heap::AllocatePolymorphicCodeCache() {
2386   return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
2387 }
2388
2389
2390 MaybeObject* Heap::AllocateAccessorPair() {
2391   AccessorPair* accessors;
2392   { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
2393     if (!maybe_accessors->To(&accessors)) return maybe_accessors;
2394   }
2395   accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
2396   accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
2397   return accessors;
2398 }
2399
2400
2401 MaybeObject* Heap::AllocateTypeFeedbackInfo() {
2402   TypeFeedbackInfo* info;
2403   { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
2404     if (!maybe_info->To(&info)) return maybe_info;
2405   }
2406   info->initialize_storage();
2407   info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
2408                                 SKIP_WRITE_BARRIER);
2409   return info;
2410 }
2411
2412
2413 MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
2414   AliasedArgumentsEntry* entry;
2415   { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
2416     if (!maybe_entry->To(&entry)) return maybe_entry;
2417   }
2418   entry->set_aliased_context_slot(aliased_context_slot);
2419   return entry;
2420 }
2421
2422
2423 const Heap::StringTypeTable Heap::string_type_table[] = {
2424 #define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
2425   {type, size, k##camel_name##MapRootIndex},
2426   STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
2427 #undef STRING_TYPE_ELEMENT
2428 };
2429
2430
2431 const Heap::ConstantStringTable Heap::constant_string_table[] = {
2432 #define CONSTANT_STRING_ELEMENT(name, contents)                                \
2433   {contents, k##name##RootIndex},
2434   INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
2435 #undef CONSTANT_STRING_ELEMENT
2436 };
2437
2438
2439 const Heap::StructTable Heap::struct_table[] = {
2440 #define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
2441   { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
2442   STRUCT_LIST(STRUCT_TABLE_ELEMENT)
2443 #undef STRUCT_TABLE_ELEMENT
2444 };
2445
2446
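// Bootstraps the map hierarchy: the meta map and a few partial maps are
// created first, then the empty fixed array and the null and undefined
// oddballs, after which the partial maps are patched with the now-available
// roots before the remaining maps are allocated normally.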
2447 bool Heap::CreateInitialMaps() {
2448   Object* obj;
2449   { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
2450     if (!maybe_obj->ToObject(&obj)) return false;
2451   }
2452   // Map::cast cannot be used due to uninitialized map field.
2453   Map* new_meta_map = reinterpret_cast<Map*>(obj);
2454   set_meta_map(new_meta_map);
2455   new_meta_map->set_map(new_meta_map);
2456
2457   { MaybeObject* maybe_obj =
2458         AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2459     if (!maybe_obj->ToObject(&obj)) return false;
2460   }
2461   set_fixed_array_map(Map::cast(obj));
2462
2463   { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
2464     if (!maybe_obj->ToObject(&obj)) return false;
2465   }
2466   set_oddball_map(Map::cast(obj));
2467
2468   // Allocate the empty array.
2469   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2470     if (!maybe_obj->ToObject(&obj)) return false;
2471   }
2472   set_empty_fixed_array(FixedArray::cast(obj));
2473
2474   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2475     if (!maybe_obj->ToObject(&obj)) return false;
2476   }
2477   set_null_value(Oddball::cast(obj));
2478   Oddball::cast(obj)->set_kind(Oddball::kNull);
2479
2480   { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2481     if (!maybe_obj->ToObject(&obj)) return false;
2482   }
2483   set_undefined_value(Oddball::cast(obj));
2484   Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2485   ASSERT(!InNewSpace(undefined_value()));
2486
2487   // Allocate the empty descriptor array.
2488   { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
2489     if (!maybe_obj->ToObject(&obj)) return false;
2490   }
2491   set_empty_descriptor_array(DescriptorArray::cast(obj));
2492
2493   // Fix the instance_descriptors for the existing maps.
2494   meta_map()->set_code_cache(empty_fixed_array());
2495   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2496   meta_map()->init_back_pointer(undefined_value());
2497   meta_map()->set_instance_descriptors(empty_descriptor_array());
2498
2499   fixed_array_map()->set_code_cache(empty_fixed_array());
2500   fixed_array_map()->set_dependent_code(
2501       DependentCode::cast(empty_fixed_array()));
2502   fixed_array_map()->init_back_pointer(undefined_value());
2503   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
2504
2505   oddball_map()->set_code_cache(empty_fixed_array());
2506   oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
2507   oddball_map()->init_back_pointer(undefined_value());
2508   oddball_map()->set_instance_descriptors(empty_descriptor_array());
2509
2510   // Fix prototype object for existing maps.
2511   meta_map()->set_prototype(null_value());
2512   meta_map()->set_constructor(null_value());
2513
2514   fixed_array_map()->set_prototype(null_value());
2515   fixed_array_map()->set_constructor(null_value());
2516
2517   oddball_map()->set_prototype(null_value());
2518   oddball_map()->set_constructor(null_value());
2519
2520   { MaybeObject* maybe_obj =
2521         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2522     if (!maybe_obj->ToObject(&obj)) return false;
2523   }
2524   set_fixed_cow_array_map(Map::cast(obj));
2525   ASSERT(fixed_array_map() != fixed_cow_array_map());
2526
2527   { MaybeObject* maybe_obj =
2528         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2529     if (!maybe_obj->ToObject(&obj)) return false;
2530   }
2531   set_scope_info_map(Map::cast(obj));
2532
2533   { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
2534     if (!maybe_obj->ToObject(&obj)) return false;
2535   }
2536   set_heap_number_map(Map::cast(obj));
2537
2538   { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
2539     if (!maybe_obj->ToObject(&obj)) return false;
2540   }
2541   set_symbol_map(Map::cast(obj));
2542
2543   { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
2544     if (!maybe_obj->ToObject(&obj)) return false;
2545   }
2546   set_foreign_map(Map::cast(obj));
2547
2548   for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
2549     const StringTypeTable& entry = string_type_table[i];
2550     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2551       if (!maybe_obj->ToObject(&obj)) return false;
2552     }
2553     roots_[entry.index] = Map::cast(obj);
2554   }
2555
2556   { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
2557     if (!maybe_obj->ToObject(&obj)) return false;
2558   }
2559   set_undetectable_string_map(Map::cast(obj));
2560   Map::cast(obj)->set_is_undetectable();
2561
2562   { MaybeObject* maybe_obj =
2563         AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
2564     if (!maybe_obj->ToObject(&obj)) return false;
2565   }
2566   set_undetectable_ascii_string_map(Map::cast(obj));
2567   Map::cast(obj)->set_is_undetectable();
2568
2569   { MaybeObject* maybe_obj =
2570         AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
2571     if (!maybe_obj->ToObject(&obj)) return false;
2572   }
2573   set_fixed_double_array_map(Map::cast(obj));
2574
2575   { MaybeObject* maybe_obj =
2576         AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
2577     if (!maybe_obj->ToObject(&obj)) return false;
2578   }
2579   set_byte_array_map(Map::cast(obj));
2580
2581   { MaybeObject* maybe_obj =
2582         AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
2583     if (!maybe_obj->ToObject(&obj)) return false;
2584   }
2585   set_free_space_map(Map::cast(obj));
2586
2587   { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
2588     if (!maybe_obj->ToObject(&obj)) return false;
2589   }
2590   set_empty_byte_array(ByteArray::cast(obj));
2591
2592   { MaybeObject* maybe_obj =
2593         AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
2594     if (!maybe_obj->ToObject(&obj)) return false;
2595   }
2596   set_external_pixel_array_map(Map::cast(obj));
2597
2598   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
2599                                          ExternalArray::kAlignedSize);
2600     if (!maybe_obj->ToObject(&obj)) return false;
2601   }
2602   set_external_byte_array_map(Map::cast(obj));
2603
2604   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
2605                                          ExternalArray::kAlignedSize);
2606     if (!maybe_obj->ToObject(&obj)) return false;
2607   }
2608   set_external_unsigned_byte_array_map(Map::cast(obj));
2609
2610   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
2611                                          ExternalArray::kAlignedSize);
2612     if (!maybe_obj->ToObject(&obj)) return false;
2613   }
2614   set_external_short_array_map(Map::cast(obj));
2615
2616   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
2617                                          ExternalArray::kAlignedSize);
2618     if (!maybe_obj->ToObject(&obj)) return false;
2619   }
2620   set_external_unsigned_short_array_map(Map::cast(obj));
2621
2622   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
2623                                          ExternalArray::kAlignedSize);
2624     if (!maybe_obj->ToObject(&obj)) return false;
2625   }
2626   set_external_int_array_map(Map::cast(obj));
2627
2628   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
2629                                          ExternalArray::kAlignedSize);
2630     if (!maybe_obj->ToObject(&obj)) return false;
2631   }
2632   set_external_unsigned_int_array_map(Map::cast(obj));
2633
2634   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
2635                                          ExternalArray::kAlignedSize);
2636     if (!maybe_obj->ToObject(&obj)) return false;
2637   }
2638   set_external_float_array_map(Map::cast(obj));
2639
2640   { MaybeObject* maybe_obj =
2641         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2642     if (!maybe_obj->ToObject(&obj)) return false;
2643   }
2644   set_non_strict_arguments_elements_map(Map::cast(obj));
2645
2646   { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
2647                                          ExternalArray::kAlignedSize);
2648     if (!maybe_obj->ToObject(&obj)) return false;
2649   }
2650   set_external_double_array_map(Map::cast(obj));
2651
2652   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
2653     if (!maybe_obj->ToObject(&obj)) return false;
2654   }
2655   set_empty_external_byte_array(ExternalArray::cast(obj));
2656
2657   { MaybeObject* maybe_obj =
2658         AllocateEmptyExternalArray(kExternalUnsignedByteArray);
2659     if (!maybe_obj->ToObject(&obj)) return false;
2660   }
2661   set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));
2662
2663   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
2664     if (!maybe_obj->ToObject(&obj)) return false;
2665   }
2666   set_empty_external_short_array(ExternalArray::cast(obj));
2667
2668   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
2669       kExternalUnsignedShortArray);
2670     if (!maybe_obj->ToObject(&obj)) return false;
2671   }
2672   set_empty_external_unsigned_short_array(ExternalArray::cast(obj));
2673
2674   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
2675     if (!maybe_obj->ToObject(&obj)) return false;
2676   }
2677   set_empty_external_int_array(ExternalArray::cast(obj));
2678
2679   { MaybeObject* maybe_obj =
2680         AllocateEmptyExternalArray(kExternalUnsignedIntArray);
2681     if (!maybe_obj->ToObject(&obj)) return false;
2682   }
2683   set_empty_external_unsigned_int_array(ExternalArray::cast(obj));
2684
2685   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
2686     if (!maybe_obj->ToObject(&obj)) return false;
2687   }
2688   set_empty_external_float_array(ExternalArray::cast(obj));
2689
2690   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
2691     if (!maybe_obj->ToObject(&obj)) return false;
2692   }
2693   set_empty_external_double_array(ExternalArray::cast(obj));
2694
2695   { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
2696     if (!maybe_obj->ToObject(&obj)) return false;
2697   }
2698   set_empty_external_pixel_array(ExternalArray::cast(obj));
2699
2700   { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
2701     if (!maybe_obj->ToObject(&obj)) return false;
2702   }
2703   set_code_map(Map::cast(obj));
2704
2705   { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
2706     if (!maybe_obj->ToObject(&obj)) return false;
2707   }
2708   set_cell_map(Map::cast(obj));
2709
2710   { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
2711                                          PropertyCell::kSize);
2712     if (!maybe_obj->ToObject(&obj)) return false;
2713   }
2714   set_global_property_cell_map(Map::cast(obj));
2715
2716   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
2717     if (!maybe_obj->ToObject(&obj)) return false;
2718   }
2719   set_one_pointer_filler_map(Map::cast(obj));
2720
2721   { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
2722     if (!maybe_obj->ToObject(&obj)) return false;
2723   }
2724   set_two_pointer_filler_map(Map::cast(obj));
2725
2726   for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
2727     const StructTable& entry = struct_table[i];
2728     { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
2729       if (!maybe_obj->ToObject(&obj)) return false;
2730     }
2731     roots_[entry.index] = Map::cast(obj);
2732   }
2733
2734   { MaybeObject* maybe_obj =
2735         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2736     if (!maybe_obj->ToObject(&obj)) return false;
2737   }
2738   set_hash_table_map(Map::cast(obj));
2739
2740   { MaybeObject* maybe_obj =
2741         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2742     if (!maybe_obj->ToObject(&obj)) return false;
2743   }
2744   set_function_context_map(Map::cast(obj));
2745
2746   { MaybeObject* maybe_obj =
2747         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2748     if (!maybe_obj->ToObject(&obj)) return false;
2749   }
2750   set_catch_context_map(Map::cast(obj));
2751
2752   { MaybeObject* maybe_obj =
2753         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2754     if (!maybe_obj->ToObject(&obj)) return false;
2755   }
2756   set_with_context_map(Map::cast(obj));
2757
2758   { MaybeObject* maybe_obj =
2759         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2760     if (!maybe_obj->ToObject(&obj)) return false;
2761   }
2762   set_block_context_map(Map::cast(obj));
2763
2764   { MaybeObject* maybe_obj =
2765         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2766     if (!maybe_obj->ToObject(&obj)) return false;
2767   }
2768   set_module_context_map(Map::cast(obj));
2769
2770   { MaybeObject* maybe_obj =
2771         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2772     if (!maybe_obj->ToObject(&obj)) return false;
2773   }
2774   set_global_context_map(Map::cast(obj));
2775
2776   { MaybeObject* maybe_obj =
2777         AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2778     if (!maybe_obj->ToObject(&obj)) return false;
2779   }
2780   Map* native_context_map = Map::cast(obj);
2781   native_context_map->set_dictionary_map(true);
2782   native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2783   set_native_context_map(native_context_map);
2784
2785   { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2786                                          SharedFunctionInfo::kAlignedSize);
2787     if (!maybe_obj->ToObject(&obj)) return false;
2788   }
2789   set_shared_function_info_map(Map::cast(obj));
2790
2791   { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2792                                          JSMessageObject::kSize);
2793     if (!maybe_obj->ToObject(&obj)) return false;
2794   }
2795   set_message_object_map(Map::cast(obj));
2796
2797   Map* external_map;
2798   { MaybeObject* maybe_obj =
2799         AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2800     if (!maybe_obj->To(&external_map)) return false;
2801   }
2802   external_map->set_is_extensible(false);
2803   set_external_map(external_map);
2804
2805   ASSERT(!InNewSpace(empty_fixed_array()));
2806   return true;
2807 }
2808
2809
2810 MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2811   // Statically ensure that it is safe to allocate heap numbers in paged
2812   // spaces.
2813   STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2814   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2815
2816   Object* result;
2817   { MaybeObject* maybe_result =
2818         AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
2819     if (!maybe_result->ToObject(&result)) return maybe_result;
2820   }
2821
2822   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2823   HeapNumber::cast(result)->set_value(value);
2824   return result;
2825 }
2826
2827
2828 MaybeObject* Heap::AllocateHeapNumber(double value) {
2829   // Use the general version if we're forced to always allocate.
2830   if (always_allocate()) return AllocateHeapNumber(value, TENURED);
2831
2832   // This version of AllocateHeapNumber is optimized for
2833   // allocation in new space.
2834   STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxNonCodeHeapObjectSize);
2835   Object* result;
2836   { MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
2837     if (!maybe_result->ToObject(&result)) return maybe_result;
2838   }
2839   HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2840   HeapNumber::cast(result)->set_value(value);
2841   return result;
2842 }
2843
2844
2845 MaybeObject* Heap::AllocateCell(Object* value) {
2846   Object* result;
2847   { MaybeObject* maybe_result = AllocateRawCell();
2848     if (!maybe_result->ToObject(&result)) return maybe_result;
2849   }
2850   HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2851   Cell::cast(result)->set_value(value);
2852   return result;
2853 }
2854
2855
2856 MaybeObject* Heap::AllocatePropertyCell(Object* value) {
2857   Object* result;
2858   { MaybeObject* maybe_result = AllocateRawPropertyCell();
2859     if (!maybe_result->ToObject(&result)) return maybe_result;
2860   }
2861   HeapObject::cast(result)->set_map_no_write_barrier(
2862       global_property_cell_map());
2863   PropertyCell* cell = PropertyCell::cast(result);
2864   cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2865                            SKIP_WRITE_BARRIER);
2866   cell->set_value(value);
2867   cell->set_type(Type::None());
2868   return result;
2869 }
2870
2871
2872 MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2873   Box* result;
2874   MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2875   if (!maybe_result->To(&result)) return maybe_result;
2876   result->set_value(value);
2877   return result;
2878 }
2879
2880
2881 MaybeObject* Heap::CreateOddball(const char* to_string,
2882                                  Object* to_number,
2883                                  byte kind) {
2884   Object* result;
2885   { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2886     if (!maybe_result->ToObject(&result)) return maybe_result;
2887   }
2888   return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2889 }
2890
2891
2892 bool Heap::CreateApiObjects() {
2893   Object* obj;
2894
2895   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2896     if (!maybe_obj->ToObject(&obj)) return false;
2897   }
2898   // Don't use Smi-only elements optimizations for objects with the neander
2899   // map. There are too many cases where element values are set directly,
2900   // without a bottleneck to trap the Smi-only -> fast elements transition,
2901   // and there appears to be no benefit in optimizing this case.
2902   Map* new_neander_map = Map::cast(obj);
2903   new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
2904   set_neander_map(new_neander_map);
2905
2906   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2907     if (!maybe_obj->ToObject(&obj)) return false;
2908   }
2909   Object* elements;
2910   { MaybeObject* maybe_elements = AllocateFixedArray(2);
2911     if (!maybe_elements->ToObject(&elements)) return false;
2912   }
2913   FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2914   JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
2915   set_message_listeners(JSObject::cast(obj));
2916
2917   return true;
2918 }
2919
2920
2921 void Heap::CreateJSEntryStub() {
2922   JSEntryStub stub;
2923   set_js_entry_code(*stub.GetCode(isolate()));
2924 }
2925
2926
2927 void Heap::CreateJSConstructEntryStub() {
2928   JSConstructEntryStub stub;
2929   set_js_construct_entry_code(*stub.GetCode(isolate()));
2930 }
2931
2932
2933 void Heap::CreateFixedStubs() {
2934   // Here we create roots for fixed stubs. They are needed at GC
2935   // for cooking and uncooking (check out frames.cc).
2936   // This eliminates the need for a dictionary lookup in the
2937   // stub cache for these stubs.
2938   HandleScope scope(isolate());
2939   // gcc-4.4 has problems generating correct code for the following snippet:
2940   // {  JSEntryStub stub;
2941   //    js_entry_code_ = *stub.GetCode();
2942   // }
2943   // {  JSConstructEntryStub stub;
2944   //    js_construct_entry_code_ = *stub.GetCode();
2945   // }
2946   // To work around the problem, make separate functions without inlining.
2947   Heap::CreateJSEntryStub();
2948   Heap::CreateJSConstructEntryStub();
2949
2950   // Create stubs that should be there, so we don't unexpectedly have to
2951   // create them if we need them during the creation of another stub.
2952   // Stub creation mixes raw pointers and handles in an unsafe manner so
2953   // we cannot create stubs while we are creating stubs.
2954   CodeStub::GenerateStubsAheadOfTime(isolate());
2955 }
2956
2957
2958 bool Heap::CreateInitialObjects() {
2959   Object* obj;
2960
2961   // The -0 value must be set before NumberFromDouble works.
2962   { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2963     if (!maybe_obj->ToObject(&obj)) return false;
2964   }
2965   set_minus_zero_value(HeapNumber::cast(obj));
2966   ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
2967
2968   { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2969     if (!maybe_obj->ToObject(&obj)) return false;
2970   }
2971   set_nan_value(HeapNumber::cast(obj));
2972
2973   { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2974     if (!maybe_obj->ToObject(&obj)) return false;
2975   }
2976   set_infinity_value(HeapNumber::cast(obj));
2977
2978   // The hole has not been created yet, but we want to put something
2979   // predictable in the gaps in the string table, so let's make that Smi zero.
2980   set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
2981
2982   // Allocate initial string table.
2983   { MaybeObject* maybe_obj =
2984         StringTable::Allocate(this, kInitialStringTableSize);
2985     if (!maybe_obj->ToObject(&obj)) return false;
2986   }
2987   // Don't use set_string_table() due to asserts.
2988   roots_[kStringTableRootIndex] = obj;
2989
2990   // Finish initializing oddballs after creating the string table.
2991   { MaybeObject* maybe_obj =
2992         undefined_value()->Initialize("undefined",
2993                                       nan_value(),
2994                                       Oddball::kUndefined);
2995     if (!maybe_obj->ToObject(&obj)) return false;
2996   }
2997
2998   // Initialize the null_value.
2999   { MaybeObject* maybe_obj =
3000         null_value()->Initialize("null", Smi::FromInt(0), Oddball::kNull);
3001     if (!maybe_obj->ToObject(&obj)) return false;
3002   }
3003
3004   { MaybeObject* maybe_obj = CreateOddball("true",
3005                                            Smi::FromInt(1),
3006                                            Oddball::kTrue);
3007     if (!maybe_obj->ToObject(&obj)) return false;
3008   }
3009   set_true_value(Oddball::cast(obj));
3010
3011   { MaybeObject* maybe_obj = CreateOddball("false",
3012                                            Smi::FromInt(0),
3013                                            Oddball::kFalse);
3014     if (!maybe_obj->ToObject(&obj)) return false;
3015   }
3016   set_false_value(Oddball::cast(obj));
3017
3018   { MaybeObject* maybe_obj = CreateOddball("hole",
3019                                            Smi::FromInt(-1),
3020                                            Oddball::kTheHole);
3021     if (!maybe_obj->ToObject(&obj)) return false;
3022   }
3023   set_the_hole_value(Oddball::cast(obj));
3024
3025   { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3026                                            Smi::FromInt(-1),
3027                                            Oddball::kUninitialized);
3028     if (!maybe_obj->ToObject(&obj)) return false;
3029   }
3030   set_uninitialized_value(Oddball::cast(obj));
3031
3032   { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3033                                            Smi::FromInt(-4),
3034                                            Oddball::kArgumentMarker);
3035     if (!maybe_obj->ToObject(&obj)) return false;
3036   }
3037   set_arguments_marker(Oddball::cast(obj));
3038
3039   { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3040                                            Smi::FromInt(-2),
3041                                            Oddball::kOther);
3042     if (!maybe_obj->ToObject(&obj)) return false;
3043   }
3044   set_no_interceptor_result_sentinel(obj);
3045
3046   { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3047                                            Smi::FromInt(-3),
3048                                            Oddball::kOther);
3049     if (!maybe_obj->ToObject(&obj)) return false;
3050   }
3051   set_termination_exception(obj);
3052
3053   for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3054     { MaybeObject* maybe_obj =
3055           InternalizeUtf8String(constant_string_table[i].contents);
3056       if (!maybe_obj->ToObject(&obj)) return false;
3057     }
3058     roots_[constant_string_table[i].index] = String::cast(obj);
3059   }
3060
3061   // Allocate the hidden string which is used to identify the hidden properties
3062   // in JSObjects. The hash code has a special value so that it will not match
3063   // the empty string when searching for the property. It cannot be part of the
3064   // loop above because it needs to be allocated manually with the special
3065   // hash code in place. The hash code for the hidden_string is zero to ensure
3066   // that it will always be at the first entry in property descriptors.
3067   { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3068       OneByteVector("", 0), String::kEmptyStringHash);
3069     if (!maybe_obj->ToObject(&obj)) return false;
3070   }
3071   hidden_string_ = String::cast(obj);
3072
3073   // Allocate the code_stubs dictionary. The initial size is set to avoid
3074   // expanding the dictionary during bootstrapping.
3075   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3076     if (!maybe_obj->ToObject(&obj)) return false;
3077   }
3078   set_code_stubs(UnseededNumberDictionary::cast(obj));
3079
3080
3081   // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3082   // is set to avoid expanding the dictionary during bootstrapping.
3083   { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3084     if (!maybe_obj->ToObject(&obj)) return false;
3085   }
3086   set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3087
3088   { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3089     if (!maybe_obj->ToObject(&obj)) return false;
3090   }
3091   set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3092
3093   set_instanceof_cache_function(Smi::FromInt(0));
3094   set_instanceof_cache_map(Smi::FromInt(0));
3095   set_instanceof_cache_answer(Smi::FromInt(0));
3096
3097   CreateFixedStubs();
3098
3099   // Allocate the dictionary of intrinsic function names.
3100   { MaybeObject* maybe_obj =
3101         NameDictionary::Allocate(this, Runtime::kNumFunctions);
3102     if (!maybe_obj->ToObject(&obj)) return false;
3103   }
3104   { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3105                                                                        obj);
3106     if (!maybe_obj->ToObject(&obj)) return false;
3107   }
3108   set_intrinsic_function_names(NameDictionary::cast(obj));
3109
3110   { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3111     if (!maybe_obj->ToObject(&obj)) return false;
3112   }
3113   set_number_string_cache(FixedArray::cast(obj));
3114
3115   // Allocate cache for single character one byte strings.
3116   { MaybeObject* maybe_obj =
3117         AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3118     if (!maybe_obj->ToObject(&obj)) return false;
3119   }
3120   set_single_character_string_cache(FixedArray::cast(obj));
3121
3122   // Allocate cache for string split.
3123   { MaybeObject* maybe_obj = AllocateFixedArray(
3124       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3125     if (!maybe_obj->ToObject(&obj)) return false;
3126   }
3127   set_string_split_cache(FixedArray::cast(obj));
3128
3129   { MaybeObject* maybe_obj = AllocateFixedArray(
3130       RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3131     if (!maybe_obj->ToObject(&obj)) return false;
3132   }
3133   set_regexp_multiple_cache(FixedArray::cast(obj));
3134
3135   // Allocate cache for external strings pointing to native source code.
3136   { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3137     if (!maybe_obj->ToObject(&obj)) return false;
3138   }
3139   set_natives_source_cache(FixedArray::cast(obj));
3140
3141   // Allocate object to hold object observation state.
3142   { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3143     if (!maybe_obj->ToObject(&obj)) return false;
3144   }
3145   { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3146     if (!maybe_obj->ToObject(&obj)) return false;
3147   }
3148   set_observation_state(JSObject::cast(obj));
3149
3150   { MaybeObject* maybe_obj = AllocateSymbol();
3151     if (!maybe_obj->ToObject(&obj)) return false;
3152   }
3153   set_frozen_symbol(Symbol::cast(obj));
3154
3155   { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3156     if (!maybe_obj->ToObject(&obj)) return false;
3157   }
3158   SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3159   set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3160
3161   // Handling of script id generation is in Factory::NewScript.
3162   set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3163
3164   // Initialize keyed lookup cache.
3165   isolate_->keyed_lookup_cache()->Clear();
3166
3167   // Initialize context slot cache.
3168   isolate_->context_slot_cache()->Clear();
3169
3170   // Initialize descriptor cache.
3171   isolate_->descriptor_lookup_cache()->Clear();
3172
3173   // Initialize compilation cache.
3174   isolate_->compilation_cache()->Clear();
3175
3176   return true;
3177 }
3178
3179
3180 bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3181   RootListIndex writable_roots[] = {
3182     kStoreBufferTopRootIndex,
3183     kStackLimitRootIndex,
3184     kNumberStringCacheRootIndex,
3185     kInstanceofCacheFunctionRootIndex,
3186     kInstanceofCacheMapRootIndex,
3187     kInstanceofCacheAnswerRootIndex,
3188     kCodeStubsRootIndex,
3189     kNonMonomorphicCacheRootIndex,
3190     kPolymorphicCodeCacheRootIndex,
3191     kLastScriptIdRootIndex,
3192     kEmptyScriptRootIndex,
3193     kRealStackLimitRootIndex,
3194     kArgumentsAdaptorDeoptPCOffsetRootIndex,
3195     kConstructStubDeoptPCOffsetRootIndex,
3196     kGetterStubDeoptPCOffsetRootIndex,
3197     kSetterStubDeoptPCOffsetRootIndex,
3198     kStringTableRootIndex,
3199   };
3200
3201   for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3202     if (root_index == writable_roots[i])
3203       return true;
3204   }
3205   return false;
3206 }
3207
3208
3209 Object* RegExpResultsCache::Lookup(Heap* heap,
3210                                    String* key_string,
3211                                    Object* key_pattern,
3212                                    ResultsCacheType type) {
3213   FixedArray* cache;
3214   if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3215   if (type == STRING_SPLIT_SUBSTRINGS) {
3216     ASSERT(key_pattern->IsString());
3217     if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3218     cache = heap->string_split_cache();
3219   } else {
3220     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3221     ASSERT(key_pattern->IsFixedArray());
3222     cache = heap->regexp_multiple_cache();
3223   }
3224
3225   uint32_t hash = key_string->Hash();
3226   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3227       ~(kArrayEntriesPerCacheEntry - 1));
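  // The cache is probed like a two-way set-associative table: a key hashes to
  // a primary entry of kArrayEntriesPerCacheEntry slots and, failing that, to
  // the entry immediately after it (wrapping at kRegExpResultsCacheSize).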
3228   if (cache->get(index + kStringOffset) == key_string &&
3229       cache->get(index + kPatternOffset) == key_pattern) {
3230     return cache->get(index + kArrayOffset);
3231   }
3232   index =
3233       ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3234   if (cache->get(index + kStringOffset) == key_string &&
3235       cache->get(index + kPatternOffset) == key_pattern) {
3236     return cache->get(index + kArrayOffset);
3237   }
3238   return Smi::FromInt(0);
3239 }
3240
3241
3242 void RegExpResultsCache::Enter(Heap* heap,
3243                                String* key_string,
3244                                Object* key_pattern,
3245                                FixedArray* value_array,
3246                                ResultsCacheType type) {
3247   FixedArray* cache;
3248   if (!key_string->IsInternalizedString()) return;
3249   if (type == STRING_SPLIT_SUBSTRINGS) {
3250     ASSERT(key_pattern->IsString());
3251     if (!key_pattern->IsInternalizedString()) return;
3252     cache = heap->string_split_cache();
3253   } else {
3254     ASSERT(type == REGEXP_MULTIPLE_INDICES);
3255     ASSERT(key_pattern->IsFixedArray());
3256     cache = heap->regexp_multiple_cache();
3257   }
3258
3259   uint32_t hash = key_string->Hash();
3260   uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3261       ~(kArrayEntriesPerCacheEntry - 1));
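  // Insertion mirrors the probing in Lookup: use the primary entry if it is
  // empty, otherwise the secondary entry; if both are taken, the secondary
  // entry is cleared and the new value overwrites the primary entry.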
3262   if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3263     cache->set(index + kStringOffset, key_string);
3264     cache->set(index + kPatternOffset, key_pattern);
3265     cache->set(index + kArrayOffset, value_array);
3266   } else {
3267     uint32_t index2 =
3268         ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3269     if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3270       cache->set(index2 + kStringOffset, key_string);
3271       cache->set(index2 + kPatternOffset, key_pattern);
3272       cache->set(index2 + kArrayOffset, value_array);
3273     } else {
3274       cache->set(index2 + kStringOffset, Smi::FromInt(0));
3275       cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3276       cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3277       cache->set(index + kStringOffset, key_string);
3278       cache->set(index + kPatternOffset, key_pattern);
3279       cache->set(index + kArrayOffset, value_array);
3280     }
3281   }
3282   // If the array is a reasonably short list of substrings, convert it into a
3283   // list of internalized strings.
3284   if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3285     for (int i = 0; i < value_array->length(); i++) {
3286       String* str = String::cast(value_array->get(i));
3287       Object* internalized_str;
3288       MaybeObject* maybe_string = heap->InternalizeString(str);
3289       if (maybe_string->ToObject(&internalized_str)) {
3290         value_array->set(i, internalized_str);
3291       }
3292     }
3293   }
3294   // Convert backing store to a copy-on-write array.
3295   value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3296 }
3297
3298
3299 void RegExpResultsCache::Clear(FixedArray* cache) {
3300   for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3301     cache->set(i, Smi::FromInt(0));
3302   }
3303 }
3304
3305
3306 MaybeObject* Heap::AllocateInitialNumberStringCache() {
3307   MaybeObject* maybe_obj =
3308       AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3309   return maybe_obj;
3310 }
3311
3312
3313 int Heap::FullSizeNumberStringCacheLength() {
3314   // Compute the size of the number string cache based on the max newspace
3315   // size.  The number string cache has a minimum size of twice the initial
3316   // cache size to ensure that it is bigger after being made 'full size'.
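  // For example, with an 8 MB max semispace this gives 8 MB / 512 = 16384
  // entries, exactly the 0x4000 cap, so (assuming the initial cache is
  // smaller) the cache becomes a FixedArray of length 32768.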
3317   int number_string_cache_size = max_semispace_size_ / 512;
3318   number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3319                                  Min(0x4000, number_string_cache_size));
3320   // There is a string and a number per entry so the length is twice the number
3321   // of entries.
3322   return number_string_cache_size * 2;
3323 }
3324
3325
3326 void Heap::AllocateFullSizeNumberStringCache() {
3327   // The idea is to have a small number string cache in the snapshot to keep
3328   // boot-time memory usage down.  If we already need to expand the number
3329   // string cache while creating the snapshot, that plan has not worked out.
3330   ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3331   MaybeObject* maybe_obj =
3332       AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3333   Object* new_cache;
3334   if (maybe_obj->ToObject(&new_cache)) {
3335     // We don't bother to repopulate the cache with entries from the old cache.
3336     // It will be repopulated soon enough with new strings.
3337     set_number_string_cache(FixedArray::cast(new_cache));
3338   }
3339   // If allocation fails then we just return without doing anything.  It is only
3340   // a cache, so best effort is OK here.
3341 }
3342
3343
3344 void Heap::FlushNumberStringCache() {
3345   // Flush the number to string cache.
3346   int len = number_string_cache()->length();
3347   for (int i = 0; i < len; i++) {
3348     number_string_cache()->set_undefined(this, i);
3349   }
3350 }
3351
3352
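// Folds the 64-bit bit pattern of the double into a 32-bit hash by xoring the
// low and high halves.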
3353 static inline int double_get_hash(double d) {
3354   DoubleRepresentation rep(d);
3355   return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3356 }
3357
3358
3359 static inline int smi_get_hash(Smi* smi) {
3360   return smi->value();
3361 }
3362
3363
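// The number string cache stores (number, string) pairs: the key for an entry
// lives at index 2 * hash and its value at 2 * hash + 1.  The entry count is
// expected to be a power of two, so masking with |mask| acts as a modulus.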
3364 Object* Heap::GetNumberStringCache(Object* number) {
3365   int hash;
3366   int mask = (number_string_cache()->length() >> 1) - 1;
3367   if (number->IsSmi()) {
3368     hash = smi_get_hash(Smi::cast(number)) & mask;
3369   } else {
3370     hash = double_get_hash(number->Number()) & mask;
3371   }
3372   Object* key = number_string_cache()->get(hash * 2);
3373   if (key == number) {
3374     return String::cast(number_string_cache()->get(hash * 2 + 1));
3375   } else if (key->IsHeapNumber() &&
3376              number->IsHeapNumber() &&
3377              key->Number() == number->Number()) {
3378     return String::cast(number_string_cache()->get(hash * 2 + 1));
3379   }
3380   return undefined_value();
3381 }
3382
3383
3384 void Heap::SetNumberStringCache(Object* number, String* string) {
3385   int hash;
3386   int mask = (number_string_cache()->length() >> 1) - 1;
3387   if (number->IsSmi()) {
3388     hash = smi_get_hash(Smi::cast(number)) & mask;
3389   } else {
3390     hash = double_get_hash(number->Number()) & mask;
3391   }
3392   if (number_string_cache()->get(hash * 2) != undefined_value() &&
3393       number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3394     // The first time we have a hash collision, we move to the full-sized
3395     // number string cache.
3396     AllocateFullSizeNumberStringCache();
3397     return;
3398   }
3399   number_string_cache()->set(hash * 2, number);
3400   number_string_cache()->set(hash * 2 + 1, string);
3401 }
3402
3403
3404 MaybeObject* Heap::NumberToString(Object* number,
3405                                   bool check_number_string_cache,
3406                                   PretenureFlag pretenure) {
3407   isolate_->counters()->number_to_string_runtime()->Increment();
3408   if (check_number_string_cache) {
3409     Object* cached = GetNumberStringCache(number);
3410     if (cached != undefined_value()) {
3411       return cached;
3412     }
3413   }
3414
3415   char arr[100];
3416   Vector<char> buffer(arr, ARRAY_SIZE(arr));
3417   const char* str;
3418   if (number->IsSmi()) {
3419     int num = Smi::cast(number)->value();
3420     str = IntToCString(num, buffer);
3421   } else {
3422     double num = HeapNumber::cast(number)->value();
3423     str = DoubleToCString(num, buffer);
3424   }
3425
3426   Object* js_string;
3427   MaybeObject* maybe_js_string =
3428       AllocateStringFromOneByte(CStrVector(str), pretenure);
3429   if (maybe_js_string->ToObject(&js_string)) {
3430     SetNumberStringCache(number, String::cast(js_string));
3431   }
3432   return maybe_js_string;
3433 }
3434
3435
3436 MaybeObject* Heap::Uint32ToString(uint32_t value,
3437                                   bool check_number_string_cache) {
3438   Object* number;
3439   MaybeObject* maybe = NumberFromUint32(value);
3440   if (!maybe->To<Object>(&number)) return maybe;
3441   return NumberToString(number, check_number_string_cache);
3442 }
3443
3444
3445 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3446   return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3447 }
3448
3449
3450 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3451     ExternalArrayType array_type) {
3452   switch (array_type) {
3453     case kExternalByteArray:
3454       return kExternalByteArrayMapRootIndex;
3455     case kExternalUnsignedByteArray:
3456       return kExternalUnsignedByteArrayMapRootIndex;
3457     case kExternalShortArray:
3458       return kExternalShortArrayMapRootIndex;
3459     case kExternalUnsignedShortArray:
3460       return kExternalUnsignedShortArrayMapRootIndex;
3461     case kExternalIntArray:
3462       return kExternalIntArrayMapRootIndex;
3463     case kExternalUnsignedIntArray:
3464       return kExternalUnsignedIntArrayMapRootIndex;
3465     case kExternalFloatArray:
3466       return kExternalFloatArrayMapRootIndex;
3467     case kExternalDoubleArray:
3468       return kExternalDoubleArrayMapRootIndex;
3469     case kExternalPixelArray:
3470       return kExternalPixelArrayMapRootIndex;
3471     default:
3472       UNREACHABLE();
3473       return kUndefinedValueRootIndex;
3474   }
3475 }
3476
3477 Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3478     ElementsKind elementsKind) {
3479   switch (elementsKind) {
3480     case EXTERNAL_BYTE_ELEMENTS:
3481       return kEmptyExternalByteArrayRootIndex;
3482     case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3483       return kEmptyExternalUnsignedByteArrayRootIndex;
3484     case EXTERNAL_SHORT_ELEMENTS:
3485       return kEmptyExternalShortArrayRootIndex;
3486     case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3487       return kEmptyExternalUnsignedShortArrayRootIndex;
3488     case EXTERNAL_INT_ELEMENTS:
3489       return kEmptyExternalIntArrayRootIndex;
3490     case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3491       return kEmptyExternalUnsignedIntArrayRootIndex;
3492     case EXTERNAL_FLOAT_ELEMENTS:
3493       return kEmptyExternalFloatArrayRootIndex;
3494     case EXTERNAL_DOUBLE_ELEMENTS:
3495       return kEmptyExternalDoubleArrayRootIndex;
3496     case EXTERNAL_PIXEL_ELEMENTS:
3497       return kEmptyExternalPixelArrayRootIndex;
3498     default:
3499       UNREACHABLE();
3500       return kUndefinedValueRootIndex;
3501   }
3502 }
3503
3504 ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3505   return ExternalArray::cast(
3506       roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3507 }
3508
3509
3510
3511
3512 MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3513   // We need to distinguish the minus zero value and this cannot be
3514   // done after conversion to int. Doing this by comparing bit
3515   // patterns is faster than using fpclassify() et al.
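  // (-0.0 == 0.0 under IEEE 754, but the two values have different bit
  // patterns, so the bit comparison below tells them apart.)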
3516   static const DoubleRepresentation minus_zero(-0.0);
3517
3518   DoubleRepresentation rep(value);
3519   if (rep.bits == minus_zero.bits) {
3520     return AllocateHeapNumber(-0.0, pretenure);
3521   }
3522
3523   int int_value = FastD2I(value);
3524   if (value == int_value && Smi::IsValid(int_value)) {
3525     return Smi::FromInt(int_value);
3526   }
3527
3528   // Materialize the value in the heap.
3529   return AllocateHeapNumber(value, pretenure);
3530 }
3531
3532
3533 MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3534   // Statically ensure that it is safe to allocate foreigns in paged spaces.
3535   STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3536   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3537   Foreign* result;
3538   MaybeObject* maybe_result = Allocate(foreign_map(), space);
3539   if (!maybe_result->To(&result)) return maybe_result;
3540   result->set_foreign_address(address);
3541   return result;
3542 }
3543
3544
3545 MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3546   SharedFunctionInfo* share;
3547   MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3548   if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3549
3550   // Set pointer fields.
3551   share->set_name(name);
3552   Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3553   share->set_code(illegal);
3554   share->set_optimized_code_map(Smi::FromInt(0));
3555   share->set_scope_info(ScopeInfo::Empty(isolate_));
3556   Code* construct_stub =
3557       isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3558   share->set_construct_stub(construct_stub);
3559   share->set_instance_class_name(Object_string());
3560   share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3561   share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3562   share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3563   share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3564   share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3565   share->set_ast_node_count(0);
3566   share->set_stress_deopt_counter(FLAG_deopt_every_n_times);
3567   share->set_counters(0);
3568
3569   // Set integer fields (smi or int, depending on the architecture).
3570   share->set_length(0);
3571   share->set_formal_parameter_count(0);
3572   share->set_expected_nof_properties(0);
3573   share->set_num_literals(0);
3574   share->set_start_position_and_type(0);
3575   share->set_end_position(0);
3576   share->set_function_token_position(0);
3577   // All compiler hints default to false or 0.
3578   share->set_compiler_hints(0);
3579   share->set_opt_count(0);
3580
3581   return share;
3582 }
3583
3584
3585 MaybeObject* Heap::AllocateJSMessageObject(String* type,
3586                                            JSArray* arguments,
3587                                            int start_position,
3588                                            int end_position,
3589                                            Object* script,
3590                                            Object* stack_trace,
3591                                            Object* stack_frames) {
3592   Object* result;
3593   { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3594     if (!maybe_result->ToObject(&result)) return maybe_result;
3595   }
3596   JSMessageObject* message = JSMessageObject::cast(result);
3597   message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3598   message->initialize_elements();
3599   message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3600   message->set_type(type);
3601   message->set_arguments(arguments);
3602   message->set_start_position(start_position);
3603   message->set_end_position(end_position);
3604   message->set_script(script);
3605   message->set_stack_trace(stack_trace);
3606   message->set_stack_frames(stack_frames);
3607   return result;
3608 }
3609
3610
3611
3612 // Returns true for a character in a range.  Both limits are inclusive.
3613 static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3614   // This makes use of unsigned integer wraparound.
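  // E.g. Between('A', '0', '9'): 'A' - '0' == 17 > 9, so false.  If character
  // is below |from|, the unsigned subtraction wraps to a huge value and the
  // comparison fails as well.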
3615   return character - from <= to - from;
3616 }
3617
3618
3619 MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3620     Heap* heap,
3621     uint16_t c1,
3622     uint16_t c2) {
3623   String* result;
3624   // Numeric strings have a different hash algorithm not known by
3625   // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3626   if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3627       heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3628     return result;
3629   // Now that we know the length is 2, we might as well make use of that fact
3630   // when building the new string.
3631   } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3632     // Both characters fit in one byte, so we can build a one-byte string.
3633     ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
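    // With kMaxOneByteCharCodeU + 1 a power of two, (c1 | c2) <= max holds
    // exactly when both c1 <= max and c2 <= max, so the single test above is
    // sufficient.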
3634     Object* result;
3635     { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3636       if (!maybe_result->ToObject(&result)) return maybe_result;
3637     }
3638     uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3639     dest[0] = static_cast<uint8_t>(c1);
3640     dest[1] = static_cast<uint8_t>(c2);
3641     return result;
3642   } else {
3643     Object* result;
3644     { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3645       if (!maybe_result->ToObject(&result)) return maybe_result;
3646     }
3647     uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3648     dest[0] = c1;
3649     dest[1] = c2;
3650     return result;
3651   }
3652 }
3653
3654
3655 MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3656   int first_length = first->length();
3657   if (first_length == 0) {
3658     return second;
3659   }
3660
3661   int second_length = second->length();
3662   if (second_length == 0) {
3663     return first;
3664   }
3665
3666   int length = first_length + second_length;
3667
3668   // Optimization for two-character strings often used as keys in a
3669   // decompression dictionary.  Check whether we already have the string in
3670   // the string table to prevent creation of many unnecessary strings.
3671   if (length == 2) {
3672     uint16_t c1 = first->Get(0);
3673     uint16_t c2 = second->Get(0);
3674     return MakeOrFindTwoCharacterString(this, c1, c2);
3675   }
3676
3677   bool first_is_one_byte = first->IsOneByteRepresentation();
3678   bool second_is_one_byte = second->IsOneByteRepresentation();
3679   bool is_one_byte = first_is_one_byte && second_is_one_byte;
3680   // Make sure that an out of memory exception is thrown if the length
3681   // of the new cons string is too large.
3682   if (length > String::kMaxLength || length < 0) {
3683     isolate()->context()->mark_out_of_memory();
3684     return Failure::OutOfMemoryException(0x4);
3685   }
3686
3687   bool is_one_byte_data_in_two_byte_string = false;
3688   if (!is_one_byte) {
3689     // At least one of the strings uses two-byte representation so we
3690     // can't use the fast case code for short ASCII strings below, but
3691     // we can try to save memory if all chars actually fit in ASCII.
3692     is_one_byte_data_in_two_byte_string =
3693         first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3694     if (is_one_byte_data_in_two_byte_string) {
3695       isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3696     }
3697   }
3698
3699   // If the resulting string is small make a flat string.
3700   if (length < ConsString::kMinLength) {
3701     // Note that neither of the two inputs can be a slice because:
3702     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
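    // Each input is shorter than ConsString::kMinLength here, and slices are
    // only created for strings of at least SlicedString::kMinLength (see
    // AllocateSubString below).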
3703     ASSERT(first->IsFlat());
3704     ASSERT(second->IsFlat());
3705     if (is_one_byte) {
3706       Object* result;
3707       { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3708         if (!maybe_result->ToObject(&result)) return maybe_result;
3709       }
3710       // Copy the characters into the new object.
3711       uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3712       // Copy first part.
3713       const uint8_t* src;
3714       if (first->IsExternalString()) {
3715         src = ExternalAsciiString::cast(first)->GetChars();
3716       } else {
3717         src = SeqOneByteString::cast(first)->GetChars();
3718       }
3719       for (int i = 0; i < first_length; i++) *dest++ = src[i];
3720       // Copy second part.
3721       if (second->IsExternalString()) {
3722         src = ExternalAsciiString::cast(second)->GetChars();
3723       } else {
3724         src = SeqOneByteString::cast(second)->GetChars();
3725       }
3726       for (int i = 0; i < second_length; i++) *dest++ = src[i];
3727       return result;
3728     } else {
3729       if (is_one_byte_data_in_two_byte_string) {
3730         Object* result;
3731         { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3732           if (!maybe_result->ToObject(&result)) return maybe_result;
3733         }
3734         // Copy the characters into the new object.
3735         uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3736         String::WriteToFlat(first, dest, 0, first_length);
3737         String::WriteToFlat(second, dest + first_length, 0, second_length);
3738         isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3739         return result;
3740       }
3741
3742       Object* result;
3743       { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3744         if (!maybe_result->ToObject(&result)) return maybe_result;
3745       }
3746       // Copy the characters into the new object.
3747       uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3748       String::WriteToFlat(first, dest, 0, first_length);
3749       String::WriteToFlat(second, dest + first_length, 0, second_length);
3750       return result;
3751     }
3752   }
3753
3754   Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3755       cons_ascii_string_map() : cons_string_map();
3756
3757   Object* result;
3758   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3759     if (!maybe_result->ToObject(&result)) return maybe_result;
3760   }
3761
3762   DisallowHeapAllocation no_gc;
3763   ConsString* cons_string = ConsString::cast(result);
3764   WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3765   cons_string->set_length(length);
3766   cons_string->set_hash_field(String::kEmptyHashField);
3767   cons_string->set_first(first, mode);
3768   cons_string->set_second(second, mode);
3769   return result;
3770 }
3771
3772
3773 MaybeObject* Heap::AllocateSubString(String* buffer,
3774                                      int start,
3775                                      int end,
3776                                      PretenureFlag pretenure) {
3777   int length = end - start;
3778   if (length <= 0) {
3779     return empty_string();
3780   } else if (length == 1) {
3781     return LookupSingleCharacterStringFromCode(buffer->Get(start));
3782   } else if (length == 2) {
3783     // Optimization for two-character strings often used as keys in a
3784     // decompression dictionary.  Check whether we already have the string in
3785     // the string table to prevent creation of many unnecessary strings.
3786     uint16_t c1 = buffer->Get(start);
3787     uint16_t c2 = buffer->Get(start + 1);
3788     return MakeOrFindTwoCharacterString(this, c1, c2);
3789   }
3790
3791   // Make an attempt to flatten the buffer to reduce access time.
3792   buffer = buffer->TryFlattenGetString();
3793
3794   if (!FLAG_string_slices ||
3795       !buffer->IsFlat() ||
3796       length < SlicedString::kMinLength ||
3797       pretenure == TENURED) {
3798     Object* result;
3799     // WriteToFlat takes care of the case when an indirect string has a
3800     // different encoding from its underlying string.  These encodings may
3801     // differ because of externalization.
3802     bool is_one_byte = buffer->IsOneByteRepresentation();
3803     { MaybeObject* maybe_result = is_one_byte
3804                                   ? AllocateRawOneByteString(length, pretenure)
3805                                   : AllocateRawTwoByteString(length, pretenure);
3806       if (!maybe_result->ToObject(&result)) return maybe_result;
3807     }
3808     String* string_result = String::cast(result);
3809     // Copy the characters into the new object.
3810     if (is_one_byte) {
3811       ASSERT(string_result->IsOneByteRepresentation());
3812       uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3813       String::WriteToFlat(buffer, dest, start, end);
3814     } else {
3815       ASSERT(string_result->IsTwoByteRepresentation());
3816       uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3817       String::WriteToFlat(buffer, dest, start, end);
3818     }
3819     return result;
3820   }
3821
3822   ASSERT(buffer->IsFlat());
3823 #ifdef VERIFY_HEAP
3824   if (FLAG_verify_heap) {
3825     buffer->StringVerify();
3826   }
3827 #endif
3828
3829   Object* result;
3830   // When slicing an indirect string we use its encoding for a newly created
3831   // slice and don't check the encoding of the underlying string.  This is safe
3832   // even if the encodings are different because of externalization.  If an
3833   // indirect ASCII string is pointing to a two-byte string, the two-byte char
3834   // codes of the underlying string must still fit into ASCII (because
3835   // externalization must not change char codes).
3836   { Map* map = buffer->IsOneByteRepresentation()
3837                  ? sliced_ascii_string_map()
3838                  : sliced_string_map();
3839     MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3840     if (!maybe_result->ToObject(&result)) return maybe_result;
3841   }
3842
3843   DisallowHeapAllocation no_gc;
3844   SlicedString* sliced_string = SlicedString::cast(result);
3845   sliced_string->set_length(length);
3846   sliced_string->set_hash_field(String::kEmptyHashField);
3847   if (buffer->IsConsString()) {
3848     ConsString* cons = ConsString::cast(buffer);
3849     ASSERT(cons->second()->length() == 0);
3850     sliced_string->set_parent(cons->first());
3851     sliced_string->set_offset(start);
3852   } else if (buffer->IsSlicedString()) {
3853     // Prevent nesting sliced strings.
3854     SlicedString* parent_slice = SlicedString::cast(buffer);
3855     sliced_string->set_parent(parent_slice->parent());
3856     sliced_string->set_offset(start + parent_slice->offset());
3857   } else {
3858     sliced_string->set_parent(buffer);
3859     sliced_string->set_offset(start);
3860   }
3861   ASSERT(sliced_string->parent()->IsSeqString() ||
3862          sliced_string->parent()->IsExternalString());
3863   return result;
3864 }
3865
3866
3867 MaybeObject* Heap::AllocateExternalStringFromAscii(
3868     const ExternalAsciiString::Resource* resource) {
3869   size_t length = resource->length();
3870   if (length > static_cast<size_t>(String::kMaxLength)) {
3871     isolate()->context()->mark_out_of_memory();
3872     return Failure::OutOfMemoryException(0x5);
3873   }
3874
3875   Map* map = external_ascii_string_map();
3876   Object* result;
3877   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3878     if (!maybe_result->ToObject(&result)) return maybe_result;
3879   }
3880
3881   ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
3882   external_string->set_length(static_cast<int>(length));
3883   external_string->set_hash_field(String::kEmptyHashField);
3884   external_string->set_resource(resource);
3885
3886   return result;
3887 }
3888
3889
3890 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
3891     const ExternalTwoByteString::Resource* resource) {
3892   size_t length = resource->length();
3893   if (length > static_cast<size_t>(String::kMaxLength)) {
3894     isolate()->context()->mark_out_of_memory();
3895     return Failure::OutOfMemoryException(0x6);
3896   }
3897
3898   // For small strings we check whether the resource contains only
3899   // one-byte characters.  If so, we use a different string map.
3900   static const size_t kOneByteCheckLengthLimit = 32;
3901   bool is_one_byte = length <= kOneByteCheckLengthLimit &&
3902       String::IsOneByte(resource->data(), static_cast<int>(length));
3903   Map* map = is_one_byte ?
3904       external_string_with_one_byte_data_map() : external_string_map();
3905   Object* result;
3906   { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3907     if (!maybe_result->ToObject(&result)) return maybe_result;
3908   }
3909
3910   ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
3911   external_string->set_length(static_cast<int>(length));
3912   external_string->set_hash_field(String::kEmptyHashField);
3913   external_string->set_resource(resource);
3914
3915   return result;
3916 }
3917
3918
3919 MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
3920   if (code <= String::kMaxOneByteCharCode) {
3921     Object* value = single_character_string_cache()->get(code);
3922     if (value != undefined_value()) return value;
3923
3924     uint8_t buffer[1];
3925     buffer[0] = static_cast<uint8_t>(code);
3926     Object* result;
3927     MaybeObject* maybe_result =
3928         InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
3929
3930     if (!maybe_result->ToObject(&result)) return maybe_result;
3931     single_character_string_cache()->set(code, result);
3932     return result;
3933   }
3934
3935   Object* result;
3936   { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
3937     if (!maybe_result->ToObject(&result)) return maybe_result;
3938   }
3939   String* answer = String::cast(result);
3940   answer->Set(0, code);
3941   return answer;
3942 }
3943
3944
3945 MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
3946   if (length < 0 || length > ByteArray::kMaxLength) {
3947     return Failure::OutOfMemoryException(0x7);
3948   }
3949   if (pretenure == NOT_TENURED) {
3950     return AllocateByteArray(length);
3951   }
3952   int size = ByteArray::SizeFor(length);
3953   Object* result;
3954   { MaybeObject* maybe_result = (size <= Page::kMaxNonCodeHeapObjectSize)
3955                    ? old_data_space_->AllocateRaw(size)
3956                    : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3957     if (!maybe_result->ToObject(&result)) return maybe_result;
3958   }
3959
3960   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3961       byte_array_map());
3962   reinterpret_cast<ByteArray*>(result)->set_length(length);
3963   return result;
3964 }
3965
3966
3967 MaybeObject* Heap::AllocateByteArray(int length) {
3968   if (length < 0 || length > ByteArray::kMaxLength) {
3969     return Failure::OutOfMemoryException(0x8);
3970   }
3971   int size = ByteArray::SizeFor(length);
3972   AllocationSpace space =
3973       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : NEW_SPACE;
3974   Object* result;
3975   { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
3976     if (!maybe_result->ToObject(&result)) return maybe_result;
3977   }
3978
3979   reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
3980       byte_array_map());
3981   reinterpret_cast<ByteArray*>(result)->set_length(length);
3982   return result;
3983 }
3984
3985
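// Overwrites the given address range with a filler (a one-pointer or
// two-pointer filler, or a FreeSpace for larger gaps) so that the gap still
// looks like a valid heap object to heap iterators.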
3986 void Heap::CreateFillerObjectAt(Address addr, int size) {
3987   if (size == 0) return;
3988   HeapObject* filler = HeapObject::FromAddress(addr);
3989   if (size == kPointerSize) {
3990     filler->set_map_no_write_barrier(one_pointer_filler_map());
3991   } else if (size == 2 * kPointerSize) {
3992     filler->set_map_no_write_barrier(two_pointer_filler_map());
3993   } else {
3994     filler->set_map_no_write_barrier(free_space_map());
3995     FreeSpace::cast(filler)->set_size(size);
3996   }
3997 }
3998
3999
4000 MaybeObject* Heap::AllocateExternalArray(int length,
4001                                          ExternalArrayType array_type,
4002                                          void* external_pointer,
4003                                          PretenureFlag pretenure) {
4004   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
4005   Object* result;
4006   { MaybeObject* maybe_result = AllocateRaw(ExternalArray::kAlignedSize,
4007                                             space,
4008                                             OLD_DATA_SPACE);
4009     if (!maybe_result->ToObject(&result)) return maybe_result;
4010   }
4011
4012   reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4013       MapForExternalArrayType(array_type));
4014   reinterpret_cast<ExternalArray*>(result)->set_length(length);
4015   reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4016       external_pointer);
4017
4018   return result;
4019 }
4020
4021
4022 MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4023                               Code::Flags flags,
4024                               Handle<Object> self_reference,
4025                               bool immovable,
4026                               bool crankshafted) {
4027   // Allocate ByteArray before the Code object, so that we do not risk
4028   // leaving an uninitialized Code object (and breaking the heap).
4029   ByteArray* reloc_info;
4030   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4031   if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4032
4033   // Compute size.
4034   int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4035   int obj_size = Code::SizeFor(body_size);
4036   ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4037   MaybeObject* maybe_result;
4038   // Large code objects and code objects which should stay at a fixed address
4039   // are allocated in large object space.
4040   HeapObject* result;
4041   bool force_lo_space = obj_size > code_space()->AreaSize();
4042   if (force_lo_space) {
4043     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4044   } else {
4045     maybe_result = code_space_->AllocateRaw(obj_size);
4046   }
4047   if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4048
4049   if (immovable && !force_lo_space &&
4050       // Objects on the first page of each space are never moved.
4051       !code_space_->FirstPage()->Contains(result->address())) {
4052     // Discard the first code allocation, which was on a page where it could be
4053     // moved.
4054     CreateFillerObjectAt(result->address(), obj_size);
4055     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4056     if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4057   }
4058
4059   // Initialize the object
4060   result->set_map_no_write_barrier(code_map());
4061   Code* code = Code::cast(result);
4062   ASSERT(!isolate_->code_range()->exists() ||
4063       isolate_->code_range()->contains(code->address()));
4064   code->set_instruction_size(desc.instr_size);
4065   code->set_relocation_info(reloc_info);
4066   code->set_flags(flags);
4067   if (code->is_call_stub() || code->is_keyed_call_stub()) {
4068     code->set_check_type(RECEIVER_MAP_CHECK);
4069   }
4070   code->set_is_crankshafted(crankshafted);
4071   code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4072   code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4073   code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4074   code->set_gc_metadata(Smi::FromInt(0));
4075   code->set_ic_age(global_ic_age_);
4076   code->set_prologue_offset(kPrologueOffsetNotSet);
4077   if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4078     code->set_marked_for_deoptimization(false);
4079   }
4080   // Allow self references to created code object by patching the handle to
4081   // point to the newly allocated Code object.
4082   if (!self_reference.is_null()) {
4083     *(self_reference.location()) = code;
4084   }
4085   // Migrate generated code.
4086   // The generated code can contain Object** values (typically from handles)
4087   // that are dereferenced during the copy to point directly to the actual heap
4088   // objects. These pointers can include references to the code object itself,
4089   // through the self_reference parameter.
4090   code->CopyFrom(desc);
4091
4092 #ifdef VERIFY_HEAP
4093   if (FLAG_verify_heap) {
4094     code->Verify();
4095   }
4096 #endif
4097   return code;
4098 }
4099
4100
4101 MaybeObject* Heap::CopyCode(Code* code) {
4102   // Allocate an object the same size as the code object.
4103   int obj_size = code->Size();
4104   MaybeObject* maybe_result;
4105   if (obj_size > code_space()->AreaSize()) {
4106     maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4107   } else {
4108     maybe_result = code_space_->AllocateRaw(obj_size);
4109   }
4110
4111   Object* result;
4112   if (!maybe_result->ToObject(&result)) return maybe_result;
4113
4114   // Copy code object.
4115   Address old_addr = code->address();
4116   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4117   CopyBlock(new_addr, old_addr, obj_size);
4118   // Relocate the copy.
4119   Code* new_code = Code::cast(result);
4120   ASSERT(!isolate_->code_range()->exists() ||
4121       isolate_->code_range()->contains(code->address()));
4122   new_code->Relocate(new_addr - old_addr);
4123   return new_code;
4124 }
4125
4126
4127 MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4128   // Allocate ByteArray before the Code object, so that we do not risk
4129   // leaving an uninitialized Code object (and breaking the heap).
4130   Object* reloc_info_array;
4131   { MaybeObject* maybe_reloc_info_array =
4132         AllocateByteArray(reloc_info.length(), TENURED);
4133     if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4134       return maybe_reloc_info_array;
4135     }
4136   }
4137
4138   int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4139
4140   int new_obj_size = Code::SizeFor(new_body_size);
4141
4142   Address old_addr = code->address();
4143
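  // The offset computed below is the size of the Code header plus the
  // instructions; that prefix is copied verbatim, while the relocation info is
  // replaced with the freshly allocated ByteArray.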
4144   size_t relocation_offset =
4145       static_cast<size_t>(code->instruction_end() - old_addr);
4146
4147   MaybeObject* maybe_result;
4148   if (new_obj_size > code_space()->AreaSize()) {
4149     maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4150   } else {
4151     maybe_result = code_space_->AllocateRaw(new_obj_size);
4152   }
4153
4154   Object* result;
4155   if (!maybe_result->ToObject(&result)) return maybe_result;
4156
4157   // Copy code object.
4158   Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4159
4160   // Copy header and instructions.
4161   CopyBytes(new_addr, old_addr, relocation_offset);
4162
4163   Code* new_code = Code::cast(result);
4164   new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4165
4166   // Copy patched rinfo.
4167   CopyBytes(new_code->relocation_start(),
4168             reloc_info.start(),
4169             static_cast<size_t>(reloc_info.length()));
4170
4171   // Relocate the copy.
4172   ASSERT(!isolate_->code_range()->exists() ||
4173       isolate_->code_range()->contains(code->address()));
4174   new_code->Relocate(new_addr - old_addr);
4175
4176 #ifdef VERIFY_HEAP
4177   if (FLAG_verify_heap) {
4178     code->Verify();
4179   }
4180 #endif
4181   return new_code;
4182 }
4183
4184
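// Like Allocate() below, but reserves extra room and places an
// AllocationSiteInfo object directly after the new object, pointing at the
// given payload.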
4185 MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4186     Handle<Object> allocation_site_info_payload) {
4187   ASSERT(gc_state_ == NOT_IN_GC);
4188   ASSERT(map->instance_type() != MAP_TYPE);
4189   // If allocation failures are disallowed, we may allocate in a different
4190   // space when new space is full and the object is not a large object.
4191   AllocationSpace retry_space =
4192       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4193   int size = map->instance_size() + AllocationSiteInfo::kSize;
4194   Object* result;
4195   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4196   if (!maybe_result->ToObject(&result)) return maybe_result;
4197   // No need for write barrier since object is white and map is in old space.
4198   HeapObject::cast(result)->set_map_no_write_barrier(map);
4199   AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4200       reinterpret_cast<Address>(result) + map->instance_size());
4201   alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4202   alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER);
4203   return result;
4204 }
4205
4206
4207 MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4208   ASSERT(gc_state_ == NOT_IN_GC);
4209   ASSERT(map->instance_type() != MAP_TYPE);
4210   // If allocation failures are disallowed, we may allocate in a different
4211   // space when new space is full and the object is not a large object.
4212   AllocationSpace retry_space =
4213       (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4214   int size = map->instance_size();
4215   Object* result;
4216   MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4217   if (!maybe_result->ToObject(&result)) return maybe_result;
4218   // No need for write barrier since object is white and map is in old space.
4219   HeapObject::cast(result)->set_map_no_write_barrier(map);
4220   return result;
4221 }
4222
4223
4224 void Heap::InitializeFunction(JSFunction* function,
4225                               SharedFunctionInfo* shared,
4226                               Object* prototype) {
4227   ASSERT(!prototype->IsMap());
4228   function->initialize_properties();
4229   function->initialize_elements();
4230   function->set_shared(shared);
4231   function->set_code(shared->code());
4232   function->set_prototype_or_initial_map(prototype);
4233   function->set_context(undefined_value());
4234   function->set_literals_or_bindings(empty_fixed_array());
4235   function->set_next_function_link(undefined_value());
4236 }
4237
4238
4239 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4240   // Make sure to use globals from the function's context, since the function
4241   // can be from a different context.
4242   Context* native_context = function->context()->native_context();
4243   Map* new_map;
4244   if (function->shared()->is_generator()) {
4245     // Generator prototypes can share maps since they don't have "constructor"
4246     // properties.
4247     new_map = native_context->generator_object_prototype_map();
4248   } else {
4249     // Each function prototype gets a fresh map to avoid unwanted sharing of
4250     // maps between prototypes of different constructors.
4251     JSFunction* object_function = native_context->object_function();
4252     ASSERT(object_function->has_initial_map());
4253     MaybeObject* maybe_map = object_function->initial_map()->Copy();
4254     if (!maybe_map->To(&new_map)) return maybe_map;
4255   }
4256
4257   Object* prototype;
4258   MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4259   if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4260
4261   if (!function->shared()->is_generator()) {
4262     MaybeObject* maybe_failure =
4263         JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
4264             constructor_string(), function, DONT_ENUM);
4265     if (maybe_failure->IsFailure()) return maybe_failure;
4266   }
4267
4268   return prototype;
4269 }
4270
4271
4272 MaybeObject* Heap::AllocateFunction(Map* function_map,
4273                                     SharedFunctionInfo* shared,
4274                                     Object* prototype,
4275                                     PretenureFlag pretenure) {
4276   AllocationSpace space =
4277       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4278   Object* result;
4279   { MaybeObject* maybe_result = Allocate(function_map, space);
4280     if (!maybe_result->ToObject(&result)) return maybe_result;
4281   }
4282   InitializeFunction(JSFunction::cast(result), shared, prototype);
4283   return result;
4284 }
4285
4286
4287 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4288   // To get fast allocation and map sharing for arguments objects we
4289   // allocate them based on an arguments boilerplate.
4290
4291   JSObject* boilerplate;
4292   int arguments_object_size;
4293   bool strict_mode_callee = callee->IsJSFunction() &&
4294       !JSFunction::cast(callee)->shared()->is_classic_mode();
4295   if (strict_mode_callee) {
4296     boilerplate =
4297         isolate()->context()->native_context()->
4298             strict_mode_arguments_boilerplate();
4299     arguments_object_size = kArgumentsObjectSizeStrict;
4300   } else {
4301     boilerplate =
4302         isolate()->context()->native_context()->arguments_boilerplate();
4303     arguments_object_size = kArgumentsObjectSize;
4304   }
4305
4306   // This calls AllocateRaw and CopyBlock directly rather than using
4307   // Heap::Allocate, so we duplicate its allocation check here.
4308   ASSERT(AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
4309
4310   // Check that the size of the boilerplate matches our
4311   // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4312   // on the size being a known constant.
4313   ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4314
4315   // Do the allocation.
4316   Object* result;
4317   { MaybeObject* maybe_result =
4318         AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4319     if (!maybe_result->ToObject(&result)) return maybe_result;
4320   }
4321
4322   // Copy the content. The arguments boilerplate doesn't have any
4323   // fields that point to new space so it's safe to skip the write
4324   // barrier here.
4325   CopyBlock(HeapObject::cast(result)->address(),
4326             boilerplate->address(),
4327             JSObject::kHeaderSize);
4328
4329   // Set the length property.
4330   JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4331                                                 Smi::FromInt(length),
4332                                                 SKIP_WRITE_BARRIER);
4333   // Set the callee property for non-strict mode arguments object only.
4334   if (!strict_mode_callee) {
4335     JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4336                                                   callee);
4337   }
4338
4339   // Check the state of the object
4340   ASSERT(JSObject::cast(result)->HasFastProperties());
4341   ASSERT(JSObject::cast(result)->HasFastObjectElements());
4342
4343   return result;
4344 }
4345
4346
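     // Computes and allocates the initial map for instances created by |fun|,
     // allocating a fresh prototype if the function does not have one yet,
     // and starting in-object slack tracking for non-generator functions.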
4347 MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4348   ASSERT(!fun->has_initial_map());
4349
4350   // First create a new map with the size and number of in-object properties
4351   // suggested by the function.
4352   InstanceType instance_type;
4353   int instance_size;
4354   int in_object_properties;
4355   if (fun->shared()->is_generator()) {
4356     instance_type = JS_GENERATOR_OBJECT_TYPE;
4357     instance_size = JSGeneratorObject::kSize;
4358     in_object_properties = 0;
4359   } else {
4360     instance_type = JS_OBJECT_TYPE;
4361     instance_size = fun->shared()->CalculateInstanceSize();
4362     in_object_properties = fun->shared()->CalculateInObjectProperties();
4363   }
4364   Map* map;
4365   MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4366   if (!maybe_map->To(&map)) return maybe_map;
4367
4368   // Fetch or allocate prototype.
4369   Object* prototype;
4370   if (fun->has_instance_prototype()) {
4371     prototype = fun->instance_prototype();
4372   } else {
4373     MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4374     if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4375   }
4376   map->set_inobject_properties(in_object_properties);
4377   map->set_unused_property_fields(in_object_properties);
4378   map->set_prototype(prototype);
4379   ASSERT(map->has_fast_object_elements());
4380
4381   if (!fun->shared()->is_generator()) {
4382     fun->shared()->StartInobjectSlackTracking(map);
4383   }
4384
4385   return map;
4386 }
4387
4388
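     // Initializes a freshly allocated JSObject: installs the properties
     // backing store, sets up empty elements and fills the in-object fields
     // with either undefined or a one-pointer filler (see the comment below).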
4389 void Heap::InitializeJSObjectFromMap(JSObject* obj,
4390                                      FixedArray* properties,
4391                                      Map* map) {
4392   obj->set_properties(properties);
4393   obj->initialize_elements();
4394   // TODO(1240798): Initialize the object's body using valid initial values
4395   // according to the object's initial map.  For example, if the map's
4396   // instance type is JS_ARRAY_TYPE, the length field should be initialized
4397   // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4398   // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4399   // verification code has to cope with (temporarily) invalid objects.  See,
4400   // for example, JSArray::JSArrayVerify.
4401   Object* filler;
4402   // We cannot always fill with one_pointer_filler_map because objects
4403   // created from API functions expect their internal fields to be initialized
4404   // with undefined_value.
4405   // Pre-allocated fields need to be initialized with undefined_value as well
4406   // so that object accesses before the constructor completes (e.g. in the
4407   // debugger) will not cause a crash.
4408   if (map->constructor()->IsJSFunction() &&
4409       JSFunction::cast(map->constructor())->shared()->
4410           IsInobjectSlackTrackingInProgress()) {
4411     // We might want to shrink the object later.
4412     ASSERT(obj->GetInternalFieldCount() == 0);
4413     filler = Heap::one_pointer_filler_map();
4414   } else {
4415     filler = Heap::undefined_value();
4416   }
4417   obj->InitializeBody(map, Heap::undefined_value(), filler);
4418 }
4419
4420
4421 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
4422   // JSFunctions should be allocated using AllocateFunction to be
4423   // properly initialized.
4424   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4425
4426   // Both types of global objects should be allocated using
4427   // AllocateGlobalObject to be properly initialized.
4428   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4429   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4430
4431   // Allocate the backing storage for the properties.
4432   int prop_size =
4433       map->pre_allocated_property_fields() +
4434       map->unused_property_fields() -
4435       map->inobject_properties();
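       // As a hypothetical example: a map with 4 pre-allocated property
       // fields, 2 unused property fields and 3 in-object properties needs a
       // backing store with 4 + 2 - 3 = 3 out-of-object slots.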
4436   ASSERT(prop_size >= 0);
4437   Object* properties;
4438   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4439     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4440   }
4441
4442   // Allocate the JSObject.
4443   AllocationSpace space =
4444       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4445   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4446   Object* obj;
4447   MaybeObject* maybe_obj = Allocate(map, space);
4448   if (!maybe_obj->To(&obj)) return maybe_obj;
4449
4450   // Initialize the JSObject.
4451   InitializeJSObjectFromMap(JSObject::cast(obj),
4452                             FixedArray::cast(properties),
4453                             map);
4454   ASSERT(JSObject::cast(obj)->HasFastElements() ||
4455          JSObject::cast(obj)->HasExternalArrayElements());
4456   return obj;
4457 }
4458
4459
4460 MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map,
4461     Handle<Object> allocation_site_info_payload) {
4462   // JSFunctions should be allocated using AllocateFunction to be
4463   // properly initialized.
4464   ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4465
4466   // Both types of global objects should be allocated using
4467   // AllocateGlobalObject to be properly initialized.
4468   ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4469   ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4470
4471   // Allocate the backing storage for the properties.
4472   int prop_size =
4473       map->pre_allocated_property_fields() +
4474       map->unused_property_fields() -
4475       map->inobject_properties();
4476   ASSERT(prop_size >= 0);
4477   Object* properties;
4478   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4479     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
4480   }
4481
4482   // Allocate the JSObject.
4483   AllocationSpace space = NEW_SPACE;
4484   if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE;
4485   Object* obj;
4486   MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space,
4487       allocation_site_info_payload);
4488   if (!maybe_obj->To(&obj)) return maybe_obj;
4489
4490   // Initialize the JSObject.
4491   InitializeJSObjectFromMap(JSObject::cast(obj),
4492                             FixedArray::cast(properties),
4493                             map);
4494   ASSERT(JSObject::cast(obj)->HasFastElements());
4495   return obj;
4496 }
4497
4498
4499 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4500                                     PretenureFlag pretenure) {
4501   // Allocate the initial map if absent.
4502   if (!constructor->has_initial_map()) {
4503     Object* initial_map;
4504     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4505       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4506     }
4507     constructor->set_initial_map(Map::cast(initial_map));
4508     Map::cast(initial_map)->set_constructor(constructor);
4509   }
4510   // Allocate the object based on the constructor's initial map.
4511   MaybeObject* result = AllocateJSObjectFromMap(
4512       constructor->initial_map(), pretenure);
4513 #ifdef DEBUG
4514   // Make sure result is NOT a global object if valid.
4515   Object* non_failure;
4516   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4517 #endif
4518   return result;
4519 }
4520
4521
4522 MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4523     Handle<Object> allocation_site_info_payload) {
4524   // Allocate the initial map if absent.
4525   if (!constructor->has_initial_map()) {
4526     Object* initial_map;
4527     { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4528       if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4529     }
4530     constructor->set_initial_map(Map::cast(initial_map));
4531     Map::cast(initial_map)->set_constructor(constructor);
4532   }
4533   // Allocate the object based on the constructor's initial map, or on the
4534   // elements kind advice found in the allocation site payload.
4535   Map* initial_map = constructor->initial_map();
4536
4537   Cell* cell = Cell::cast(*allocation_site_info_payload);
4538   Smi* smi = Smi::cast(cell->value());
4539   ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4540   AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4541   if (to_kind != initial_map->elements_kind()) {
4542     MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4543     if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4544     // Possibly alter the mode, since we found an updated elements kind
4545     // in the type info cell.
4546     mode = AllocationSiteInfo::GetMode(to_kind);
4547   }
4548
4549   MaybeObject* result;
4550   if (mode == TRACK_ALLOCATION_SITE) {
4551     result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4552         allocation_site_info_payload);
4553   } else {
4554     result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4555   }
4556 #ifdef DEBUG
4557   // Make sure result is NOT a global object if valid.
4558   Object* non_failure;
4559   ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4560 #endif
4561   return result;
4562 }
4563
4564
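     // Allocates a generator object for |function|, creating and installing
     // the function's initial map first if it is absent.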
4565 MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4566   ASSERT(function->shared()->is_generator());
4567   Map *map;
4568   if (function->has_initial_map()) {
4569     map = function->initial_map();
4570   } else {
4571     // Allocate the initial map if absent.
4572     MaybeObject* maybe_map = AllocateInitialMap(function);
4573     if (!maybe_map->To(&map)) return maybe_map;
4574     function->set_initial_map(map);
4575     map->set_constructor(function);
4576   }
4577   ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4578   return AllocateJSObjectFromMap(map);
4579 }
4580
4581
4582 MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4583   // Allocate a fresh map. Modules do not have a prototype.
4584   Map* map;
4585   MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4586   if (!maybe_map->To(&map)) return maybe_map;
4587   // Allocate the object based on the map.
4588   JSModule* module;
4589   MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4590   if (!maybe_module->To(&module)) return maybe_module;
4591   module->set_context(context);
4592   module->set_scope_info(scope_info);
4593   return module;
4594 }
4595
4596
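     // Allocates a JSArray of the given elements kind together with a backing
     // store of |capacity| elements that is either left uninitialized or
     // filled with holes, depending on |mode|.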
4597 MaybeObject* Heap::AllocateJSArrayAndStorage(
4598     ElementsKind elements_kind,
4599     int length,
4600     int capacity,
4601     ArrayStorageAllocationMode mode,
4602     PretenureFlag pretenure) {
4603   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4604   JSArray* array;
4605   if (!maybe_array->To(&array)) return maybe_array;
4606
4607   // TODO(mvstanton): this body of code duplicates AllocateJSArrayStorage
4608   // for performance reasons.
4609   ASSERT(capacity >= length);
4610
4611   if (capacity == 0) {
4612     array->set_length(Smi::FromInt(0));
4613     array->set_elements(empty_fixed_array());
4614     return array;
4615   }
4616
4617   FixedArrayBase* elms;
4618   MaybeObject* maybe_elms = NULL;
4619   if (IsFastDoubleElementsKind(elements_kind)) {
4620     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4621       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4622     } else {
4623       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4624       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4625     }
4626   } else {
4627     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4628     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4629       maybe_elms = AllocateUninitializedFixedArray(capacity);
4630     } else {
4631       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4632       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4633     }
4634   }
4635   if (!maybe_elms->To(&elms)) return maybe_elms;
4636
4637   array->set_elements(elms);
4638   array->set_length(Smi::FromInt(length));
4639   return array;
4640 }
4641
4642
4643 MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite(
4644     ElementsKind elements_kind,
4645     int length,
4646     int capacity,
4647     Handle<Object> allocation_site_payload,
4648     ArrayStorageAllocationMode mode) {
4649   MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind,
4650       allocation_site_payload);
4651   JSArray* array;
4652   if (!maybe_array->To(&array)) return maybe_array;
4653   return AllocateJSArrayStorage(array, length, capacity, mode);
4654 }
4655
4656
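     // Allocates a backing store of |capacity| elements for an existing
     // JSArray and sets the array's length to |length|.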
4657 MaybeObject* Heap::AllocateJSArrayStorage(
4658     JSArray* array,
4659     int length,
4660     int capacity,
4661     ArrayStorageAllocationMode mode) {
4662   ASSERT(capacity >= length);
4663
4664   if (capacity == 0) {
4665     array->set_length(Smi::FromInt(0));
4666     array->set_elements(empty_fixed_array());
4667     return array;
4668   }
4669
4670   FixedArrayBase* elms;
4671   MaybeObject* maybe_elms = NULL;
4672   ElementsKind elements_kind = array->GetElementsKind();
4673   if (IsFastDoubleElementsKind(elements_kind)) {
4674     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4675       maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4676     } else {
4677       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4678       maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4679     }
4680   } else {
4681     ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4682     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4683       maybe_elms = AllocateUninitializedFixedArray(capacity);
4684     } else {
4685       ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4686       maybe_elms = AllocateFixedArrayWithHoles(capacity);
4687     }
4688   }
4689   if (!maybe_elms->To(&elms)) return maybe_elms;
4690
4691   array->set_elements(elms);
4692   array->set_length(Smi::FromInt(length));
4693   return array;
4694 }
4695
4696
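     // Allocates a JSArray of the given elements kind and installs the
     // provided |elements| backing store and |length|.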
4697 MaybeObject* Heap::AllocateJSArrayWithElements(
4698     FixedArrayBase* elements,
4699     ElementsKind elements_kind,
4700     int length,
4701     PretenureFlag pretenure) {
4702   MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4703   JSArray* array;
4704   if (!maybe_array->To(&array)) return maybe_array;
4705
4706   array->set_elements(elements);
4707   array->set_length(Smi::FromInt(length));
4708   array->ValidateElements();
4709   return array;
4710 }
4711
4712
4713 MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4714   // Allocate map.
4715   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4716   // maps. Will probably depend on the identity of the handler object, too.
4717   Map* map;
4718   MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4719   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4720   map->set_prototype(prototype);
4721
4722   // Allocate the proxy object.
4723   JSProxy* result;
4724   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4725   if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4726   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4727   result->set_handler(handler);
4728   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4729   return result;
4730 }
4731
4732
4733 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4734                                            Object* call_trap,
4735                                            Object* construct_trap,
4736                                            Object* prototype) {
4737   // Allocate map.
4738   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4739   // maps. Will probably depend on the identity of the handler object, too.
4740   Map* map;
4741   MaybeObject* maybe_map_obj =
4742       AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4743   if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4744   map->set_prototype(prototype);
4745
4746   // Allocate the proxy object.
4747   JSFunctionProxy* result;
4748   MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4749   if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4750   result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4751   result->set_handler(handler);
4752   result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4753   result->set_call_trap(call_trap);
4754   result->set_construct_trap(construct_trap);
4755   return result;
4756 }
4757
4758
4759 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
4760   ASSERT(constructor->has_initial_map());
4761   Map* map = constructor->initial_map();
4762   ASSERT(map->is_dictionary_map());
4763
4764   // Make sure no field properties are described in the initial map.
4765   // This guarantees us that normalizing the properties does not
4766   // require us to change property values to PropertyCells.
4767   ASSERT(map->NextFreePropertyIndex() == 0);
4768
4769   // Make sure we don't have a ton of pre-allocated slots in the
4770   // global objects. They will be unused once we normalize the object.
4771   ASSERT(map->unused_property_fields() == 0);
4772   ASSERT(map->inobject_properties() == 0);
4773
4774   // Initial size of the backing store to avoid resizing the storage during
4775   // bootstrapping. The size differs between the JS global object and the
4776   // builtins object.
4777   int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512;
4778
4779   // Allocate a dictionary object for backing storage.
4780   NameDictionary* dictionary;
4781   MaybeObject* maybe_dictionary =
4782       NameDictionary::Allocate(
4783           this,
4784           map->NumberOfOwnDescriptors() * 2 + initial_size);
4785   if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary;
4786
4787   // The global object might be created from an object template with accessors.
4788   // Fill these accessors into the dictionary.
4789   DescriptorArray* descs = map->instance_descriptors();
4790   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
4791     PropertyDetails details = descs->GetDetails(i);
4792     ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
4793     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
4794     Object* value = descs->GetCallbacksObject(i);
4795     MaybeObject* maybe_value = AllocatePropertyCell(value);
4796     if (!maybe_value->ToObject(&value)) return maybe_value;
4797
4798     MaybeObject* maybe_added = dictionary->Add(descs->GetKey(i), value, d);
4799     if (!maybe_added->To(&dictionary)) return maybe_added;
4800   }
4801
4802   // Allocate the global object and initialize it with the backing store.
4803   JSObject* global;
4804   MaybeObject* maybe_global = Allocate(map, OLD_POINTER_SPACE);
4805   if (!maybe_global->To(&global)) return maybe_global;
4806
4807   InitializeJSObjectFromMap(global, dictionary, map);
4808
4809   // Create a new map for the global object.
4810   Map* new_map;
4811   MaybeObject* maybe_map = map->CopyDropDescriptors();
4812   if (!maybe_map->To(&new_map)) return maybe_map;
4813   new_map->set_dictionary_map(true);
4814
4815   // Set up the global object as a normalized object.
4816   global->set_map(new_map);
4817   global->set_properties(dictionary);
4818
4819   // Make sure result is a global object with properties in dictionary.
4820   ASSERT(global->IsGlobalObject());
4821   ASSERT(!global->HasFastProperties());
4822   return global;
4823 }
4824
4825
4826 MaybeObject* Heap::CopyJSObject(JSObject* source) {
4827   // Never used to copy functions.  If functions need to be copied we
4828   // have to be careful to clear the literals array.
4829   SLOW_ASSERT(!source->IsJSFunction());
4830
4831   // Make the clone.
4832   Map* map = source->map();
4833   int object_size = map->instance_size();
4834   Object* clone;
4835
4836   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4837
4838   // If we're forced to always allocate, we use the general allocation
4839   // functions which may leave us with an object in old space.
4840   if (always_allocate()) {
4841     { MaybeObject* maybe_clone =
4842           AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4843       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4844     }
4845     Address clone_address = HeapObject::cast(clone)->address();
4846     CopyBlock(clone_address,
4847               source->address(),
4848               object_size);
4849     // Update write barrier for all fields that lie beyond the header.
4850     RecordWrites(clone_address,
4851                  JSObject::kHeaderSize,
4852                  (object_size - JSObject::kHeaderSize) / kPointerSize);
4853   } else {
4854     wb_mode = SKIP_WRITE_BARRIER;
4855
4856     { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
4857       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4858     }
4859     SLOW_ASSERT(InNewSpace(clone));
4860     // Since we know the clone is allocated in new space, we can copy
4861     // the contents without worrying about updating the write barrier.
4862     CopyBlock(HeapObject::cast(clone)->address(),
4863               source->address(),
4864               object_size);
4865   }
4866
4867   SLOW_ASSERT(
4868       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4869   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4870   FixedArray* properties = FixedArray::cast(source->properties());
4871   // Update elements if necessary.
4872   if (elements->length() > 0) {
4873     Object* elem;
4874     { MaybeObject* maybe_elem;
4875       if (elements->map() == fixed_cow_array_map()) {
4876         maybe_elem = FixedArray::cast(elements);
4877       } else if (source->HasFastDoubleElements()) {
4878         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4879       } else {
4880         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4881       }
4882       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4883     }
4884     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4885   }
4886   // Update properties if necessary.
4887   if (properties->length() > 0) {
4888     Object* prop;
4889     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4890       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4891     }
4892     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4893   }
4894   // Return the new clone.
4895   return clone;
4896 }
4897
4898
4899 MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) {
4900   // Never used to copy functions.  If functions need to be copied we
4901   // have to be careful to clear the literals array.
4902   SLOW_ASSERT(!source->IsJSFunction());
4903
4904   // Make the clone.
4905   Map* map = source->map();
4906   int object_size = map->instance_size();
4907   Object* clone;
4908
4909   ASSERT(map->CanTrackAllocationSite());
4910   ASSERT(map->instance_type() == JS_ARRAY_TYPE);
4911   WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4912
4913   // If we're forced to always allocate, we use the general allocation
4914   // functions which may leave us with an object in old space.
4915   int adjusted_object_size = object_size;
4916   if (always_allocate()) {
4917     // We'll only track the origin if we are certain to allocate in new space.
4918     const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4;
4919     if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) {
4920       adjusted_object_size += AllocationSiteInfo::kSize;
4921     }
4922
4923     { MaybeObject* maybe_clone =
4924           AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4925       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4926     }
4927     Address clone_address = HeapObject::cast(clone)->address();
4928     CopyBlock(clone_address,
4929               source->address(),
4930               object_size);
4931     // Update write barrier for all fields that lie beyond the header.
4932     int write_barrier_offset = adjusted_object_size > object_size
4933         ? JSArray::kSize + AllocationSiteInfo::kSize
4934         : JSObject::kHeaderSize;
4935     if (((object_size - write_barrier_offset) / kPointerSize) > 0) {
4936       RecordWrites(clone_address,
4937                    write_barrier_offset,
4938                    (object_size - write_barrier_offset) / kPointerSize);
4939     }
4940
4941     // Track allocation site information if we failed to allocate it inline.
4942     if (InNewSpace(clone) &&
4943         adjusted_object_size == object_size) {
4944       MaybeObject* maybe_alloc_info =
4945           AllocateStruct(ALLOCATION_SITE_INFO_TYPE);
4946       AllocationSiteInfo* alloc_info;
4947       if (maybe_alloc_info->To(&alloc_info)) {
4948         alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4949         alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4950       }
4951     }
4952   } else {
4953     wb_mode = SKIP_WRITE_BARRIER;
4954     adjusted_object_size += AllocationSiteInfo::kSize;
4955
4956     { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4957       if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4958     }
4959     SLOW_ASSERT(InNewSpace(clone));
4960     // Since we know the clone is allocated in new space, we can copy
4961     // the contents without worrying about updating the write barrier.
4962     CopyBlock(HeapObject::cast(clone)->address(),
4963               source->address(),
4964               object_size);
4965   }
4966
4967   if (adjusted_object_size > object_size) {
4968     AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>(
4969         reinterpret_cast<Address>(clone) + object_size);
4970     alloc_info->set_map_no_write_barrier(allocation_site_info_map());
4971     alloc_info->set_payload(source, SKIP_WRITE_BARRIER);
4972   }
4973
4974   SLOW_ASSERT(
4975       JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4976   FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4977   FixedArray* properties = FixedArray::cast(source->properties());
4978   // Update elements if necessary.
4979   if (elements->length() > 0) {
4980     Object* elem;
4981     { MaybeObject* maybe_elem;
4982       if (elements->map() == fixed_cow_array_map()) {
4983         maybe_elem = FixedArray::cast(elements);
4984       } else if (source->HasFastDoubleElements()) {
4985         maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4986       } else {
4987         maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4988       }
4989       if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4990     }
4991     JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4992   }
4993   // Update properties if necessary.
4994   if (properties->length() > 0) {
4995     Object* prop;
4996     { MaybeObject* maybe_prop = CopyFixedArray(properties);
4997       if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4998     }
4999     JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
5000   }
5001   // Return the new clone.
5002   return clone;
5003 }
5004
5005
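     // Converts an existing JSReceiver in place into an object of the given
     // instance type and size by installing a fresh map; any leftover space
     // is filled with a filler object.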
5006 MaybeObject* Heap::ReinitializeJSReceiver(
5007     JSReceiver* object, InstanceType type, int size) {
5008   ASSERT(type >= FIRST_JS_OBJECT_TYPE);
5009
5010   // Allocate fresh map.
5011   // TODO(rossberg): Once we optimize proxies, cache these maps.
5012   Map* map;
5013   MaybeObject* maybe = AllocateMap(type, size);
5014   if (!maybe->To<Map>(&map)) return maybe;
5015
5016   // Check that the receiver has at least the size of the fresh object.
5017   int size_difference = object->map()->instance_size() - map->instance_size();
5018   ASSERT(size_difference >= 0);
5019
5020   map->set_prototype(object->map()->prototype());
5021
5022   // Allocate the backing storage for the properties.
5023   int prop_size = map->unused_property_fields() - map->inobject_properties();
5024   Object* properties;
5025   maybe = AllocateFixedArray(prop_size, TENURED);
5026   if (!maybe->ToObject(&properties)) return maybe;
5027
5028   // Functions require some allocation, which might fail here.
5029   SharedFunctionInfo* shared = NULL;
5030   if (type == JS_FUNCTION_TYPE) {
5031     String* name;
5032     maybe =
5033         InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
5034     if (!maybe->To<String>(&name)) return maybe;
5035     maybe = AllocateSharedFunctionInfo(name);
5036     if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
5037   }
5038
5039   // Because of possible retries of this function after failure,
5040   // we must NOT fail after this point, where we have changed the type!
5041
5042   // Reset the map for the object.
5043   object->set_map(map);
5044   JSObject* jsobj = JSObject::cast(object);
5045
5046   // Reinitialize the object from the constructor map.
5047   InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5048
5049   // Functions require some minimal initialization.
5050   if (type == JS_FUNCTION_TYPE) {
5051     map->set_function_with_prototype(true);
5052     InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5053     JSFunction::cast(object)->set_context(
5054         isolate()->context()->native_context());
5055   }
5056
5057   // Put in filler if the new object is smaller than the old.
5058   if (size_difference > 0) {
5059     CreateFillerObjectAt(
5060         object->address() + map->instance_size(), size_difference);
5061   }
5062
5063   return object;
5064 }
5065
5066
5067 MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5068                                              JSGlobalProxy* object) {
5069   ASSERT(constructor->has_initial_map());
5070   Map* map = constructor->initial_map();
5071
5072   // Check that the already allocated object has the same size and type as
5073   // objects allocated using the constructor.
5074   ASSERT(map->instance_size() == object->map()->instance_size());
5075   ASSERT(map->instance_type() == object->map()->instance_type());
5076
5077   // Allocate the backing storage for the properties.
5078   int prop_size = map->unused_property_fields() - map->inobject_properties();
5079   Object* properties;
5080   { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5081     if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5082   }
5083
5084   // Reset the map for the object.
5085   object->set_map(constructor->initial_map());
5086
5087   // Reinitialize the object from the constructor map.
5088   InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5089   return object;
5090 }
5091
5092
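     // Allocates a sequential one-byte string and copies |string| into it;
     // single-character strings are looked up via
     // LookupSingleCharacterStringFromCode instead.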
5093 MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5094                                            PretenureFlag pretenure) {
5095   int length = string.length();
5096   if (length == 1) {
5097     return Heap::LookupSingleCharacterStringFromCode(string[0]);
5098   }
5099   Object* result;
5100   { MaybeObject* maybe_result =
5101         AllocateRawOneByteString(string.length(), pretenure);
5102     if (!maybe_result->ToObject(&result)) return maybe_result;
5103   }
5104
5105   // Copy the characters into the new object.
5106   CopyChars(SeqOneByteString::cast(result)->GetChars(),
5107             string.start(),
5108             length);
5109   return result;
5110 }
5111
5112
5113 MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5114                                               int non_ascii_start,
5115                                               PretenureFlag pretenure) {
5116   // Continue counting the number of characters in the UTF-8 string, starting
5117   // from the first non-ascii character or word.
5118   Access<UnicodeCache::Utf8Decoder>
5119       decoder(isolate_->unicode_cache()->utf8_decoder());
5120   decoder->Reset(string.start() + non_ascii_start,
5121                  string.length() - non_ascii_start);
5122   int utf16_length = decoder->Utf16Length();
5123   ASSERT(utf16_length > 0);
5124   // Allocate string.
5125   Object* result;
5126   {
5127     int chars = non_ascii_start + utf16_length;
5128     MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5129     if (!maybe_result->ToObject(&result)) return maybe_result;
5130   }
5131   // Convert and copy the characters into the new object.
5132   SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5133   // Copy ascii portion.
5134   uint16_t* data = twobyte->GetChars();
5135   if (non_ascii_start != 0) {
5136     const char* ascii_data = string.start();
5137     for (int i = 0; i < non_ascii_start; i++) {
5138       *data++ = *ascii_data++;
5139     }
5140   }
5141   // Now write the remainder.
5142   decoder->WriteUtf16(data, utf16_length);
5143   return result;
5144 }
5145
5146
5147 MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5148                                              PretenureFlag pretenure) {
5149   // Check if the two-byte input can be represented as a one-byte string.
5150   Object* result;
5151   int length = string.length();
5152   const uc16* start = string.start();
5153
5154   if (String::IsOneByte(start, length)) {
5155     MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5156     if (!maybe_result->ToObject(&result)) return maybe_result;
5157     CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5158   } else {  // It's not a one byte string.
5159     MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5160     if (!maybe_result->ToObject(&result)) return maybe_result;
5161     CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5162   }
5163   return result;
5164 }
5165
5166
5167 Map* Heap::InternalizedStringMapForString(String* string) {
5168   // If the string is in new space it cannot be used as an internalized string.
5169   if (InNewSpace(string)) return NULL;
5170
5171   // Find the corresponding internalized string map for strings.
5172   switch (string->map()->instance_type()) {
5173     case STRING_TYPE: return internalized_string_map();
5174     case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5175     case CONS_STRING_TYPE: return cons_internalized_string_map();
5176     case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5177     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5178     case EXTERNAL_ASCII_STRING_TYPE:
5179       return external_ascii_internalized_string_map();
5180     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5181       return external_internalized_string_with_one_byte_data_map();
5182     case SHORT_EXTERNAL_STRING_TYPE:
5183       return short_external_internalized_string_map();
5184     case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5185       return short_external_ascii_internalized_string_map();
5186     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5187       return short_external_internalized_string_with_one_byte_data_map();
5188     default: return NULL;  // No match found.
5189   }
5190 }
5191
5192
5193 static inline void WriteOneByteData(Vector<const char> vector,
5194                                     uint8_t* chars,
5195                                     int len) {
5196   // Only works for ascii.
5197   ASSERT(vector.length() == len);
5198   OS::MemCopy(chars, vector.start(), len);
5199 }
5200
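     // Decodes a UTF-8 byte stream into exactly |len| UTF-16 code units,
     // emitting surrogate pairs for characters outside the BMP.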
5201 static inline void WriteTwoByteData(Vector<const char> vector,
5202                                     uint16_t* chars,
5203                                     int len) {
5204   const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5205   unsigned stream_length = vector.length();
5206   while (stream_length != 0) {
5207     unsigned consumed = 0;
5208     uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5209     ASSERT(c != unibrow::Utf8::kBadChar);
5210     ASSERT(consumed <= stream_length);
5211     stream_length -= consumed;
5212     stream += consumed;
5213     if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5214       len -= 2;
5215       if (len < 0) break;
5216       *chars++ = unibrow::Utf16::LeadSurrogate(c);
5217       *chars++ = unibrow::Utf16::TrailSurrogate(c);
5218     } else {
5219       len -= 1;
5220       if (len < 0) break;
5221       *chars++ = c;
5222     }
5223   }
5224   ASSERT(stream_length == 0);
5225   ASSERT(len == 0);
5226 }
5227
5228
5229 static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5230   ASSERT(s->length() == len);
5231   String::WriteToFlat(s, chars, 0, len);
5232 }
5233
5234 static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5235   ASSERT(s->length() == len);
5236   String::WriteToFlat(s, chars, 0, len);
5237 }
5238
5239
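     // Allocates a sequential internalized string of |chars| characters
     // directly in old data space (or large object space for big strings)
     // and fills it from |t|, which is either a String or a character vector.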
5240 template<bool is_one_byte, typename T>
5241 MaybeObject* Heap::AllocateInternalizedStringImpl(
5242     T t, int chars, uint32_t hash_field) {
5243   ASSERT(chars >= 0);
5244   // Compute map and object size.
5245   int size;
5246   Map* map;
5247
5248   if (is_one_byte) {
5249     if (chars > SeqOneByteString::kMaxLength) {
5250       return Failure::OutOfMemoryException(0x9);
5251     }
5252     map = ascii_internalized_string_map();
5253     size = SeqOneByteString::SizeFor(chars);
5254   } else {
5255     if (chars > SeqTwoByteString::kMaxLength) {
5256       return Failure::OutOfMemoryException(0xa);
5257     }
5258     map = internalized_string_map();
5259     size = SeqTwoByteString::SizeFor(chars);
5260   }
5261
5262   // Allocate string.
5263   Object* result;
5264   { MaybeObject* maybe_result = (size > Page::kMaxNonCodeHeapObjectSize)
5265                    ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
5266                    : old_data_space_->AllocateRaw(size);
5267     if (!maybe_result->ToObject(&result)) return maybe_result;
5268   }
5269
5270   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5271   // Set length and hash fields of the allocated string.
5272   String* answer = String::cast(result);
5273   answer->set_length(chars);
5274   answer->set_hash_field(hash_field);
5275
5276   ASSERT_EQ(size, answer->Size());
5277
5278   if (is_one_byte) {
5279     WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5280   } else {
5281     WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5282   }
5283   return answer;
5284 }
5285
5286
5287 // Need explicit instantiations.
5288 template
5289 MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5290 template
5291 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5292     String*, int, uint32_t);
5293 template
5294 MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5295     Vector<const char>, int, uint32_t);
5296
5297
5298 MaybeObject* Heap::AllocateRawOneByteString(int length,
5299                                             PretenureFlag pretenure) {
5300   if (length < 0 || length > SeqOneByteString::kMaxLength) {
5301     return Failure::OutOfMemoryException(0xb);
5302   }
5303
5304   int size = SeqOneByteString::SizeFor(length);
5305   ASSERT(size <= SeqOneByteString::kMaxSize);
5306
5307   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5308   AllocationSpace retry_space = OLD_DATA_SPACE;
5309
5310   if (space == NEW_SPACE) {
5311     if (size > kMaxObjectSizeInNewSpace) {
5312       // Allocate in large object space; the retry space will be ignored.
5313       space = LO_SPACE;
5314     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5315       // Allocate in new space, retry in large object space.
5316       retry_space = LO_SPACE;
5317     }
5318   } else if (space == OLD_DATA_SPACE &&
5319              size > Page::kMaxNonCodeHeapObjectSize) {
5320     space = LO_SPACE;
5321   }
5322   Object* result;
5323   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5324     if (!maybe_result->ToObject(&result)) return maybe_result;
5325   }
5326
5327   // Partially initialize the object.
5328   HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5329   String::cast(result)->set_length(length);
5330   String::cast(result)->set_hash_field(String::kEmptyHashField);
5331   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5332
5333   return result;
5334 }
5335
5336
5337 MaybeObject* Heap::AllocateRawTwoByteString(int length,
5338                                             PretenureFlag pretenure) {
5339   if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5340     return Failure::OutOfMemoryException(0xc);
5341   }
5342   int size = SeqTwoByteString::SizeFor(length);
5343   ASSERT(size <= SeqTwoByteString::kMaxSize);
5344   AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5345   AllocationSpace retry_space = OLD_DATA_SPACE;
5346
5347   if (space == NEW_SPACE) {
5348     if (size > kMaxObjectSizeInNewSpace) {
5349       // Allocate in large object space; the retry space will be ignored.
5350       space = LO_SPACE;
5351     } else if (size > Page::kMaxNonCodeHeapObjectSize) {
5352       // Allocate in new space, retry in large object space.
5353       retry_space = LO_SPACE;
5354     }
5355   } else if (space == OLD_DATA_SPACE &&
5356              size > Page::kMaxNonCodeHeapObjectSize) {
5357     space = LO_SPACE;
5358   }
5359   Object* result;
5360   { MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
5361     if (!maybe_result->ToObject(&result)) return maybe_result;
5362   }
5363
5364   // Partially initialize the object.
5365   HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5366   String::cast(result)->set_length(length);
5367   String::cast(result)->set_hash_field(String::kEmptyHashField);
5368   ASSERT_EQ(size, HeapObject::cast(result)->Size());
5369   return result;
5370 }
5371
5372
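     // Allocates an empty JSArray using the native context's Array function
     // map, transitioned to the requested elements kind when such a map is
     // available.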
5373 MaybeObject* Heap::AllocateJSArray(
5374     ElementsKind elements_kind,
5375     PretenureFlag pretenure) {
5376   Context* native_context = isolate()->context()->native_context();
5377   JSFunction* array_function = native_context->array_function();
5378   Map* map = array_function->initial_map();
5379   Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5380   if (transition_map != NULL) map = transition_map;
5381   return AllocateJSObjectFromMap(map, pretenure);
5382 }
5383
5384
5385 MaybeObject* Heap::AllocateJSArrayWithAllocationSite(
5386     ElementsKind elements_kind,
5387     Handle<Object> allocation_site_info_payload) {
5388   Context* native_context = isolate()->context()->native_context();
5389   JSFunction* array_function = native_context->array_function();
5390   Map* map = array_function->initial_map();
5391   Object* maybe_map_array = native_context->js_array_maps();
5392   if (!maybe_map_array->IsUndefined()) {
5393     Object* maybe_transitioned_map =
5394         FixedArray::cast(maybe_map_array)->get(elements_kind);
5395     if (!maybe_transitioned_map->IsUndefined()) {
5396       map = Map::cast(maybe_transitioned_map);
5397     }
5398   }
5399   return AllocateJSObjectFromMapWithAllocationSite(map,
5400       allocation_site_info_payload);
5401 }
5402
5403
5404 MaybeObject* Heap::AllocateEmptyFixedArray() {
5405   int size = FixedArray::SizeFor(0);
5406   Object* result;
5407   { MaybeObject* maybe_result =
5408         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5409     if (!maybe_result->ToObject(&result)) return maybe_result;
5410   }
5411   // Initialize the object.
5412   reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5413       fixed_array_map());
5414   reinterpret_cast<FixedArray*>(result)->set_length(0);
5415   return result;
5416 }
5417
5418 MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5419   return AllocateExternalArray(0, array_type, NULL, TENURED);
5420 }
5421
5422
5423 MaybeObject* Heap::AllocateRawFixedArray(int length) {
5424   if (length < 0 || length > FixedArray::kMaxLength) {
5425     return Failure::OutOfMemoryException(0xd);
5426   }
5427   ASSERT(length > 0);
5428   // Use the general function if we're forced to always allocate.
5429   if (always_allocate()) return AllocateFixedArray(length, TENURED);
5430   // Allocate the raw data for a fixed array.
5431   int size = FixedArray::SizeFor(length);
5432   return size <= kMaxObjectSizeInNewSpace
5433       ? new_space_.AllocateRaw(size)
5434       : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
5435 }
5436
5437
5438 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5439   int len = src->length();
5440   Object* obj;
5441   { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
5442     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5443   }
5444   if (InNewSpace(obj)) {
5445     HeapObject* dst = HeapObject::cast(obj);
5446     dst->set_map_no_write_barrier(map);
5447     CopyBlock(dst->address() + kPointerSize,
5448               src->address() + kPointerSize,
5449               FixedArray::SizeFor(len) - kPointerSize);
5450     return obj;
5451   }
5452   HeapObject::cast(obj)->set_map_no_write_barrier(map);
5453   FixedArray* result = FixedArray::cast(obj);
5454   result->set_length(len);
5455
5456   // Copy the content
5457   DisallowHeapAllocation no_gc;
5458   WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5459   for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5460   return result;
5461 }
5462
5463
5464 MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5465                                                Map* map) {
5466   int len = src->length();
5467   Object* obj;
5468   { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5469     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5470   }
5471   HeapObject* dst = HeapObject::cast(obj);
5472   dst->set_map_no_write_barrier(map);
5473   CopyBlock(
5474       dst->address() + FixedDoubleArray::kLengthOffset,
5475       src->address() + FixedDoubleArray::kLengthOffset,
5476       FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5477   return obj;
5478 }
5479
5480
5481 MaybeObject* Heap::AllocateFixedArray(int length) {
5482   ASSERT(length >= 0);
5483   if (length == 0) return empty_fixed_array();
5484   Object* result;
5485   { MaybeObject* maybe_result = AllocateRawFixedArray(length);
5486     if (!maybe_result->ToObject(&result)) return maybe_result;
5487   }
5488   // Initialize header.
5489   FixedArray* array = reinterpret_cast<FixedArray*>(result);
5490   array->set_map_no_write_barrier(fixed_array_map());
5491   array->set_length(length);
5492   // Initialize body.
5493   ASSERT(!InNewSpace(undefined_value()));
5494   MemsetPointer(array->data_start(), undefined_value(), length);
5495   return result;
5496 }
5497
5498
5499 MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5500   if (length < 0 || length > FixedArray::kMaxLength) {
5501     return Failure::OutOfMemoryException(0xe);
5502   }
5503
5504   AllocationSpace space =
5505       (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
5506   int size = FixedArray::SizeFor(length);
5507   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5508     // Too big for new space.
5509     space = LO_SPACE;
5510   } else if (space == OLD_POINTER_SPACE &&
5511              size > Page::kMaxNonCodeHeapObjectSize) {
5512     // Too big for old pointer space.
5513     space = LO_SPACE;
5514   }
5515
5516   AllocationSpace retry_space =
5517       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_POINTER_SPACE : LO_SPACE;
5518
5519   return AllocateRaw(size, space, retry_space);
5520 }
5521
5522
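     // Helper that allocates a FixedArray of |length| elements and
     // initializes every element with |filler|, which must not live in new
     // space.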
5523 MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
5524     Heap* heap,
5525     int length,
5526     PretenureFlag pretenure,
5527     Object* filler) {
5528   ASSERT(length >= 0);
5529   ASSERT(heap->empty_fixed_array()->IsFixedArray());
5530   if (length == 0) return heap->empty_fixed_array();
5531
5532   ASSERT(!heap->InNewSpace(filler));
5533   Object* result;
5534   { MaybeObject* maybe_result = heap->AllocateRawFixedArray(length, pretenure);
5535     if (!maybe_result->ToObject(&result)) return maybe_result;
5536   }
5537
5538   HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
5539   FixedArray* array = FixedArray::cast(result);
5540   array->set_length(length);
5541   MemsetPointer(array->data_start(), filler, length);
5542   return array;
5543 }
5544
5545
5546 MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5547   return AllocateFixedArrayWithFiller(this,
5548                                       length,
5549                                       pretenure,
5550                                       undefined_value());
5551 }
5552
5553
5554 MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5555                                                PretenureFlag pretenure) {
5556   return AllocateFixedArrayWithFiller(this,
5557                                       length,
5558                                       pretenure,
5559                                       the_hole_value());
5560 }
5561
5562
5563 MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5564   if (length == 0) return empty_fixed_array();
5565
5566   Object* obj;
5567   { MaybeObject* maybe_obj = AllocateRawFixedArray(length);
5568     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5569   }
5570
5571   reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5572       fixed_array_map());
5573   FixedArray::cast(obj)->set_length(length);
5574   return obj;
5575 }
5576
5577
5578 MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5579   int size = FixedDoubleArray::SizeFor(0);
5580   Object* result;
5581   { MaybeObject* maybe_result =
5582         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5583     if (!maybe_result->ToObject(&result)) return maybe_result;
5584   }
5585   // Initialize the object.
5586   reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5587       fixed_double_array_map());
5588   reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5589   return result;
5590 }
5591
5592
5593 MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5594     int length,
5595     PretenureFlag pretenure) {
5596   if (length == 0) return empty_fixed_array();
5597
5598   Object* elements_object;
5599   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5600   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5601   FixedDoubleArray* elements =
5602       reinterpret_cast<FixedDoubleArray*>(elements_object);
5603
5604   elements->set_map_no_write_barrier(fixed_double_array_map());
5605   elements->set_length(length);
5606   return elements;
5607 }
5608
5609
5610 MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5611     int length,
5612     PretenureFlag pretenure) {
5613   if (length == 0) return empty_fixed_array();
5614
5615   Object* elements_object;
5616   MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5617   if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5618   FixedDoubleArray* elements =
5619       reinterpret_cast<FixedDoubleArray*>(elements_object);
5620
5621   for (int i = 0; i < length; ++i) {
5622     elements->set_the_hole(i);
5623   }
5624
5625   elements->set_map_no_write_barrier(fixed_double_array_map());
5626   elements->set_length(length);
5627   return elements;
5628 }
5629
5630
5631 MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5632                                                PretenureFlag pretenure) {
5633   if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5634     return Failure::OutOfMemoryException(0xf);
5635   }
5636
5637   AllocationSpace space =
5638       (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
5639   int size = FixedDoubleArray::SizeFor(length);
5640
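       // On 32-bit platforms, reserve an extra pointer slot so that the array
       // payload can be realigned to a double boundary by EnsureDoubleAligned.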
5641 #ifndef V8_HOST_ARCH_64_BIT
5642   size += kPointerSize;
5643 #endif
5644
5645   if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
5646     // Too big for new space.
5647     space = LO_SPACE;
5648   } else if (space == OLD_DATA_SPACE &&
5649              size > Page::kMaxNonCodeHeapObjectSize) {
5650     // Too big for old data space.
5651     space = LO_SPACE;
5652   }
5653
5654   AllocationSpace retry_space =
5655       (size <= Page::kMaxNonCodeHeapObjectSize) ? OLD_DATA_SPACE : LO_SPACE;
5656
5657   HeapObject* object;
5658   { MaybeObject* maybe_object = AllocateRaw(size, space, retry_space);
5659     if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5660   }
5661
5662   return EnsureDoubleAligned(this, object, size);
5663 }
5664
5665
5666 MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5667   Object* result;
5668   { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5669     if (!maybe_result->ToObject(&result)) return maybe_result;
5670   }
5671   reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5672       hash_table_map());
5673   ASSERT(result->IsHashTable());
5674   return result;
5675 }
5676
5677
5678 MaybeObject* Heap::AllocateSymbol() {
5679   // Statically ensure that it is safe to allocate symbols in paged spaces.
5680   STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5681
5682   Object* result;
5683   MaybeObject* maybe =
5684       AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5685   if (!maybe->ToObject(&result)) return maybe;
5686
5687   HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5688
5689   // Generate a random hash value.
5690   int hash;
5691   int attempts = 0;
5692   do {
5693     hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask;
5694     attempts++;
5695   } while (hash == 0 && attempts < 30);
5696   if (hash == 0) hash = 1;  // never return 0
5697
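       // Store the hash in the hash field; kIsNotArrayIndexMask marks the
       // field as containing a plain hash rather than a cached array index.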
5698   Symbol::cast(result)->set_hash_field(
5699       Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5700   Symbol::cast(result)->set_name(undefined_value());
5701
5702   ASSERT(result->IsSymbol());
5703   return result;
5704 }
5705
5706
5707 MaybeObject* Heap::AllocateNativeContext() {
5708   Object* result;
5709   { MaybeObject* maybe_result =
5710         AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5711     if (!maybe_result->ToObject(&result)) return maybe_result;
5712   }
5713   Context* context = reinterpret_cast<Context*>(result);
5714   context->set_map_no_write_barrier(native_context_map());
5715   context->set_js_array_maps(undefined_value());
5716   ASSERT(context->IsNativeContext());
5717   ASSERT(result->IsContext());
5718   return result;
5719 }
5720
5721
5722 MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5723                                          ScopeInfo* scope_info) {
5724   Object* result;
5725   { MaybeObject* maybe_result =
5726         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5727     if (!maybe_result->ToObject(&result)) return maybe_result;
5728   }
5729   Context* context = reinterpret_cast<Context*>(result);
5730   context->set_map_no_write_barrier(global_context_map());
5731   context->set_closure(function);
5732   context->set_previous(function->context());
5733   context->set_extension(scope_info);
5734   context->set_global_object(function->context()->global_object());
5735   ASSERT(context->IsGlobalContext());
5736   ASSERT(result->IsContext());
5737   return context;
5738 }
5739
5740
5741 MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5742   Object* result;
5743   { MaybeObject* maybe_result =
5744         AllocateFixedArray(scope_info->ContextLength(), TENURED);
5745     if (!maybe_result->ToObject(&result)) return maybe_result;
5746   }
5747   Context* context = reinterpret_cast<Context*>(result);
5748   context->set_map_no_write_barrier(module_context_map());
5749   // Instance link will be set later.
5750   context->set_extension(Smi::FromInt(0));
5751   return context;
5752 }
5753
5754
5755 MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5756   ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5757   Object* result;
5758   { MaybeObject* maybe_result = AllocateFixedArray(length);
5759     if (!maybe_result->ToObject(&result)) return maybe_result;
5760   }
5761   Context* context = reinterpret_cast<Context*>(result);
5762   context->set_map_no_write_barrier(function_context_map());
5763   context->set_closure(function);
5764   context->set_previous(function->context());
5765   context->set_extension(Smi::FromInt(0));
5766   context->set_global_object(function->context()->global_object());
5767   return context;
5768 }
5769
5770
5771 MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5772                                         Context* previous,
5773                                         String* name,
5774                                         Object* thrown_object) {
5775   STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5776   Object* result;
5777   { MaybeObject* maybe_result =
5778         AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5779     if (!maybe_result->ToObject(&result)) return maybe_result;
5780   }
5781   Context* context = reinterpret_cast<Context*>(result);
5782   context->set_map_no_write_barrier(catch_context_map());
5783   context->set_closure(function);
5784   context->set_previous(previous);
5785   context->set_extension(name);
5786   context->set_global_object(previous->global_object());
5787   context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5788   return context;
5789 }
5790
5791
5792 MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5793                                        Context* previous,
5794                                        JSObject* extension) {
5795   Object* result;
5796   { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5797     if (!maybe_result->ToObject(&result)) return maybe_result;
5798   }
5799   Context* context = reinterpret_cast<Context*>(result);
5800   context->set_map_no_write_barrier(with_context_map());
5801   context->set_closure(function);
5802   context->set_previous(previous);
5803   context->set_extension(extension);
5804   context->set_global_object(previous->global_object());
5805   return context;
5806 }
5807
5808
5809 MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5810                                         Context* previous,
5811                                         ScopeInfo* scope_info) {
5812   Object* result;
5813   { MaybeObject* maybe_result =
5814         AllocateFixedArrayWithHoles(scope_info->ContextLength());
5815     if (!maybe_result->ToObject(&result)) return maybe_result;
5816   }
5817   Context* context = reinterpret_cast<Context*>(result);
5818   context->set_map_no_write_barrier(block_context_map());
5819   context->set_closure(function);
5820   context->set_previous(previous);
5821   context->set_extension(scope_info);
5822   context->set_global_object(previous->global_object());
5823   return context;
5824 }
5825
5826
5827 MaybeObject* Heap::AllocateScopeInfo(int length) {
5828   FixedArray* scope_info;
5829   MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5830   if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5831   scope_info->set_map_no_write_barrier(scope_info_map());
5832   return scope_info;
5833 }
5834
5835
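     // Wraps a raw pointer for the embedder: a Foreign holding the address is
     // stored in internal field 0 of a new JSObject with the external map.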
5836 MaybeObject* Heap::AllocateExternal(void* value) {
5837   Foreign* foreign;
5838   { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5839     if (!maybe_result->To(&foreign)) return maybe_result;
5840   }
5841   JSObject* external;
5842   { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5843     if (!maybe_result->To(&external)) return maybe_result;
5844   }
5845   external->SetInternalField(0, foreign);
5846   return external;
5847 }
5848
5849
5850 MaybeObject* Heap::AllocateStruct(InstanceType type) {
5851   Map* map;
5852   switch (type) {
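         // MAKE_CASE expands to one case per entry in STRUCT_LIST, mapping
         // each struct instance type (e.g. ACCESSOR_INFO_TYPE) to its map
         // accessor.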
5853 #define MAKE_CASE(NAME, Name, name) \
5854     case NAME##_TYPE: map = name##_map(); break;
5855 STRUCT_LIST(MAKE_CASE)
5856 #undef MAKE_CASE
5857     default:
5858       UNREACHABLE();
5859       return Failure::InternalError();
5860   }
5861   int size = map->instance_size();
5862   AllocationSpace space =
5863       (size > Page::kMaxNonCodeHeapObjectSize) ? LO_SPACE : OLD_POINTER_SPACE;
5864   Object* result;
5865   { MaybeObject* maybe_result = Allocate(map, space);
5866     if (!maybe_result->ToObject(&result)) return maybe_result;
5867   }
5868   Struct::cast(result)->InitializeBody(size);
5869   return result;
5870 }
5871
5872
5873 bool Heap::IsHeapIterable() {
5874   return (!old_pointer_space()->was_swept_conservatively() &&
5875           !old_data_space()->was_swept_conservatively());
5876 }
5877
5878
5879 void Heap::EnsureHeapIsIterable() {
5880   ASSERT(AllowHeapAllocation::IsAllowed());
5881   if (!IsHeapIterable()) {
5882     CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5883   }
5884   ASSERT(IsHeapIterable());
5885 }
5886
5887
5888 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5889   incremental_marking()->Step(step_size,
5890                               IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5891
5892   if (incremental_marking()->IsComplete()) {
5893     bool uncommit = false;
5894     if (gc_count_at_last_idle_gc_ == gc_count_) {
5895       // No GC since the last full GC; the mutator is probably not active.
5896       isolate_->compilation_cache()->Clear();
5897       uncommit = true;
5898     }
5899     CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5900     mark_sweeps_since_idle_round_started_++;
5901     gc_count_at_last_idle_gc_ = gc_count_;
5902     if (uncommit) {
5903       new_space_.Shrink();
5904       UncommitFromSpace();
5905     }
5906   }
5907 }
5908
5909
5910 bool Heap::IdleNotification(int hint) {
5911   // Hints greater than this value indicate that
5912   // the embedder is requesting a lot of GC work.
5913   const int kMaxHint = 1000;
5914   const int kMinHintForIncrementalMarking = 10;
5915   // Minimal hint that allows a full GC.
5916   const int kMinHintForFullGC = 100;
5917   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5918   // The size factor is in range [5..250]. The numbers here are chosen from
5919   // experiments. If you change them, make sure to test with
5920   // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
5921   intptr_t step_size =
5922       size_factor * IncrementalMarking::kAllocatedThreshold;
5923
5924   if (contexts_disposed_ > 0) {
5925     if (hint >= kMaxHint) {
5926       // The embedder is requesting a lot of GC work after context disposal,
5927       // so we age inline caches so that they don't keep objects from
5928       // the old context alive.
5929       AgeInlineCaches();
5930     }
5931     int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5932     if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5933         incremental_marking()->IsStopped()) {
5934       HistogramTimerScope scope(isolate_->counters()->gc_context());
5935       CollectAllGarbage(kReduceMemoryFootprintMask,
5936                         "idle notification: contexts disposed");
5937     } else {
5938       AdvanceIdleIncrementalMarking(step_size);
5939       contexts_disposed_ = 0;
5940     }
5941     // After context disposal there is likely a lot of garbage remaining, so
5942     // reset the idle notification counters in order to trigger more
5943     // incremental GCs on subsequent idle notifications.
5944     StartIdleRound();
5945     return false;
5946   }
5947
5948   if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5949     return IdleGlobalGC();
5950   }
5951
5952   // By doing small chunks of GC work in each IdleNotification, we
5953   // perform a round of incremental GCs and after that wait until
5954   // the mutator creates enough garbage to justify a new round.
5955   // An incremental GC progresses as follows:
5956   // 1. many incremental marking steps,
5957   // 2. one old space mark-sweep-compact,
5958   // 3. many lazy sweep steps.
5959   // Use mark-sweep-compact events to count incremental GCs in a round.
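       // An embedder drives this through the public API during idle time,
       // e.g. by calling v8::V8::IdleNotification(hint) repeatedly until it
       // returns true, i.e. until no further GC work is considered useful.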
5960
5961   if (incremental_marking()->IsStopped()) {
5962     if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5963         !IsSweepingComplete() &&
5964         !AdvanceSweepers(static_cast<int>(step_size))) {
5965       return false;
5966     }
5967   }
5968
5969   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5970     if (EnoughGarbageSinceLastIdleRound()) {
5971       StartIdleRound();
5972     } else {
5973       return true;
5974     }
5975   }
5976
5977   int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5978                               mark_sweeps_since_idle_round_started_;
5979
5980   if (incremental_marking()->IsStopped()) {
5981     // If there are no more than two GCs left in this idle round and we are
5982     // allowed to do a full GC, then make those GCs full in order to compact
5983     // the code space.
5984     // TODO(ulan): Once we enable code compaction for incremental marking,
5985     // we can get rid of this special case and always start incremental marking.
5986     if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5987       CollectAllGarbage(kReduceMemoryFootprintMask,
5988                         "idle notification: finalize idle round");
5989       mark_sweeps_since_idle_round_started_++;
5990     } else if (hint > kMinHintForIncrementalMarking) {
5991       incremental_marking()->Start();
5992     }
5993   }
5994   if (!incremental_marking()->IsStopped() &&
5995       hint > kMinHintForIncrementalMarking) {
5996     AdvanceIdleIncrementalMarking(step_size);
5997   }
5998
5999   if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
6000     FinishIdleRound();
6001     return true;
6002   }
6003
6004   return false;
6005 }
6006
6007
6008 bool Heap::IdleGlobalGC() {
6009   static const int kIdlesBeforeScavenge = 4;
6010   static const int kIdlesBeforeMarkSweep = 7;
6011   static const int kIdlesBeforeMarkCompact = 8;
6012   static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
6013   static const unsigned int kGCsBetweenCleanup = 4;
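       // Schedule: the 4th consecutive idle notification triggers a scavenge,
       // the 7th a full collection (after clearing the compilation cache), and
       // the 8th a final full collection that ends the round.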
6014
6015   if (!last_idle_notification_gc_count_init_) {
6016     last_idle_notification_gc_count_ = gc_count_;
6017     last_idle_notification_gc_count_init_ = true;
6018   }
6019
6020   bool uncommit = true;
6021   bool finished = false;
6022
6023   // Reset the number of idle notifications received when a number of
6024   // GCs have taken place. This allows another round of cleanup based
6025   // on idle notifications if enough work has been carried out to
6026   // provoke a number of garbage collections.
6027   if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
6028     number_idle_notifications_ =
6029         Min(number_idle_notifications_ + 1, kMaxIdleCount);
6030   } else {
6031     number_idle_notifications_ = 0;
6032     last_idle_notification_gc_count_ = gc_count_;
6033   }
6034
6035   if (number_idle_notifications_ == kIdlesBeforeScavenge) {
6036     CollectGarbage(NEW_SPACE, "idle notification");
6037     new_space_.Shrink();
6038     last_idle_notification_gc_count_ = gc_count_;
6039   } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
6040     // Before doing the mark-sweep collections we clear the
6041     // compilation cache to avoid hanging on to source code and
6042     // generated code for cached functions.
6043     isolate_->compilation_cache()->Clear();
6044
6045     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6046     new_space_.Shrink();
6047     last_idle_notification_gc_count_ = gc_count_;
6048
6049   } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
6050     CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
6051     new_space_.Shrink();
6052     last_idle_notification_gc_count_ = gc_count_;
6053     number_idle_notifications_ = 0;
6054     finished = true;
6055   } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
6056     // If we have received more than kIdlesBeforeMarkCompact idle
6057     // notifications we do not perform any cleanup because we don't
6058     // expect to gain much by doing so.
6059     finished = true;
6060   }
6061
6062   if (uncommit) UncommitFromSpace();
6063
6064   return finished;
6065 }
6066
6067
6068 #ifdef DEBUG
6069
6070 void Heap::Print() {
6071   if (!HasBeenSetUp()) return;
6072   isolate()->PrintStack(stdout);
6073   AllSpaces spaces(this);
6074   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
6075     space->Print();
6076   }
6077 }
6078
6079
6080 void Heap::ReportCodeStatistics(const char* title) {
6081   PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
6082   PagedSpace::ResetCodeStatistics();
6083   // We do not look for code in new space, map space, or old space.  If code
6084   // somehow ends up in those spaces, we would miss it here.
6085   code_space_->CollectCodeStatistics();
6086   lo_space_->CollectCodeStatistics();
6087   PagedSpace::ReportCodeStatistics();
6088 }
6089
6090
6091 // This function expects that NewSpace's allocated objects histogram is
6092 // populated (via a call to CollectStatistics or else as a side effect of a
6093 // just-completed scavenge collection).
6094 void Heap::ReportHeapStatistics(const char* title) {
6095   USE(title);
6096   PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
6097          title, gc_count_);
6098   PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
6099          old_generation_allocation_limit_);
6100
6101   PrintF("\n");
6102   PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
6103   isolate_->global_handles()->PrintStats();
6104   PrintF("\n");
6105
6106   PrintF("Heap statistics : ");
6107   isolate_->memory_allocator()->ReportStatistics();
6108   PrintF("To space : ");
6109   new_space_.ReportStatistics();
6110   PrintF("Old pointer space : ");
6111   old_pointer_space_->ReportStatistics();
6112   PrintF("Old data space : ");
6113   old_data_space_->ReportStatistics();
6114   PrintF("Code space : ");
6115   code_space_->ReportStatistics();
6116   PrintF("Map space : ");
6117   map_space_->ReportStatistics();
6118   PrintF("Cell space : ");
6119   cell_space_->ReportStatistics();
6120   PrintF("PropertyCell space : ");
6121   property_cell_space_->ReportStatistics();
6122   PrintF("Large object space : ");
6123   lo_space_->ReportStatistics();
6124   PrintF(">>>>>> ========================================= >>>>>>\n");
6125 }
6126
6127 #endif  // DEBUG
6128
6129 bool Heap::Contains(HeapObject* value) {
6130   return Contains(value->address());
6131 }
6132
6133
6134 bool Heap::Contains(Address addr) {
6135   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6136   return HasBeenSetUp() &&
6137     (new_space_.ToSpaceContains(addr) ||
6138      old_pointer_space_->Contains(addr) ||
6139      old_data_space_->Contains(addr) ||
6140      code_space_->Contains(addr) ||
6141      map_space_->Contains(addr) ||
6142      cell_space_->Contains(addr) ||
6143      property_cell_space_->Contains(addr) ||
6144      lo_space_->SlowContains(addr));
6145 }
6146
6147
6148 bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6149   return InSpace(value->address(), space);
6150 }
6151
6152
6153 bool Heap::InSpace(Address addr, AllocationSpace space) {
6154   if (OS::IsOutsideAllocatedSpace(addr)) return false;
6155   if (!HasBeenSetUp()) return false;
6156
6157   switch (space) {
6158     case NEW_SPACE:
6159       return new_space_.ToSpaceContains(addr);
6160     case OLD_POINTER_SPACE:
6161       return old_pointer_space_->Contains(addr);
6162     case OLD_DATA_SPACE:
6163       return old_data_space_->Contains(addr);
6164     case CODE_SPACE:
6165       return code_space_->Contains(addr);
6166     case MAP_SPACE:
6167       return map_space_->Contains(addr);
6168     case CELL_SPACE:
6169       return cell_space_->Contains(addr);
6170     case PROPERTY_CELL_SPACE:
6171       return property_cell_space_->Contains(addr);
6172     case LO_SPACE:
6173       return lo_space_->SlowContains(addr);
6174   }
6175
6176   return false;
6177 }
6178
6179
6180 #ifdef VERIFY_HEAP
6181 void Heap::Verify() {
6182   CHECK(HasBeenSetUp());
6183
6184   store_buffer()->Verify();
6185
6186   VerifyPointersVisitor visitor;
6187   IterateRoots(&visitor, VISIT_ONLY_STRONG);
6188
6189   new_space_.Verify();
6190
6191   old_pointer_space_->Verify(&visitor);
6192   map_space_->Verify(&visitor);
6193
6194   VerifyPointersVisitor no_dirty_regions_visitor;
6195   old_data_space_->Verify(&no_dirty_regions_visitor);
6196   code_space_->Verify(&no_dirty_regions_visitor);
6197   cell_space_->Verify(&no_dirty_regions_visitor);
6198   property_cell_space_->Verify(&no_dirty_regions_visitor);
6199
6200   lo_space_->Verify();
6201 }
6202 #endif
6203
6204
6205 MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6206   Object* result = NULL;
6207   Object* new_table;
6208   { MaybeObject* maybe_new_table =
6209         string_table()->LookupUtf8String(string, &result);
6210     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6211   }
6212   // Can't use set_string_table because StringTable::cast knows that
6213   // StringTable is a singleton and checks for identity.
6214   roots_[kStringTableRootIndex] = new_table;
6215   ASSERT(result != NULL);
6216   return result;
6217 }
6218
6219
6220 MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6221   Object* result = NULL;
6222   Object* new_table;
6223   { MaybeObject* maybe_new_table =
6224         string_table()->LookupOneByteString(string, &result);
6225     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6226   }
6227   // Can't use set_string_table because StringTable::cast knows that
6228   // StringTable is a singleton and checks for identity.
6229   roots_[kStringTableRootIndex] = new_table;
6230   ASSERT(result != NULL);
6231   return result;
6232 }
6233
6234
6235 MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6236                                      int from,
6237                                      int length) {
6238   Object* result = NULL;
6239   Object* new_table;
6240   { MaybeObject* maybe_new_table =
6241         string_table()->LookupSubStringOneByteString(string,
6242                                                    from,
6243                                                    length,
6244                                                    &result);
6245     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6246   }
6247   // Can't use set_string_table because StringTable::cast knows that
6248   // StringTable is a singleton and checks for identity.
6249   roots_[kStringTableRootIndex] = new_table;
6250   ASSERT(result != NULL);
6251   return result;
6252 }
6253
6254
6255 MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6256   Object* result = NULL;
6257   Object* new_table;
6258   { MaybeObject* maybe_new_table =
6259         string_table()->LookupTwoByteString(string, &result);
6260     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6261   }
6262   // Can't use set_string_table because StringTable::cast knows that
6263   // StringTable is a singleton and checks for identity.
6264   roots_[kStringTableRootIndex] = new_table;
6265   ASSERT(result != NULL);
6266   return result;
6267 }
6268
6269
6270 MaybeObject* Heap::InternalizeString(String* string) {
6271   if (string->IsInternalizedString()) return string;
6272   Object* result = NULL;
6273   Object* new_table;
6274   { MaybeObject* maybe_new_table =
6275         string_table()->LookupString(string, &result);
6276     if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6277   }
6278   // Can't use set_string_table because StringTable::cast knows that
6279   // StringTable is a singleton and checks for identity.
6280   roots_[kStringTableRootIndex] = new_table;
6281   ASSERT(result != NULL);
6282   return result;
6283 }
6284
6285
6286 bool Heap::InternalizeStringIfExists(String* string, String** result) {
6287   if (string->IsInternalizedString()) {
6288     *result = string;
6289     return true;
6290   }
6291   return string_table()->LookupStringIfExists(string, result);
6292 }
6293
6294
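     // Overwrite every word of from-space with the zap value so that stale
     // pointers into from-space are easy to recognize when debugging.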
6295 void Heap::ZapFromSpace() {
6296   NewSpacePageIterator it(new_space_.FromSpaceStart(),
6297                           new_space_.FromSpaceEnd());
6298   while (it.has_next()) {
6299     NewSpacePage* page = it.next();
6300     for (Address cursor = page->area_start(), limit = page->area_end();
6301          cursor < limit;
6302          cursor += kPointerSize) {
6303       Memory::Address_at(cursor) = kFromSpaceZapValue;
6304     }
6305   }
6306 }
6307
6308
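     // Visits every slot in [start, end).  Slots pointing into from-space are
     // handed to |callback|, which may update the slot; if the updated slot
     // still points into new space it is re-registered in the store buffer,
     // and, when recording is allowed (see below), slots pointing at
     // evacuation candidates are recorded for the compactor.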
6309 void Heap::IterateAndMarkPointersToFromSpace(Address start,
6310                                              Address end,
6311                                              ObjectSlotCallback callback) {
6312   Address slot_address = start;
6313
6314   // We are not collecting slots on new space objects during mutation,
6315   // thus we have to scan for pointers to evacuation candidates when we
6316   // promote objects. But we should not record any slots in non-black
6317   // objects. Grey objects' slots would be rescanned.
6318   // White objects might not survive until the end of the collection, so
6319   // it would be a violation of the invariant to record their slots.
6320   bool record_slots = false;
6321   if (incremental_marking()->IsCompacting()) {
6322     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6323     record_slots = Marking::IsBlack(mark_bit);
6324   }
6325
6326   while (slot_address < end) {
6327     Object** slot = reinterpret_cast<Object**>(slot_address);
6328     Object* object = *slot;
6329     // If the store buffer becomes overfull we mark pages as being exempt from
6330     // the store buffer.  These pages are scanned to find pointers that point
6331     // to the new space.  In that case we may hit newly promoted objects and
6332     // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6333     if (object->IsHeapObject()) {
6334       if (Heap::InFromSpace(object)) {
6335         callback(reinterpret_cast<HeapObject**>(slot),
6336                  HeapObject::cast(object));
6337         Object* new_object = *slot;
6338         if (InNewSpace(new_object)) {
6339           SLOW_ASSERT(Heap::InToSpace(new_object));
6340           SLOW_ASSERT(new_object->IsHeapObject());
6341           store_buffer_.EnterDirectlyIntoStoreBuffer(
6342               reinterpret_cast<Address>(slot));
6343         }
6344         SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6345       } else if (record_slots &&
6346                  MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6347         mark_compact_collector()->RecordSlot(slot, slot, object);
6348       }
6349     }
6350     slot_address += kPointerSize;
6351   }
6352 }
6353
6354
6355 #ifdef DEBUG
6356 typedef bool (*CheckStoreBufferFilter)(Object** addr);
6357
6358
6359 bool IsAMapPointerAddress(Object** addr) {
6360   uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6361   int mod = a % Map::kSize;
6362   return mod >= Map::kPointerFieldsBeginOffset &&
6363          mod < Map::kPointerFieldsEndOffset;
6364 }
6365
6366
6367 bool EverythingsAPointer(Object** addr) {
6368   return true;
6369 }
6370
6371
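     // Scans the slots in [current, limit), skipping free-space blocks and the
     // current linear allocation area, and checks that every slot selected by
     // |filter| that holds a new-space pointer appears in the sorted store
     // buffer; a missing entry is a fatal error.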
6372 static void CheckStoreBuffer(Heap* heap,
6373                              Object** current,
6374                              Object** limit,
6375                              Object**** store_buffer_position,
6376                              Object*** store_buffer_top,
6377                              CheckStoreBufferFilter filter,
6378                              Address special_garbage_start,
6379                              Address special_garbage_end) {
6380   Map* free_space_map = heap->free_space_map();
6381   for ( ; current < limit; current++) {
6382     Object* o = *current;
6383     Address current_address = reinterpret_cast<Address>(current);
6384     // Skip free space.
6385     if (o == free_space_map) {
6387       FreeSpace* free_space =
6388           FreeSpace::cast(HeapObject::FromAddress(current_address));
6389       int skip = free_space->Size();
6390       ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6391       ASSERT(skip > 0);
6392       current_address += skip - kPointerSize;
6393       current = reinterpret_cast<Object**>(current_address);
6394       continue;
6395     }
6396     // Skip the current linear allocation space between top and limit, which
6397     // is not marked with the free space map but can contain junk.
6398     if (current_address == special_garbage_start &&
6399         special_garbage_end != special_garbage_start) {
6400       current_address = special_garbage_end - kPointerSize;
6401       current = reinterpret_cast<Object**>(current_address);
6402       continue;
6403     }
6404     if (!(*filter)(current)) continue;
6405     ASSERT(current_address < special_garbage_start ||
6406            current_address >= special_garbage_end);
6407     ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6408     // We have to check that the pointer does not point into new space
6409     // without trying to cast it to a heap object since the hash field of
6410     // a string can contain values like 1 and 3 which are tagged null
6411     // pointers.
6412     if (!heap->InNewSpace(o)) continue;
6413     while (**store_buffer_position < current &&
6414            *store_buffer_position < store_buffer_top) {
6415       (*store_buffer_position)++;
6416     }
6417     if (**store_buffer_position != current ||
6418         *store_buffer_position == store_buffer_top) {
6419       Object** obj_start = current;
6420       while (!(*obj_start)->IsMap()) obj_start--;
6421       UNREACHABLE();
6422     }
6423   }
6424 }
6425
6426
6427 // Check that the store buffer contains all intergenerational pointers by
6428 // scanning a page and ensuring that all pointers to young space are in the
6429 // store buffer.
6430 void Heap::OldPointerSpaceCheckStoreBuffer() {
6431   OldSpace* space = old_pointer_space();
6432   PageIterator pages(space);
6433
6434   store_buffer()->SortUniq();
6435
6436   while (pages.has_next()) {
6437     Page* page = pages.next();
6438     Object** current = reinterpret_cast<Object**>(page->area_start());
6439
6440     Address end = page->area_end();
6441
6442     Object*** store_buffer_position = store_buffer()->Start();
6443     Object*** store_buffer_top = store_buffer()->Top();
6444
6445     Object** limit = reinterpret_cast<Object**>(end);
6446     CheckStoreBuffer(this,
6447                      current,
6448                      limit,
6449                      &store_buffer_position,
6450                      store_buffer_top,
6451                      &EverythingsAPointer,
6452                      space->top(),
6453                      space->limit());
6454   }
6455 }
6456
6457
6458 void Heap::MapSpaceCheckStoreBuffer() {
6459   MapSpace* space = map_space();
6460   PageIterator pages(space);
6461
6462   store_buffer()->SortUniq();
6463
6464   while (pages.has_next()) {
6465     Page* page = pages.next();
6466     Object** current = reinterpret_cast<Object**>(page->area_start());
6467
6468     Address end = page->area_end();
6469
6470     Object*** store_buffer_position = store_buffer()->Start();
6471     Object*** store_buffer_top = store_buffer()->Top();
6472
6473     Object** limit = reinterpret_cast<Object**>(end);
6474     CheckStoreBuffer(this,
6475                      current,
6476                      limit,
6477                      &store_buffer_position,
6478                      store_buffer_top,
6479                      &IsAMapPointerAddress,
6480                      space->top(),
6481                      space->limit());
6482   }
6483 }
6484
6485
6486 void Heap::LargeObjectSpaceCheckStoreBuffer() {
6487   LargeObjectIterator it(lo_space());
6488   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6489     // We only have code, sequential strings, or fixed arrays in large
6490     // object space, and only fixed arrays can possibly contain pointers to
6491     // the young generation.
6492     if (object->IsFixedArray()) {
6493       Object*** store_buffer_position = store_buffer()->Start();
6494       Object*** store_buffer_top = store_buffer()->Top();
6495       Object** current = reinterpret_cast<Object**>(object->address());
6496       Object** limit =
6497           reinterpret_cast<Object**>(object->address() + object->Size());
6498       CheckStoreBuffer(this,
6499                        current,
6500                        limit,
6501                        &store_buffer_position,
6502                        store_buffer_top,
6503                        &EverythingsAPointer,
6504                        NULL,
6505                        NULL);
6506     }
6507   }
6508 }
6509 #endif
6510
6511
6512 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6513   IterateStrongRoots(v, mode);
6514   IterateWeakRoots(v, mode);
6515 }
6516
6517
6518 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6519   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6520   v->Synchronize(VisitorSynchronization::kStringTable);
6521   if (mode != VISIT_ALL_IN_SCAVENGE &&
6522       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6523     // Scavenge collections have special processing for this.
6524     external_string_table_.Iterate(v);
6525     error_object_list_.Iterate(v);
6526   }
6527   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6528 }
6529
6530
6531 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6532   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6533   v->Synchronize(VisitorSynchronization::kStrongRootList);
6534
6535   v->VisitPointer(BitCast<Object**>(&hidden_string_));
6536   v->Synchronize(VisitorSynchronization::kInternalizedString);
6537
6538   isolate_->bootstrapper()->Iterate(v);
6539   v->Synchronize(VisitorSynchronization::kBootstrapper);
6540   isolate_->Iterate(v);
6541   v->Synchronize(VisitorSynchronization::kTop);
6542   Relocatable::Iterate(v);
6543   v->Synchronize(VisitorSynchronization::kRelocatable);
6544
6545 #ifdef ENABLE_DEBUGGER_SUPPORT
6546   isolate_->debug()->Iterate(v);
6547   if (isolate_->deoptimizer_data() != NULL) {
6548     isolate_->deoptimizer_data()->Iterate(v);
6549   }
6550 #endif
6551   v->Synchronize(VisitorSynchronization::kDebug);
6552   isolate_->compilation_cache()->Iterate(v);
6553   v->Synchronize(VisitorSynchronization::kCompilationCache);
6554
6555   // Iterate over local handles in handle scopes.
6556   isolate_->handle_scope_implementer()->Iterate(v);
6557   isolate_->IterateDeferredHandles(v);
6558   v->Synchronize(VisitorSynchronization::kHandleScope);
6559
6560   // Iterate over the builtin code objects and code stubs in the
6561   // heap. Note that it is not necessary to iterate over code objects
6562   // on scavenge collections.
6563   if (mode != VISIT_ALL_IN_SCAVENGE) {
6564     isolate_->builtins()->IterateBuiltins(v);
6565   }
6566   v->Synchronize(VisitorSynchronization::kBuiltins);
6567
6568   // Iterate over global handles.
6569   switch (mode) {
6570     case VISIT_ONLY_STRONG:
6571       isolate_->global_handles()->IterateStrongRoots(v);
6572       break;
6573     case VISIT_ALL_IN_SCAVENGE:
6574       isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6575       break;
6576     case VISIT_ALL_IN_SWEEP_NEWSPACE:
6577     case VISIT_ALL:
6578       isolate_->global_handles()->IterateAllRoots(v);
6579       break;
6580   }
6581   v->Synchronize(VisitorSynchronization::kGlobalHandles);
6582
6583   // Iterate over pointers being held by inactive threads.
6584   isolate_->thread_manager()->Iterate(v);
6585   v->Synchronize(VisitorSynchronization::kThreadManager);
6586
6587   // Iterate over the pointers the Serialization/Deserialization code is
6588   // holding.
6589   // During garbage collection this keeps the partial snapshot cache alive.
6590   // During deserialization of the startup snapshot this creates the partial
6591   // snapshot cache and deserializes the objects it refers to.  During
6592   // serialization this does nothing, since the partial snapshot cache is
6593   // empty.  However the next thing we do is create the partial snapshot,
6594   // filling up the partial snapshot cache with objects it needs as we go.
6595   SerializerDeserializer::Iterate(v);
6596   // We don't do a v->Synchronize call here, because in debug mode that will
6597   // output a flag to the snapshot.  However at this point the serializer and
6598   // deserializer are deliberately a little unsynchronized (see above) so the
6599   // checking of the sync flag in the snapshot would fail.
6600 }
6601
6602
6603 // TODO(1236194): Since the heap size is configurable on the command line
6604 // and through the API, we should gracefully handle the case that the heap
6605 // size is not big enough to fit all the initial objects.
6606 bool Heap::ConfigureHeap(int max_semispace_size,
6607                          intptr_t max_old_gen_size,
6608                          intptr_t max_executable_size) {
6609   if (HasBeenSetUp()) return false;
6610
6611   if (FLAG_stress_compaction) {
6612     // This will cause more frequent GCs when stressing.
6613     max_semispace_size_ = Page::kPageSize;
6614   }
6615
6616   if (max_semispace_size > 0) {
6617     if (max_semispace_size < Page::kPageSize) {
6618       max_semispace_size = Page::kPageSize;
6619       if (FLAG_trace_gc) {
6620         PrintPID("Max semispace size cannot be less than %dkbytes\n",
6621                  Page::kPageSize >> 10);
6622       }
6623     }
6624     max_semispace_size_ = max_semispace_size;
6625   }
6626
6627   if (Snapshot::IsEnabled()) {
6628     // If we are using a snapshot we always reserve the default amount
6629     // of memory for each semispace because the snapshot contains
6630     // write-barrier code that relies on the size and alignment of new
6631     // space.  We therefore cannot use a larger max semispace size
6632     // than the default reserved semispace size.
6633     if (max_semispace_size_ > reserved_semispace_size_) {
6634       max_semispace_size_ = reserved_semispace_size_;
6635       if (FLAG_trace_gc) {
6636         PrintPID("Max semispace size cannot be more than %dkbytes\n",
6637                  reserved_semispace_size_ >> 10);
6638       }
6639     }
6640   } else {
6641     // If we are not using snapshots we reserve space for the actual
6642     // max semispace size.
6643     reserved_semispace_size_ = max_semispace_size_;
6644   }
6645
6646   if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6647   if (max_executable_size > 0) {
6648     max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6649   }
6650
6651   // The max executable size must be less than or equal to the max old
6652   // generation size.
6653   if (max_executable_size_ > max_old_generation_size_) {
6654     max_executable_size_ = max_old_generation_size_;
6655   }
6656
6657   // The new space size must be a power of two to support single-bit testing
6658   // for containment.
6659   max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6660   reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6661   initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6662
6663   // The external allocation limit should be below 256 MB on all architectures
6664   // to avoid unnecessary low memory notifications, as that is the threshold
6665   // for some embedders.
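       // E.g. a 16 MB max semispace gives a limit of 12 * 16 MB = 192 MB,
       // comfortably below that threshold.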
6666   external_allocation_limit_ = 12 * max_semispace_size_;
6667   ASSERT(external_allocation_limit_ <= 256 * MB);
6668
6669   // The old generation is paged and needs at least one page for each space.
6670   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6671   max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6672                                                        Page::kPageSize),
6673                                  RoundUp(max_old_generation_size_,
6674                                          Page::kPageSize));
6675
6676   configured_ = true;
6677   return true;
6678 }
6679
6680
6681 bool Heap::ConfigureHeapDefault() {
6682   return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6683                        static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6684                        static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6685 }
6686
6687
6688 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6689   *stats->start_marker = HeapStats::kStartMarker;
6690   *stats->end_marker = HeapStats::kEndMarker;
6691   *stats->new_space_size = new_space_.SizeAsInt();
6692   *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6693   *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6694   *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6695   *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6696   *stats->old_data_space_capacity = old_data_space_->Capacity();
6697   *stats->code_space_size = code_space_->SizeOfObjects();
6698   *stats->code_space_capacity = code_space_->Capacity();
6699   *stats->map_space_size = map_space_->SizeOfObjects();
6700   *stats->map_space_capacity = map_space_->Capacity();
6701   *stats->cell_space_size = cell_space_->SizeOfObjects();
6702   *stats->cell_space_capacity = cell_space_->Capacity();
6703   *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6704   *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6705   *stats->lo_space_size = lo_space_->Size();
6706   isolate_->global_handles()->RecordStats(stats);
6707   *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6708   *stats->memory_allocator_capacity =
6709       isolate()->memory_allocator()->Size() +
6710       isolate()->memory_allocator()->Available();
6711   *stats->os_error = OS::GetLastError();
6713   if (take_snapshot) {
6714     HeapIterator iterator(this);
6715     for (HeapObject* obj = iterator.next();
6716          obj != NULL;
6717          obj = iterator.next()) {
6718       InstanceType type = obj->map()->instance_type();
6719       ASSERT(0 <= type && type <= LAST_TYPE);
6720       stats->objects_per_type[type]++;
6721       stats->size_per_type[type] += obj->Size();
6722     }
6723   }
6724 }
6725
6726
6727 intptr_t Heap::PromotedSpaceSizeOfObjects() {
6728   return old_pointer_space_->SizeOfObjects()
6729       + old_data_space_->SizeOfObjects()
6730       + code_space_->SizeOfObjects()
6731       + map_space_->SizeOfObjects()
6732       + cell_space_->SizeOfObjects()
6733       + property_cell_space_->SizeOfObjects()
6734       + lo_space_->SizeOfObjects();
6735 }
6736
6737
6738 intptr_t Heap::PromotedExternalMemorySize() {
6739   if (amount_of_external_allocated_memory_
6740       <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6741   return amount_of_external_allocated_memory_
6742       - amount_of_external_allocated_memory_at_last_global_gc_;
6743 }
6744
6745
6746 V8_DECLARE_ONCE(initialize_gc_once);
6747
6748 static void InitializeGCOnce() {
6749   InitializeScavengingVisitorsTables();
6750   NewSpaceScavenger::Initialize();
6751   MarkCompactCollector::Initialize();
6752 }
6753
6754 bool Heap::SetUp() {
6755 #ifdef DEBUG
6756   allocation_timeout_ = FLAG_gc_interval;
6757 #endif
6758
6759   // Initialize heap spaces and initial maps and objects. Whenever something
6760   // goes wrong, just return false. The caller should check the results and
6761   // call Heap::TearDown() to release allocated memory.
6762   //
6763   // If the heap is not yet configured (e.g. through the API), configure it.
6764   // Configuration is based on the flags new-space-size (really the semispace
6765   // size) and old-space-size if set or the initial values of semispace_size_
6766   // and old_generation_size_ otherwise.
6767   if (!configured_) {
6768     if (!ConfigureHeapDefault()) return false;
6769   }
6770
6771   CallOnce(&initialize_gc_once, &InitializeGCOnce);
6772
6773   MarkMapPointersAsEncoded(false);
6774
6775   // Set up memory allocator.
6776   if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6777       return false;
6778
6779   // Set up new space.
6780   if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6781     return false;
6782   }
6783
6784   // Initialize old pointer space.
6785   old_pointer_space_ =
6786       new OldSpace(this,
6787                    max_old_generation_size_,
6788                    OLD_POINTER_SPACE,
6789                    NOT_EXECUTABLE);
6790   if (old_pointer_space_ == NULL) return false;
6791   if (!old_pointer_space_->SetUp()) return false;
6792
6793   // Initialize old data space.
6794   old_data_space_ =
6795       new OldSpace(this,
6796                    max_old_generation_size_,
6797                    OLD_DATA_SPACE,
6798                    NOT_EXECUTABLE);
6799   if (old_data_space_ == NULL) return false;
6800   if (!old_data_space_->SetUp()) return false;
6801
6802   // Initialize the code space, set its maximum capacity to the old
6803   // generation size. It needs executable memory.
6804   // On 64-bit platform(s), we put all code objects in a 2 GB range of
6805   // virtual address space, so that they can call each other with near calls.
6806   if (code_range_size_ > 0) {
6807     if (!isolate_->code_range()->SetUp(code_range_size_)) {
6808       return false;
6809     }
6810   }
6811
6812   code_space_ =
6813       new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6814   if (code_space_ == NULL) return false;
6815   if (!code_space_->SetUp()) return false;
6816
6817   // Initialize map space.
6818   map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6819   if (map_space_ == NULL) return false;
6820   if (!map_space_->SetUp()) return false;
6821
6822   // Initialize simple cell space.
6823   cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6824   if (cell_space_ == NULL) return false;
6825   if (!cell_space_->SetUp()) return false;
6826
6827   // Initialize global property cell space.
6828   property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6829                                                PROPERTY_CELL_SPACE);
6830   if (property_cell_space_ == NULL) return false;
6831   if (!property_cell_space_->SetUp()) return false;
6832
6833   // The large object space may contain code or data.  We set the memory
6834   // to be non-executable here for safety, but this means we need to enable it
6835   // explicitly when allocating large code objects.
6836   lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6837   if (lo_space_ == NULL) return false;
6838   if (!lo_space_->SetUp()) return false;
6839
6840   // Set up the seed that is used to randomize the string hash function.
6841   ASSERT(hash_seed() == 0);
6842   if (FLAG_randomize_hashes) {
6843     if (FLAG_hash_seed == 0) {
6844       set_hash_seed(
6845           Smi::FromInt(V8::RandomPrivate(isolate()) & 0x3fffffff));
6846     } else {
6847       set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6848     }
6849   }
6850
6851   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6852   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6853
6854   store_buffer()->SetUp();
6855
6856   if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
6857 #ifdef DEBUG
6858   relocation_mutex_locked_by_optimizer_thread_ = false;
6859 #endif  // DEBUG
6860
6861   return true;
6862 }
6863
6864 bool Heap::CreateHeapObjects() {
6865   // Create initial maps.
6866   if (!CreateInitialMaps()) return false;
6867   if (!CreateApiObjects()) return false;
6868
6869   // Create initial objects
6870   if (!CreateInitialObjects()) return false;
6871
6872   native_contexts_list_ = undefined_value();
6873   array_buffers_list_ = undefined_value();
6874   return true;
6875 }
6876
6877
6878 void Heap::SetStackLimits() {
6879   ASSERT(isolate_ != NULL);
6880   ASSERT(isolate_ == isolate());
6881   // On 64 bit machines, pointers are generally out of range of Smis.  We write
6882   // something that looks like an out of range Smi to the GC.
6883
6884   // Set up the special root array entries containing the stack limits.
6885   // These are actually addresses, but the tag makes the GC ignore it.
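       // (limit & ~kSmiTagMask) | kSmiTag clears the low-order tag bits of
       // the address and applies the smi tag, so the stored value looks like
       // a smi while keeping the significant bits of the real limit.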
6886   roots_[kStackLimitRootIndex] =
6887       reinterpret_cast<Object*>(
6888           (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6889   roots_[kRealStackLimitRootIndex] =
6890       reinterpret_cast<Object*>(
6891           (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6892 }
6893
6894
6895 void Heap::TearDown() {
6896 #ifdef VERIFY_HEAP
6897   if (FLAG_verify_heap) {
6898     Verify();
6899   }
6900 #endif
6901
6902   if (FLAG_print_cumulative_gc_stat) {
6903     PrintF("\n");
6904     PrintF("gc_count=%d ", gc_count_);
6905     PrintF("mark_sweep_count=%d ", ms_count_);
6906     PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6907     PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6908     PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6909     PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6910            get_max_alive_after_gc());
6911     PrintF("total_marking_time=%.1f ", marking_time());
6912     PrintF("total_sweeping_time=%.1f ", sweeping_time());
6913     PrintF("\n\n");
6914   }
6915
6916   TearDownArrayBuffers();
6917
6918   isolate_->global_handles()->TearDown();
6919
6920   external_string_table_.TearDown();
6921
6922   error_object_list_.TearDown();
6923
6924   new_space_.TearDown();
6925
6926   if (old_pointer_space_ != NULL) {
6927     old_pointer_space_->TearDown();
6928     delete old_pointer_space_;
6929     old_pointer_space_ = NULL;
6930   }
6931
6932   if (old_data_space_ != NULL) {
6933     old_data_space_->TearDown();
6934     delete old_data_space_;
6935     old_data_space_ = NULL;
6936   }
6937
6938   if (code_space_ != NULL) {
6939     code_space_->TearDown();
6940     delete code_space_;
6941     code_space_ = NULL;
6942   }
6943
6944   if (map_space_ != NULL) {
6945     map_space_->TearDown();
6946     delete map_space_;
6947     map_space_ = NULL;
6948   }
6949
6950   if (cell_space_ != NULL) {
6951     cell_space_->TearDown();
6952     delete cell_space_;
6953     cell_space_ = NULL;
6954   }
6955
6956   if (property_cell_space_ != NULL) {
6957     property_cell_space_->TearDown();
6958     delete property_cell_space_;
6959     property_cell_space_ = NULL;
6960   }
6961
6962   if (lo_space_ != NULL) {
6963     lo_space_->TearDown();
6964     delete lo_space_;
6965     lo_space_ = NULL;
6966   }
6967
6968   store_buffer()->TearDown();
6969   incremental_marking()->TearDown();
6970
6971   isolate_->memory_allocator()->TearDown();
6972
6973   delete relocation_mutex_;
6974 }
6975
6976
6977 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
6978   ASSERT(callback != NULL);
6979   GCPrologueCallbackPair pair(callback, gc_type);
6980   ASSERT(!gc_prologue_callbacks_.Contains(pair));
6981   return gc_prologue_callbacks_.Add(pair);
6982 }
6983
6984
6985 void Heap::RemoveGCPrologueCallback(GCPrologueCallback callback) {
6986   ASSERT(callback != NULL);
6987   for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6988     if (gc_prologue_callbacks_[i].callback == callback) {
6989       gc_prologue_callbacks_.Remove(i);
6990       return;
6991     }
6992   }
6993   UNREACHABLE();
6994 }
6995
6996
6997 void Heap::AddGCEpilogueCallback(GCEpilogueCallback callback, GCType gc_type) {
6998   ASSERT(callback != NULL);
6999   GCEpilogueCallbackPair pair(callback, gc_type);
7000   ASSERT(!gc_epilogue_callbacks_.Contains(pair));
7001   return gc_epilogue_callbacks_.Add(pair);
7002 }
7003
7004
7005 void Heap::RemoveGCEpilogueCallback(GCEpilogueCallback callback) {
7006   ASSERT(callback != NULL);
7007   for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
7008     if (gc_epilogue_callbacks_[i].callback == callback) {
7009       gc_epilogue_callbacks_.Remove(i);
7010       return;
7011     }
7012   }
7013   UNREACHABLE();
7014 }
7015
7016
7017 #ifdef DEBUG
7018
7019 class PrintHandleVisitor: public ObjectVisitor {
7020  public:
7021   void VisitPointers(Object** start, Object** end) {
7022     for (Object** p = start; p < end; p++)
7023       PrintF("  handle %p to %p\n",
7024              reinterpret_cast<void*>(p),
7025              reinterpret_cast<void*>(*p));
7026   }
7027 };
7028
7029 void Heap::PrintHandles() {
7030   PrintF("Handles:\n");
7031   PrintHandleVisitor v;
7032   isolate_->handle_scope_implementer()->Iterate(&v);
7033 }
7034
7035 #endif
7036
7037
7038 Space* AllSpaces::next() {
7039   switch (counter_++) {
7040     case NEW_SPACE:
7041       return heap_->new_space();
7042     case OLD_POINTER_SPACE:
7043       return heap_->old_pointer_space();
7044     case OLD_DATA_SPACE:
7045       return heap_->old_data_space();
7046     case CODE_SPACE:
7047       return heap_->code_space();
7048     case MAP_SPACE:
7049       return heap_->map_space();
7050     case CELL_SPACE:
7051       return heap_->cell_space();
7052     case PROPERTY_CELL_SPACE:
7053       return heap_->property_cell_space();
7054     case LO_SPACE:
7055       return heap_->lo_space();
7056     default:
7057       return NULL;
7058   }
7059 }
7060
7061
7062 PagedSpace* PagedSpaces::next() {
7063   switch (counter_++) {
7064     case OLD_POINTER_SPACE:
7065       return heap_->old_pointer_space();
7066     case OLD_DATA_SPACE:
7067       return heap_->old_data_space();
7068     case CODE_SPACE:
7069       return heap_->code_space();
7070     case MAP_SPACE:
7071       return heap_->map_space();
7072     case CELL_SPACE:
7073       return heap_->cell_space();
7074     case PROPERTY_CELL_SPACE:
7075       return heap_->property_cell_space();
7076     default:
7077       return NULL;
7078   }
7079 }
7080
7081
7082
7083 OldSpace* OldSpaces::next() {
7084   switch (counter_++) {
7085     case OLD_POINTER_SPACE:
7086       return heap_->old_pointer_space();
7087     case OLD_DATA_SPACE:
7088       return heap_->old_data_space();
7089     case CODE_SPACE:
7090       return heap_->code_space();
7091     default:
7092       return NULL;
7093   }
7094 }
7095
7096
7097 SpaceIterator::SpaceIterator(Heap* heap)
7098     : heap_(heap),
7099       current_space_(FIRST_SPACE),
7100       iterator_(NULL),
7101       size_func_(NULL) {
7102 }
7103
7104
7105 SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
7106     : heap_(heap),
7107       current_space_(FIRST_SPACE),
7108       iterator_(NULL),
7109       size_func_(size_func) {
7110 }
7111
7112
7113 SpaceIterator::~SpaceIterator() {
7114   // Delete active iterator if any.
7115   delete iterator_;
7116 }
7117
7118
7119 bool SpaceIterator::has_next() {
7120   // Iterate until no more spaces.
7121   return current_space_ != LAST_SPACE;
7122 }
7123
7124
7125 ObjectIterator* SpaceIterator::next() {
7126   if (iterator_ != NULL) {
7127     delete iterator_;
7128     iterator_ = NULL;
7129     // Move to the next space
7130     current_space_++;
7131     if (current_space_ > LAST_SPACE) {
7132       return NULL;
7133     }
7134   }
7135
7136   // Return iterator for the new current space.
7137   return CreateIterator();
7138 }
7139
7140
7141 // Create an iterator for the space to iterate.
7142 ObjectIterator* SpaceIterator::CreateIterator() {
7143   ASSERT(iterator_ == NULL);
7144
7145   switch (current_space_) {
7146     case NEW_SPACE:
7147       iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
7148       break;
7149     case OLD_POINTER_SPACE:
7150       iterator_ =
7151           new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
7152       break;
7153     case OLD_DATA_SPACE:
7154       iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
7155       break;
7156     case CODE_SPACE:
7157       iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
7158       break;
7159     case MAP_SPACE:
7160       iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
7161       break;
7162     case CELL_SPACE:
7163       iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
7164       break;
7165     case PROPERTY_CELL_SPACE:
7166       iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
7167                                          size_func_);
7168       break;
7169     case LO_SPACE:
7170       iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
7171       break;
7172   }
7173
7174   // Return the newly allocated iterator.
7175   ASSERT(iterator_ != NULL);
7176   return iterator_;
7177 }
7178
7179
7180 class HeapObjectsFilter {
7181  public:
7182   virtual ~HeapObjectsFilter() {}
7183   virtual bool SkipObject(HeapObject* object) = 0;
7184 };
7185
7186
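     // Marks everything reachable from the roots using the regular mark bits
     // and then reports unmarked objects as skippable; the destructor clears
     // the mark bits again afterwards.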
7187 class UnreachableObjectsFilter : public HeapObjectsFilter {
7188  public:
7189   UnreachableObjectsFilter() {
7190     MarkReachableObjects();
7191   }
7192
7193   ~UnreachableObjectsFilter() {
7194     Isolate::Current()->heap()->mark_compact_collector()->ClearMarkbits();
7195   }
7196
7197   bool SkipObject(HeapObject* object) {
7198     MarkBit mark_bit = Marking::MarkBitFrom(object);
7199     return !mark_bit.Get();
7200   }
7201
7202  private:
7203   class MarkingVisitor : public ObjectVisitor {
7204    public:
7205     MarkingVisitor() : marking_stack_(10) {}
7206
7207     void VisitPointers(Object** start, Object** end) {
7208       for (Object** p = start; p < end; p++) {
7209         if (!(*p)->IsHeapObject()) continue;
7210         HeapObject* obj = HeapObject::cast(*p);
7211         MarkBit mark_bit = Marking::MarkBitFrom(obj);
7212         if (!mark_bit.Get()) {
7213           mark_bit.Set();
7214           marking_stack_.Add(obj);
7215         }
7216       }
7217     }
7218
7219     void TransitiveClosure() {
7220       while (!marking_stack_.is_empty()) {
7221         HeapObject* obj = marking_stack_.RemoveLast();
7222         obj->Iterate(this);
7223       }
7224     }
7225
7226    private:
7227     List<HeapObject*> marking_stack_;
7228   };
7229
7230   void MarkReachableObjects() {
7231     Heap* heap = Isolate::Current()->heap();
7232     MarkingVisitor visitor;
7233     heap->IterateRoots(&visitor, VISIT_ALL);
7234     visitor.TransitiveClosure();
7235   }
7236
7237   DisallowHeapAllocation no_allocation_;
7238 };
7239
7240
7241 HeapIterator::HeapIterator(Heap* heap)
7242     : heap_(heap),
7243       filtering_(HeapIterator::kNoFiltering),
7244       filter_(NULL) {
7245   Init();
7246 }
7247
7248
7249 HeapIterator::HeapIterator(Heap* heap,
7250                            HeapIterator::HeapObjectsFiltering filtering)
7251     : heap_(heap),
7252       filtering_(filtering),
7253       filter_(NULL) {
7254   Init();
7255 }
7256
7257
7258 HeapIterator::~HeapIterator() {
7259   Shutdown();
7260 }
7261
7262
7263 void HeapIterator::Init() {
7264   // Start the iteration.
7265   space_iterator_ = new SpaceIterator(heap_);
7266   switch (filtering_) {
7267     case kFilterUnreachable:
7268       filter_ = new UnreachableObjectsFilter;
7269       break;
7270     default:
7271       break;
7272   }
7273   object_iterator_ = space_iterator_->next();
7274 }
7275
7276
7277 void HeapIterator::Shutdown() {
7278 #ifdef DEBUG
7279   // Assert that in filtering mode we have iterated through all
7280   // objects. Otherwise, the heap will be left in an inconsistent state.
7281   if (filtering_ != kNoFiltering) {
7282     ASSERT(object_iterator_ == NULL);
7283   }
7284 #endif
7285   // Make sure the last iterator is deallocated.
7286   delete space_iterator_;
7287   space_iterator_ = NULL;
7288   object_iterator_ = NULL;
7289   delete filter_;
7290   filter_ = NULL;
7291 }
7292
7293
7294 HeapObject* HeapIterator::next() {
7295   if (filter_ == NULL) return NextObject();
7296
7297   HeapObject* obj = NextObject();
7298   while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
7299   return obj;
7300 }
7301
7302
7303 HeapObject* HeapIterator::NextObject() {
7304   // No iterator means we are done.
7305   if (object_iterator_ == NULL) return NULL;
7306
7307   if (HeapObject* obj = object_iterator_->next_object()) {
7308     // If the current iterator has more objects we are fine.
7309     return obj;
7310   } else {
7311     // Go through the spaces looking for one that has objects.
7312     while (space_iterator_->has_next()) {
7313       object_iterator_ = space_iterator_->next();
7314       if (HeapObject* obj = object_iterator_->next_object()) {
7315         return obj;
7316       }
7317     }
7318   }
7319   // Done with the last space.
7320   object_iterator_ = NULL;
7321   return NULL;
7322 }
7323
7324
7325 void HeapIterator::reset() {
7326   // Restart the iterator.
7327   Shutdown();
7328   Init();
7329 }
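
// A minimal usage sketch of the iterator above (the loop body is a
// placeholder, not code that exists elsewhere in this file): next() is called
// until it returns NULL, and the optional kFilterUnreachable mode skips
// objects the marking pass in UnreachableObjectsFilter could not reach.
//
//   HeapIterator iterator(heap);
//   // or: HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
//   for (HeapObject* obj = iterator.next();
//        obj != NULL;
//        obj = iterator.next()) {
//     // Inspect obj here.
//   }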
7330
7331
7332 #ifdef DEBUG
7333
7334 Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
7335
7336 class PathTracer::MarkVisitor: public ObjectVisitor {
7337  public:
7338   explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7339   void VisitPointers(Object** start, Object** end) {
7340     // Scan all HeapObject pointers in [start, end)
7341     for (Object** p = start; !tracer_->found() && (p < end); p++) {
7342       if ((*p)->IsHeapObject())
7343         tracer_->MarkRecursively(p, this);
7344     }
7345   }
7346
7347  private:
7348   PathTracer* tracer_;
7349 };
7350
7351
7352 class PathTracer::UnmarkVisitor: public ObjectVisitor {
7353  public:
7354   explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
7355   void VisitPointers(Object** start, Object** end) {
7356     // Scan all HeapObject pointers in [start, end)
7357     for (Object** p = start; p < end; p++) {
7358       if ((*p)->IsHeapObject())
7359         tracer_->UnmarkRecursively(p, this);
7360     }
7361   }
7362
7363  private:
7364   PathTracer* tracer_;
7365 };
7366
7367
7368 void PathTracer::VisitPointers(Object** start, Object** end) {
7369   bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
7370   // Visit all HeapObject pointers in [start, end)
7371   for (Object** p = start; !done && (p < end); p++) {
7372     if ((*p)->IsHeapObject()) {
7373       TracePathFrom(p);
7374       done = ((what_to_find_ == FIND_FIRST) && found_target_);
7375     }
7376   }
7377 }
7378
7379
7380 void PathTracer::Reset() {
7381   found_target_ = false;
7382   object_stack_.Clear();
7383 }
7384
7385
7386 void PathTracer::TracePathFrom(Object** root) {
7387   ASSERT((search_target_ == kAnyGlobalObject) ||
7388          search_target_->IsHeapObject());
7389   found_target_in_trace_ = false;
7390   Reset();
7391
7392   MarkVisitor mark_visitor(this);
7393   MarkRecursively(root, &mark_visitor);
7394
7395   UnmarkVisitor unmark_visitor(this);
7396   UnmarkRecursively(root, &unmark_visitor);
7397
7398   ProcessResults();
7399 }
7400
7401
7402 static bool SafeIsNativeContext(HeapObject* obj) {
7403   return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
7404 }
7405
7406
7407 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
7408   if (!(*p)->IsHeapObject()) return;
7409
7410   HeapObject* obj = HeapObject::cast(*p);
7411
7412   Object* map = obj->map();
7413
7414   if (!map->IsHeapObject()) return;  // visited before
7415
7416   if (found_target_in_trace_) return;  // stop if target found
7417   object_stack_.Add(obj);
7418   if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
7419       (obj == search_target_)) {
7420     found_target_in_trace_ = true;
7421     found_target_ = true;
7422     return;
7423   }
7424
7425   bool is_native_context = SafeIsNativeContext(obj);
7426
7427   // not visited yet
7428   Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
7429
7430   Address map_addr = map_p->address();
7431
7432   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
7433
7434   // Scan the object body.
7435   if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
7436     // This is specialized to scan Contexts properly.
7437     Object** start = reinterpret_cast<Object**>(obj->address() +
7438                                                 Context::kHeaderSize);
7439     Object** end = reinterpret_cast<Object**>(obj->address() +
7440         Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
7441     mark_visitor->VisitPointers(start, end);
7442   } else {
7443     obj->IterateBody(map_p->instance_type(),
7444                      obj->SizeFromMap(map_p),
7445                      mark_visitor);
7446   }
7447
7448   // Scan the map after the body because the body is a lot more interesting
7449   // when doing leak detection.
7450   MarkRecursively(&map, mark_visitor);
7451
7452   if (!found_target_in_trace_)  // don't pop if found the target
7453     object_stack_.RemoveLast();
7454 }
7455
7456
7457 void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
7458   if (!(*p)->IsHeapObject()) return;
7459
7460   HeapObject* obj = HeapObject::cast(*p);
7461
7462   Object* map = obj->map();
7463
7464   if (map->IsHeapObject()) return;  // unmarked already
7465
7466   Address map_addr = reinterpret_cast<Address>(map);
7467
7468   map_addr -= kMarkTag;
7469
7470   ASSERT_TAG_ALIGNED(map_addr);
7471
7472   HeapObject* map_p = HeapObject::FromAddress(map_addr);
7473
7474   obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
7475
7476   UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
7477
7478   obj->IterateBody(Map::cast(map_p)->instance_type(),
7479                    obj->SizeFromMap(Map::cast(map_p)),
7480                    unmark_visitor);
7481 }
7482
7483
7484 void PathTracer::ProcessResults() {
7485   if (found_target_) {
7486     PrintF("=====================================\n");
7487     PrintF("====        Path to object       ====\n");
7488     PrintF("=====================================\n\n");
7489
7490     ASSERT(!object_stack_.is_empty());
7491     for (int i = 0; i < object_stack_.length(); i++) {
7492       if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
7493       Object* obj = object_stack_[i];
7494       obj->Print();
7495     }
7496     PrintF("=====================================\n");
7497   }
7498 }
7499
7500
7501 // Triggers a depth-first traversal of reachable objects from one
7502 // given root object and finds a path to a specific heap object and
7503 // prints it.
7504 void Heap::TracePathToObjectFrom(Object* target, Object* root) {
7505   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7506   tracer.VisitPointer(&root);
7507 }
7508
7509
7510 // Triggers a depth-first traversal of reachable objects from roots
7511 // and finds a path to a specific heap object and prints it.
7512 void Heap::TracePathToObject(Object* target) {
7513   PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
7514   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7515 }
7516
7517
7518 // Triggers a depth-first traversal of reachable objects from roots
7519 // and finds a path to any global object and prints it. Useful for
7520 // determining the source for leaks of global objects.
7521 void Heap::TracePathToGlobal() {
7522   PathTracer tracer(PathTracer::kAnyGlobalObject,
7523                     PathTracer::FIND_ALL,
7524                     VISIT_ALL);
7525   IterateRoots(&tracer, VISIT_ONLY_STRONG);
7526 }
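
// A hedged example of driving these debug-only tracers, e.g. from a debugger
// or temporary instrumentation (the call sites below are illustrative, not
// code elsewhere in this file):
//
//   Heap* heap = isolate->heap();
//   heap->TracePathToObject(suspect);             // path from the strong roots
//   heap->TracePathToObjectFrom(suspect, root);   // path from a chosen root
//   heap->TracePathToGlobal();                    // path to any global object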
7527 #endif
7528
7529
7530 static intptr_t CountTotalHolesSize(Heap* heap) {
7531   intptr_t holes_size = 0;
7532   OldSpaces spaces(heap);
7533   for (OldSpace* space = spaces.next();
7534        space != NULL;
7535        space = spaces.next()) {
7536     holes_size += space->Waste() + space->Available();
7537   }
7538   return holes_size;
7539 }
7540
7541
7542 GCTracer::GCTracer(Heap* heap,
7543                    const char* gc_reason,
7544                    const char* collector_reason)
7545     : start_time_(0.0),
7546       start_object_size_(0),
7547       start_memory_size_(0),
7548       gc_count_(0),
7549       full_gc_count_(0),
7550       allocated_since_last_gc_(0),
7551       spent_in_mutator_(0),
7552       promoted_objects_size_(0),
7553       nodes_died_in_new_space_(0),
7554       nodes_copied_in_new_space_(0),
7555       nodes_promoted_(0),
7556       heap_(heap),
7557       gc_reason_(gc_reason),
7558       collector_reason_(collector_reason) {
7559   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7560   start_time_ = OS::TimeCurrentMillis();
7561   start_object_size_ = heap_->SizeOfObjects();
7562   start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7563
7564   for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7565     scopes_[i] = 0;
7566   }
7567
7568   in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7569
7570   allocated_since_last_gc_ =
7571       heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7572
7573   if (heap_->last_gc_end_timestamp_ > 0) {
7574     spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7575   }
7576
7577   steps_count_ = heap_->incremental_marking()->steps_count();
7578   steps_took_ = heap_->incremental_marking()->steps_took();
7579   longest_step_ = heap_->incremental_marking()->longest_step();
7580   steps_count_since_last_gc_ =
7581       heap_->incremental_marking()->steps_count_since_last_gc();
7582   steps_took_since_last_gc_ =
7583       heap_->incremental_marking()->steps_took_since_last_gc();
7584 }
7585
7586
7587 GCTracer::~GCTracer() {
7588   // Printf ONE line iff flag is set.
7589   if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7590
7591   bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7592
7593   heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7594   heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7595
7596   double time = heap_->last_gc_end_timestamp_ - start_time_;
7597
7598   // Update cumulative GC statistics if required.
7599   if (FLAG_print_cumulative_gc_stat) {
7600     heap_->total_gc_time_ms_ += time;
7601     heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7602     heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7603                                      heap_->alive_after_last_gc_);
7604     if (!first_gc) {
7605       heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7606                                    spent_in_mutator_);
7607     }
7608   } else if (FLAG_trace_gc_verbose) {
7609     heap_->total_gc_time_ms_ += time;
7610   }
7611
7612   if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7613
7614   heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7615
7616   if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7617   PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7618
7619   if (!FLAG_trace_gc_nvp) {
7620     int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7621
7622     double end_memory_size_mb =
7623         static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7624
7625     PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7626            CollectorString(),
7627            static_cast<double>(start_object_size_) / MB,
7628            static_cast<double>(start_memory_size_) / MB,
7629            SizeOfHeapObjects(),
7630            end_memory_size_mb);
7631
7632     if (external_time > 0) PrintF("%d / ", external_time);
7633     PrintF("%.1f ms", time);
7634     if (steps_count_ > 0) {
7635       if (collector_ == SCAVENGER) {
7636         PrintF(" (+ %.1f ms in %d steps since last GC)",
7637                steps_took_since_last_gc_,
7638                steps_count_since_last_gc_);
7639       } else {
7640         PrintF(" (+ %.1f ms in %d steps since start of marking, "
7641                    "biggest step %.1f ms)",
7642                steps_took_,
7643                steps_count_,
7644                longest_step_);
7645       }
7646     }
7647
7648     if (gc_reason_ != NULL) {
7649       PrintF(" [%s]", gc_reason_);
7650     }
7651
7652     if (collector_reason_ != NULL) {
7653       PrintF(" [%s]", collector_reason_);
7654     }
7655
7656     PrintF(".\n");
7657   } else {
7658     PrintF("pause=%.1f ", time);
7659     PrintF("mutator=%.1f ", spent_in_mutator_);
7660     PrintF("gc=");
7661     switch (collector_) {
7662       case SCAVENGER:
7663         PrintF("s");
7664         break;
7665       case MARK_COMPACTOR:
7666         PrintF("ms");
7667         break;
7668       default:
7669         UNREACHABLE();
7670     }
7671     PrintF(" ");
7672
7673     PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7674     PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7675     PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7676     PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7677     PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7678     PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7679     PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7680     PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7681     PrintF("compaction_ptrs=%.1f ",
7682         scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7683     PrintF("intracompaction_ptrs=%.1f ",
7684         scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7685     PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7686     PrintF("weakmap_process=%.1f ", scopes_[Scope::MC_WEAKMAP_PROCESS]);
7687     PrintF("weakmap_clear=%.1f ", scopes_[Scope::MC_WEAKMAP_CLEAR]);
7688
7689     PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7690     PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7691     PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7692            in_free_list_or_wasted_before_gc_);
7693     PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7694
7695     PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7696     PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7697     PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7698     PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7699     PrintF("nodes_promoted=%d ", nodes_promoted_);
7700
7701     if (collector_ == SCAVENGER) {
7702       PrintF("stepscount=%d ", steps_count_since_last_gc_);
7703       PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7704     } else {
7705       PrintF("stepscount=%d ", steps_count_);
7706       PrintF("stepstook=%.1f ", steps_took_);
7707       PrintF("longeststep=%.1f ", longest_step_);
7708     }
7709
7710     PrintF("\n");
7711   }
7712
7713   heap_->PrintShortHeapStatistics();
7714 }
7715
7716
7717 const char* GCTracer::CollectorString() {
7718   switch (collector_) {
7719     case SCAVENGER:
7720       return "Scavenge";
7721     case MARK_COMPACTOR:
7722       return "Mark-sweep";
7723   }
7724   return "Unknown GC";
7725 }
7726
7727
7728 int KeyedLookupCache::Hash(Map* map, Name* name) {
7729   // Uses only lower 32 bits if pointers are larger.
7730   uintptr_t addr_hash =
7731       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
7732   return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
7733 }
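
// Sketch of how Lookup() and Update() below use this hash (symbolic, relying
// on the constants declared in heap.h): the hash is masked down to the start
// of a bucket, and the following kEntriesPerBucket slots form that bucket:
//
//   bucket_start = Hash(map, name) & kHashMask;
//   // candidate slots:
//   //   keys_[bucket_start] .. keys_[bucket_start + kEntriesPerBucket - 1]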
7734
7735
7736 int KeyedLookupCache::Lookup(Map* map, Name* name) {
7737   int index = (Hash(map, name) & kHashMask);
7738   for (int i = 0; i < kEntriesPerBucket; i++) {
7739     Key& key = keys_[index + i];
7740     if ((key.map == map) && key.name->Equals(name)) {
7741       return field_offsets_[index + i];
7742     }
7743   }
7744   return kNotFound;
7745 }
7746
7747
7748 void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
7749   if (!name->IsUniqueName()) {
7750     String* internalized_string;
7751     if (!HEAP->InternalizeStringIfExists(
7752             String::cast(name), &internalized_string)) {
7753       return;
7754     }
7755     name = internalized_string;
7756   }
7757   // This cache is cleared only between mark compact passes, so we expect the
7758   // cache to only contain old space names.
7759   ASSERT(!HEAP->InNewSpace(name));
7760
7761   int index = (Hash(map, name) & kHashMask);
7762   // After a GC there will be free slots, so we use them in order (this may
7763   // help to get the most frequently used one in position 0).
7764   for (int i = 0; i < kEntriesPerBucket; i++) {
7765     Key& key = keys_[index + i];
7766     Object* free_entry_indicator = NULL;
7767     if (key.map == free_entry_indicator) {
7768       key.map = map;
7769       key.name = name;
7770       field_offsets_[index + i] = field_offset;
7771       return;
7772     }
7773   }
7774   // No free entry found in this bucket, so we move them all down one and
7775   // put the new entry at position zero.
7776   for (int i = kEntriesPerBucket - 1; i > 0; i--) {
7777     Key& key = keys_[index + i];
7778     Key& key2 = keys_[index + i - 1];
7779     key = key2;
7780     field_offsets_[index + i] = field_offsets_[index + i - 1];
7781   }
7782
7783   // Write the new first entry.
7784   Key& key = keys_[index];
7785   key.map = map;
7786   key.name = name;
7787   field_offsets_[index] = field_offset;
7788 }
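
// Net effect of Update() on one bucket, assuming four entries per bucket for
// illustration: a free slot is filled in place; on a full bucket the entries
// shift down one position, the new pair becomes entry 0 and the oldest entry
// falls off the end:
//
//   before: [A, B, C, D]      after Update(E): [E, A, B, C]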
7789
7790
7791 void KeyedLookupCache::Clear() {
7792   for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
7793 }
7794
7795
7796 void DescriptorLookupCache::Clear() {
7797   for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
7798 }
7799
7800
7801 #ifdef DEBUG
7802 void Heap::GarbageCollectionGreedyCheck() {
7803   ASSERT(FLAG_gc_greedy);
7804   if (isolate_->bootstrapper()->IsActive()) return;
7805   if (disallow_allocation_failure()) return;
7806   CollectGarbage(NEW_SPACE);
7807 }
7808 #endif
7809
7810
7811 TranscendentalCache::SubCache::SubCache(Type t)
7812   : type_(t),
7813     isolate_(Isolate::Current()) {
7814   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
7815   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
7816   for (int i = 0; i < kCacheSize; i++) {
7817     elements_[i].in[0] = in0;
7818     elements_[i].in[1] = in1;
7819     elements_[i].output = NULL;
7820   }
7821 }
7822
7823
7824 void TranscendentalCache::Clear() {
7825   for (int i = 0; i < kNumberOfCaches; i++) {
7826     if (caches_[i] != NULL) {
7827       delete caches_[i];
7828       caches_[i] = NULL;
7829     }
7830   }
7831 }
7832
7833
7834 void ExternalStringTable::CleanUp() {
7835   int last = 0;
7836   for (int i = 0; i < new_space_strings_.length(); ++i) {
7837     if (new_space_strings_[i] == heap_->the_hole_value()) {
7838       continue;
7839     }
7840     if (heap_->InNewSpace(new_space_strings_[i])) {
7841       new_space_strings_[last++] = new_space_strings_[i];
7842     } else {
7843       old_space_strings_.Add(new_space_strings_[i]);
7844     }
7845   }
7846   new_space_strings_.Rewind(last);
7847   new_space_strings_.Trim();
7848
7849   last = 0;
7850   for (int i = 0; i < old_space_strings_.length(); ++i) {
7851     if (old_space_strings_[i] == heap_->the_hole_value()) {
7852       continue;
7853     }
7854     ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
7855     old_space_strings_[last++] = old_space_strings_[i];
7856   }
7857   old_space_strings_.Rewind(last);
7858   old_space_strings_.Trim();
7859 #ifdef VERIFY_HEAP
7860   if (FLAG_verify_heap) {
7861     Verify();
7862   }
7863 #endif
7864 }
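
// Both passes above use the usual in-place compaction idiom for these lists;
// a generic sketch with a hypothetical Keep() predicate:
//
//   int last = 0;
//   for (int i = 0; i < strings.length(); ++i) {
//     if (Keep(strings[i])) strings[last++] = strings[i];
//   }
//   strings.Rewind(last);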
7865
7866
7867 void ExternalStringTable::TearDown() {
7868   new_space_strings_.Free();
7869   old_space_strings_.Free();
7870 }
7871
7872
7873 // Update all references.
7874 void ErrorObjectList::UpdateReferences() {
7875   for (int i = 0; i < list_.length(); i++) {
7876     HeapObject* object = HeapObject::cast(list_[i]);
7877     MapWord first_word = object->map_word();
7878     if (first_word.IsForwardingAddress()) {
7879       list_[i] = first_word.ToForwardingAddress();
7880     }
7881   }
7882 }
7883
7884
7885 // Unforwarded objects in new space are dead and removed from the list.
7886 void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
7887   if (list_.is_empty()) return;
7888   if (!nested_) {
7889     int write_index = 0;
7890     for (int i = 0; i < list_.length(); i++) {
7891       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7892       if (first_word.IsForwardingAddress()) {
7893         list_[write_index++] = first_word.ToForwardingAddress();
7894       }
7895     }
7896     list_.Rewind(write_index);
7897   } else {
7898     // If a GC is triggered during DeferredFormatStackTrace, we do not move
7899     // objects in the list, just remove dead ones, so as not to confuse the
7900     // loop in DeferredFormatStackTrace.
7901     for (int i = 0; i < list_.length(); i++) {
7902       MapWord first_word = HeapObject::cast(list_[i])->map_word();
7903       list_[i] = first_word.IsForwardingAddress()
7904                      ? first_word.ToForwardingAddress()
7905                      : heap->the_hole_value();
7906     }
7907   }
7908 }
7909
7910
7911 void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
7912   // If formatting the stack trace causes a GC, this method will be
7913   // recursively called.  In that case, skip the recursive call, since
7914   // the loop modifies the list while iterating over it.
7915   if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
7916   nested_ = true;
7917   HandleScope scope(isolate);
7918   Handle<String> stack_key = isolate->factory()->stack_string();
7919   int write_index = 0;
7920   int budget = kBudgetPerGC;
7921   for (int i = 0; i < list_.length(); i++) {
7922     Object* object = list_[i];
7923     JSFunction* getter_fun;
7924
7925     { DisallowHeapAllocation no_gc;
7926       // Skip possible holes in the list.
7927       if (object->IsTheHole()) continue;
7928       if (isolate->heap()->InNewSpace(object) || budget == 0) {
7929         list_[write_index++] = object;
7930         continue;
7931       }
7932
7933       // Check whether the stack property is backed by the original getter.
7934       LookupResult lookup(isolate);
7935       JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
7936       if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
7937       Object* callback = lookup.GetCallbackObject();
7938       if (!callback->IsAccessorPair()) continue;
7939       Object* getter_obj = AccessorPair::cast(callback)->getter();
7940       if (!getter_obj->IsJSFunction()) continue;
7941       getter_fun = JSFunction::cast(getter_obj);
7942       String* key = isolate->heap()->hidden_stack_trace_string();
7943       Object* value = getter_fun->GetHiddenProperty(key);
7944       if (key != value) continue;
7945     }
7946
7947     budget--;
7948     HandleScope scope(isolate);
7949     bool has_exception = false;
7950 #ifdef DEBUG
7951     Handle<Map> map(HeapObject::cast(object)->map(), isolate);
7952 #endif
7953     Handle<Object> object_handle(object, isolate);
7954     Handle<Object> getter_handle(getter_fun, isolate);
7955     Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
7956     ASSERT(*map == HeapObject::cast(*object_handle)->map());
7957     if (has_exception) {
7958       // Hit an exception (most likely a stack overflow).
7959       // Wrap up this pass and retry after another GC.
7960       isolate->clear_pending_exception();
7961       // We use the handle since calling the getter might have caused a GC.
7962       list_[write_index++] = *object_handle;
7963       budget = 0;
7964     }
7965   }
7966   list_.Rewind(write_index);
7967   list_.Trim();
7968   nested_ = false;
7969 }
7970
7971
7972 void ErrorObjectList::RemoveUnmarked(Heap* heap) {
7973   for (int i = 0; i < list_.length(); i++) {
7974     HeapObject* object = HeapObject::cast(list_[i]);
7975     if (!Marking::MarkBitFrom(object).Get()) {
7976       list_[i] = heap->the_hole_value();
7977     }
7978   }
7979 }
7980
7981
7982 void ErrorObjectList::TearDown() {
7983   list_.Free();
7984 }
7985
7986
7987 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
7988   chunk->set_next_chunk(chunks_queued_for_free_);
7989   chunks_queued_for_free_ = chunk;
7990 }
7991
7992
7993 void Heap::FreeQueuedChunks() {
7994   if (chunks_queued_for_free_ == NULL) return;
7995   MemoryChunk* next;
7996   MemoryChunk* chunk;
7997   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
7998     next = chunk->next_chunk();
7999     chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8000
8001     if (chunk->owner()->identity() == LO_SPACE) {
8002       // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
8003       // If FromAnyPointerAddress encounters a slot that belongs to a large
8004       // chunk queued for deletion it will fail to find the chunk because
8005       // it tries to search the list of pages owned by the large object
8006       // space, and queued chunks were detached from that list.
8007       // To work around this we split the large chunk into normal,
8008       // kPageSize-aligned pieces and initialize their size, owner and flags.
8009       // If FromAnyPointerAddress encounters a slot that belongs to one of
8010       // these smaller pieces it will treat it as a slot on a normal Page.
8011       Address chunk_end = chunk->address() + chunk->size();
8012       MemoryChunk* inner = MemoryChunk::FromAddress(
8013           chunk->address() + Page::kPageSize);
8014       MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
8015       while (inner <= inner_last) {
8016         // Size of a large chunk is always a multiple of
8017         // OS::AllocateAlignment() so there is always
8018         // enough space for a fake MemoryChunk header.
8019         Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
8020         // Guard against overflow.
8021         if (area_end < inner->address()) area_end = chunk_end;
8022         inner->SetArea(inner->address(), area_end);
8023         inner->set_size(Page::kPageSize);
8024         inner->set_owner(lo_space());
8025         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
8026         inner = MemoryChunk::FromAddress(
8027             inner->address() + Page::kPageSize);
8028       }
8029     }
8030   }
8031   isolate_->heap()->store_buffer()->Compact();
8032   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
8033   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
8034     next = chunk->next_chunk();
8035     isolate_->memory_allocator()->Free(chunk);
8036   }
8037   chunks_queued_for_free_ = NULL;
8038 }
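
// Rough picture of the large-chunk handling above (layout is illustrative):
//
//   [ large chunk: header | piece | piece | ... | last piece (may be short) ]
//
// Every Page::kPageSize-aligned piece is given a fake MemoryChunk header with
// size = Page::kPageSize, owner = lo_space() and ABOUT_TO_BE_FREED set, so
// StoreBuffer::Filter can still resolve stale slots via
// MemoryChunk::FromAnyPointerAddress until the chunk is actually freed.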
8039
8040
8041 void Heap::RememberUnmappedPage(Address page, bool compacted) {
8042   uintptr_t p = reinterpret_cast<uintptr_t>(page);
8043   // Tag the page pointer to make it findable in the dump file.
8044   if (compacted) {
8045     p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
8046   } else {
8047     p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
8048   }
8049   remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
8050       reinterpret_cast<Address>(p);
8051   remembered_unmapped_pages_index_++;
8052   remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
8053 }
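
// Worked example of the tagging, assuming 1 MB pages so the masks keep the
// full tag: a compacted page that was unmapped at 0x3d200000 is remembered as
// 0x3d2c1ead ("c1ead" ~ cleared), a non-compacted one as 0x3d21d1ed
// ("1d1ed" ~ I died), which makes these entries easy to spot in a minidump.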
8054
8055
8056 void Heap::ClearObjectStats(bool clear_last_time_stats) {
8057   memset(object_counts_, 0, sizeof(object_counts_));
8058   memset(object_sizes_, 0, sizeof(object_sizes_));
8059   if (clear_last_time_stats) {
8060     memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
8061     memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
8062   }
8063 }
8064
8065
8066 static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
8067
8068
8069 void Heap::CheckpointObjectStats() {
8070   ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
8071   Counters* counters = isolate()->counters();
8072 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
8073   counters->count_of_##name()->Increment(                                      \
8074       static_cast<int>(object_counts_[name]));                                 \
8075   counters->count_of_##name()->Decrement(                                      \
8076       static_cast<int>(object_counts_last_time_[name]));                       \
8077   counters->size_of_##name()->Increment(                                       \
8078       static_cast<int>(object_sizes_[name]));                                  \
8079   counters->size_of_##name()->Decrement(                                       \
8080       static_cast<int>(object_sizes_last_time_[name]));
8081   INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8082 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8083   int index;
8084 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8085   index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
8086   counters->count_of_CODE_TYPE_##name()->Increment(       \
8087       static_cast<int>(object_counts_[index]));           \
8088   counters->count_of_CODE_TYPE_##name()->Decrement(       \
8089       static_cast<int>(object_counts_last_time_[index])); \
8090   counters->size_of_CODE_TYPE_##name()->Increment(        \
8091       static_cast<int>(object_sizes_[index]));            \
8092   counters->size_of_CODE_TYPE_##name()->Decrement(        \
8093       static_cast<int>(object_sizes_last_time_[index]));
8094   CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8095 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8096 #define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
8097   index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
8098   counters->count_of_FIXED_ARRAY_##name()->Increment(     \
8099       static_cast<int>(object_counts_[index]));           \
8100   counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
8101       static_cast<int>(object_counts_last_time_[index])); \
8102   counters->size_of_FIXED_ARRAY_##name()->Increment(      \
8103       static_cast<int>(object_sizes_[index]));            \
8104   counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
8105       static_cast<int>(object_sizes_last_time_[index]));
8106   FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
8107 #undef ADJUST_LAST_TIME_OBJECT_COUNT
8108
8109   OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
8110   OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
8111   ClearObjectStats();
8112 }
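
// Net effect of each Increment()/Decrement() pair above, per counter:
//
//   counter += current_value - value_at_last_checkpoint;
//
// so the counters advance by the delta accumulated since the last checkpoint
// without ever being reset.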
8113
8114
8115 Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
8116   if (FLAG_parallel_recompilation) {
8117     heap_->relocation_mutex_->Lock();
8118 #ifdef DEBUG
8119     heap_->relocation_mutex_locked_by_optimizer_thread_ =
8120         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
8121 #endif  // DEBUG
8122   }
8123 }
8124
8125 } }  // namespace v8::internal